DPDK patches and discussions
* [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF
@ 2020-03-13  1:04 wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF wei.zhao1@intel.com
                   ` (7 more replies)
  0 siblings, 8 replies; 12+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13  1:04 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye

A DCF (Device Config Function) framework has been added for Intel devices;
this patch set adds switch filter support for it, and also fixes
bugs which block this feature.

This patchset is based on:
[1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD support

Depends-on: series-8859


wei zhao (7):
  net/ice: enable switch flow on DCF
  net/ice: support for more PPPoE input set
  net/ice: change swicth parser to support flexible mask
  net/ice: add support for MAC VLAN rule
  net/ice: change default tunnle type
  net/ice: add action number check for swicth
  net/ice: fix input set of VLAN item

 config/common_linux                 |   1 +
 drivers/net/ice/ice_dcf_ethdev.c    |  10 +-
 drivers/net/ice/ice_dcf_parent.c    |   7 +
 drivers/net/ice/ice_fdir_filter.c   |   6 +
 drivers/net/ice/ice_generic_flow.c  |  13 +
 drivers/net/ice/ice_generic_flow.h  |   9 +
 drivers/net/ice/ice_hash.c          |   6 +
 drivers/net/ice/ice_switch_filter.c | 461 ++++++++++++++++++----------
 8 files changed, 348 insertions(+), 165 deletions(-)

-- 
2.19.1



* [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF
  2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
@ 2020-03-13  1:04 ` wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set wei.zhao1@intel.com
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 12+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13  1:04 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao

DCF on CVL is a control plane VF which takes the responsibility to
configure all the PF/global resources. This patch adds support for DCF
to program forwarding rules that direct packets to VFs.
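
For illustration, a minimal sketch of the kind of rule this enables
(the port id, VF id and pattern are placeholders; error handling is
omitted):

#include <rte_flow.h>

/* Forward all IPv4 traffic arriving on the DCF port to VF 1. */
static struct rte_flow *
dcf_fwd_ipv4_to_vf(uint16_t dcf_port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vf vf = { .id = 1 };	/* example VF id */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* ice_switch_parse_dcf_action() below maps the VF action to
	 * ICE_FWD_TO_VSI with the VF id as the VSI handle.
	 */
	return rte_flow_create(dcf_port_id, &attr, pattern, actions, &err);
}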

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_dcf_ethdev.c    | 10 ++++++--
 drivers/net/ice/ice_dcf_parent.c    |  7 ++++++
 drivers/net/ice/ice_fdir_filter.c   |  6 +++++
 drivers/net/ice/ice_hash.c          |  6 +++++
 drivers/net/ice/ice_switch_filter.c | 39 ++++++++++++++++++++++++++++-
 5 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index f65b962d4..759d92afb 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -115,8 +115,8 @@ ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
 static int
 ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
-			__rte_unused enum rte_filter_op filter_op,
-			__rte_unused void *arg)
+			enum rte_filter_op filter_op,
+			void *arg)
 {
 	int ret = 0;
 
@@ -124,6 +124,12 @@ ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 			    filter_type);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index bca9cd34a..c2dc13936 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -314,6 +314,12 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
 	}
 	parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
 
+	err = ice_flow_init(parent_adapter);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Failed to initialize flow");
+		goto uninit_hw;
+	}
+
 	ice_dcf_update_vf_vsi_map(parent_hw,
 				  hw->num_vfs, hw->vf_vsi_map);
 
@@ -344,5 +350,6 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
 	rte_eal_alarm_cancel(ice_dcf_vsi_update_service_handler,
 			     &adapter->real_hw);
 
+	ice_flow_uninit(parent_adapter);
 	ice_dcf_uninit_parent_hw(parent_hw);
 }
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index d737c1acd..c9343c1fa 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1061,6 +1061,9 @@ ice_fdir_init(struct ice_adapter *ad)
 	struct ice_flow_parser *parser;
 	int ret;
 
+	if (ad->hw.dcf_enabled)
+		return 0;
+
 	ret = ice_fdir_setup(pf);
 	if (ret)
 		return ret;
@@ -1081,6 +1084,9 @@ ice_fdir_uninit(struct ice_adapter *ad)
 	struct ice_pf *pf = &ad->pf;
 	struct ice_flow_parser *parser;
 
+	if (ad->hw.dcf_enabled)
+		return;
+
 	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
 		parser = &ice_fdir_parser_comms;
 	else
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index d891538bd..69d805248 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -243,6 +243,9 @@ ice_hash_init(struct ice_adapter *ad)
 {
 	struct ice_flow_parser *parser = NULL;
 
+	if (ad->hw.dcf_enabled)
+		return 0;
+
 	if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
 		parser = &ice_hash_parser_os;
 	else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
@@ -560,6 +563,9 @@ ice_hash_destroy(struct ice_adapter *ad,
 static void
 ice_hash_uninit(struct ice_adapter *ad)
 {
+	if (ad->hw.dcf_enabled)
+		return;
+
 	if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
 		ice_unregister_parser(&ice_hash_parser_os, ad);
 	else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4a9356b31..c55e44e1a 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -913,6 +913,39 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	return 0;
 }
 
+static int
+ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
+			    struct rte_flow_error *error,
+			    struct ice_adv_rule_info *rule_info)
+{
+	const struct rte_flow_action_vf *act_vf;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VF:
+			rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
+			act_vf = action->conf;
+			rule_info->sw_act.vsi_handle = act_vf->id;
+			break;
+		default:
+			rte_flow_error_set(error,
+					   EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions,
+					   "Invalid action type or queue number");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
+	rule_info->rx = 1;
+	rule_info->priority = 5;
+
+	return 0;
+}
 
 static int
 ice_switch_parse_action(struct ice_pf *pf,
@@ -1081,7 +1114,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 		goto error;
 	}
 
-	ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+	if (ad->hw.dcf_enabled)
+		ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
+	else
+		ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+
 	if (ret) {
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-- 
2.19.1



* [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set
  2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF wei.zhao1@intel.com
@ 2020-03-13  1:04 ` wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 3/7] net/ice: change swicth parser to support flexible mask wei.zhao1@intel.com
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 12+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13  1:04 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao

This patch adds more support for PPPoE packets:
it enables the switch filter to direct PPPoE packets based on
session id and PPP protocol type.
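
For illustration, a sketch of one such pattern (session id and protocol
values are examples only):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Match PPPoE session 0x1234 carrying IPv4 (PPP protocol 0x0021). */
struct rte_flow_item_pppoe pppoe_spec = {
	.session_id = RTE_BE16(0x1234),
};
struct rte_flow_item_pppoe pppoe_mask = {
	.session_id = RTE_BE16(0xffff),	/* this patch consumes a full mask */
};
struct rte_flow_item_pppoe_proto_id proto_spec = {
	.proto_id = RTE_BE16(0x0021),
};
struct rte_flow_item_pppoe_proto_id proto_mask = {
	.proto_id = RTE_BE16(0xffff),
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_PPPOES,
	  .spec = &pppoe_spec, .mask = &pppoe_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
	  .spec = &proto_spec, .mask = &proto_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

This corresponds to the new pattern_eth_pppoes_proto entry; note that the
parser folds the PPPOE_PROTO_ID item back into the same ICE_PPPOE lookup
element as the PPPOES item.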

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 config/common_linux                 |  1 +
 drivers/net/ice/ice_generic_flow.c  | 13 +++++
 drivers/net/ice/ice_generic_flow.h  |  9 ++++
 drivers/net/ice/ice_switch_filter.c | 82 +++++++++++++++++++++++++++--
 4 files changed, 100 insertions(+), 5 deletions(-)

diff --git a/config/common_linux b/config/common_linux
index 816810671..c6630d2bd 100644
--- a/config/common_linux
+++ b/config/common_linux
@@ -8,6 +8,7 @@ CONFIG_RTE_EXEC_ENV_LINUX=y
 CONFIG_RTE_EXEC_ENV_LINUXAPP=y
 
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_EAL_IGB_UIO=y
 CONFIG_RTE_EAL_VFIO=y
 CONFIG_RTE_LIBRTE_KNI=y
 CONFIG_RTE_LIBRTE_PMD_KNI=y
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 38ac799d8..af0fff814 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1122,12 +1122,25 @@ enum rte_flow_item_type pattern_eth_pppoes[] = {
 	RTE_FLOW_ITEM_TYPE_PPPOES,
 	RTE_FLOW_ITEM_TYPE_END,
 };
+enum rte_flow_item_type pattern_eth_pppoes_proto[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_PPPOES,
+	RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+	RTE_FLOW_ITEM_TYPE_END,
+};
 enum rte_flow_item_type pattern_eth_vlan_pppoes[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_VLAN,
 	RTE_FLOW_ITEM_TYPE_PPPOES,
 	RTE_FLOW_ITEM_TYPE_END,
 };
+enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_PPPOES,
+	RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+	RTE_FLOW_ITEM_TYPE_END,
+};
 enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_VLAN,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index adc30ee2a..f1139c690 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -30,6 +30,7 @@
 #define ICE_PROT_VXLAN             (1ULL << 19)
 #define ICE_PROT_NVGRE             (1ULL << 20)
 #define ICE_PROT_GTPU              (1ULL << 21)
+#define ICE_PROT_PPPoE             (1ULL << 22)
 
 /* field */
 
@@ -49,6 +50,8 @@
 #define ICE_NVGRE_TNI              (1ULL << 50)
 #define ICE_GTPU_TEID              (1ULL << 49)
 #define ICE_GTPU_QFI               (1ULL << 48)
+#define ICE_PPPOE_SESSION          (1ULL << 47)
+#define ICE_PPPOE_PROTO            (1ULL << 46)
 
 /* input set */
 
@@ -177,6 +180,10 @@
 	(ICE_PROT_GTPU | ICE_GTPU_TEID)
 #define ICE_INSET_GTPU_QFI \
 	(ICE_PROT_GTPU | ICE_GTPU_QFI)
+#define ICE_INSET_PPPOE_SESSION \
+	(ICE_PROT_PPPoE | ICE_PPPOE_SESSION)
+#define ICE_INSET_PPPOE_PROTO \
+	(ICE_PROT_PPPoE | ICE_PPPOE_PROTO)
 
 /* empty pattern */
 extern enum rte_flow_item_type pattern_empty[];
@@ -349,7 +356,9 @@ extern enum rte_flow_item_type pattern_eth_pppoed[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoed[];
 extern enum rte_flow_item_type pattern_eth_qinq_pppoed[];
 extern enum rte_flow_item_type pattern_eth_pppoes[];
+extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
+extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
 extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
 extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index c55e44e1a..39b5c7266 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -87,7 +87,11 @@
 	ICE_INSET_TUN_IPV4_TOS)
 #define ICE_SW_INSET_MAC_PPPOE  ( \
 	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
-	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)
+	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
+#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
+	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
+	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
+	ICE_INSET_PPPOE_PROTO)
 
 struct sw_meta {
 	struct ice_adv_lkup_elem *list;
@@ -135,6 +139,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
 	{pattern_eth_vlan_pppoes,
 			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+	{pattern_eth_pppoes_proto,
+			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_proto,
+			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
 };
 
 static struct
@@ -316,12 +324,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
 	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
 	const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
+	const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
+				*pppoe_proto_mask;
 	uint8_t  ipv6_addr_mask[16] = {
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 	uint64_t input_set = ICE_INSET_NONE;
 	uint16_t j, t = 0;
 	uint16_t tunnel_valid = 0;
+	uint16_t pppoe_valid = 0;
 
 
 	for (item = pattern; item->type !=
@@ -885,14 +896,75 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			pppoe_mask = item->mask;
 			/* Check if PPPoE item is used to describe protocol.
 			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
 			 */
-			if (pppoe_spec || pppoe_mask) {
+			if ((!pppoe_spec && pppoe_mask) ||
+				(pppoe_spec && !pppoe_mask)) {
 				rte_flow_error_set(error, EINVAL,
-					   RTE_FLOW_ERROR_TYPE_ITEM,
-					   item,
-					   "Invalid pppoe item");
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Invalid pppoe item");
 				return 0;
 			}
+			if (pppoe_spec && pppoe_mask) {
+				/* Check pppoe mask and update input set */
+				if (pppoe_mask->length ||
+					pppoe_mask->code ||
+					pppoe_mask->version_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid pppoe mask");
+					return 0;
+				}
+				list[t].type = ICE_PPPOE;
+				if (pppoe_mask->session_id == UINT16_MAX) {
+					list[t].h_u.pppoe_hdr.session_id =
+						pppoe_spec->session_id;
+					list[t].m_u.pppoe_hdr.session_id =
+						UINT16_MAX;
+					input_set |= ICE_INSET_PPPOE_SESSION;
+				}
+				t++;
+				pppoe_valid = 1;
+			} else if (!pppoe_spec && !pppoe_mask) {
+				list[t].type = ICE_PPPOE;
+			}
+
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
+			pppoe_proto_spec = item->spec;
+			pppoe_proto_mask = item->mask;
+			/* Check if PPPoE optional proto_id item
+			 * is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
+				(pppoe_proto_spec && !pppoe_proto_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Invalid pppoe proto item");
+				return 0;
+			}
+			if (pppoe_proto_spec && pppoe_proto_mask) {
+				if (pppoe_valid)
+					t--;
+				list[t].type = ICE_PPPOE;
+				if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+					list[t].h_u.pppoe_hdr.ppp_prot_id =
+						pppoe_proto_spec->proto_id;
+					list[t].m_u.pppoe_hdr.ppp_prot_id =
+						UINT16_MAX;
+					input_set |= ICE_INSET_PPPOE_PROTO;
+				}
+				t++;
+			} else if (!pppoe_proto_spec && !pppoe_proto_mask) {
+				list[t].type = ICE_PPPOE;
+			}
+
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_VOID:
-- 
2.19.1



* [dpdk-dev] [PATCH 3/7] net/ice: change swicth parser to support flexible mask
  2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set wei.zhao1@intel.com
@ 2020-03-13  1:04 ` wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule wei.zhao1@intel.com
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 12+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13  1:04 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao

DCF needs to support configuration of flexible masks, that is to say
some input set masks may not be the all-ones 0xFFFF type. For example,
in order to direct L2/IP multicast packets, the mask for the source IP
may be 0xF0000000. This patch enables the switch filter parser to
handle such masks.
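
For illustration, the kind of partial mask this change accepts (the
addresses are examples; previously only all-ones masks were consumed):

#include <rte_flow.h>
#include <rte_byteorder.h>
#include <rte_ip.h>

/* Match any packet whose source IP falls in 224.0.0.0/4. */
struct rte_flow_item_ipv4 ip_spec = {
	.hdr.src_addr = RTE_BE32(RTE_IPV4(224, 0, 0, 0)),
};
struct rte_flow_item_ipv4 ip_mask = {
	.hdr.src_addr = RTE_BE32(0xf0000000),	/* 4-bit prefix, not all-ones */
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
	  .spec = &ip_spec, .mask = &ip_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

The parser now copies the user-supplied mask into list[t].m_u instead of
hard-coding UINT32_MAX, so any non-zero mask is honored as-is.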

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 298 +++++++++++++---------------
 1 file changed, 133 insertions(+), 165 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 39b5c7266..af7e9cb0b 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -326,9 +326,6 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
 	const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
 				*pppoe_proto_mask;
-	uint8_t  ipv6_addr_mask[16] = {
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 	uint64_t input_set = ICE_INSET_NONE;
 	uint16_t j, t = 0;
 	uint16_t tunnel_valid = 0;
@@ -351,19 +348,29 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			eth_spec = item->spec;
 			eth_mask = item->mask;
 			if (eth_spec && eth_mask) {
-				if (tunnel_valid &&
-				    rte_is_broadcast_ether_addr(&eth_mask->src))
-					input_set |= ICE_INSET_TUN_SMAC;
-				else if (
-				rte_is_broadcast_ether_addr(&eth_mask->src))
-					input_set |= ICE_INSET_SMAC;
-				if (tunnel_valid &&
-				    rte_is_broadcast_ether_addr(&eth_mask->dst))
-					input_set |= ICE_INSET_TUN_DMAC;
-				else if (
-				rte_is_broadcast_ether_addr(&eth_mask->dst))
-					input_set |= ICE_INSET_DMAC;
-				if (eth_mask->type == RTE_BE16(0xffff))
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j]) {
+					if (tunnel_valid)
+						input_set |=
+							ICE_INSET_TUN_SMAC;
+					else
+						input_set |=
+							ICE_INSET_SMAC;
+					break;
+					}
+				}
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->dst.addr_bytes[j]) {
+					if (tunnel_valid)
+						input_set |=
+						ICE_INSET_TUN_DMAC;
+					else
+						input_set |=
+						ICE_INSET_DMAC;
+					break;
+					}
+				}
+				if (eth_mask->type)
 					input_set |= ICE_INSET_ETHERTYPE;
 				list[t].type = (tunnel_valid  == 0) ?
 					ICE_MAC_OFOS : ICE_MAC_IL;
@@ -373,16 +380,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				h = &list[t].h_u.eth_hdr;
 				m = &list[t].m_u.eth_hdr;
 				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
-					if (eth_mask->src.addr_bytes[j] ==
-								UINT8_MAX) {
+					if (eth_mask->src.addr_bytes[j]) {
 						h->src_addr[j] =
 						eth_spec->src.addr_bytes[j];
 						m->src_addr[j] =
 						eth_mask->src.addr_bytes[j];
 						i = 1;
 					}
-					if (eth_mask->dst.addr_bytes[j] ==
-								UINT8_MAX) {
+					if (eth_mask->dst.addr_bytes[j]) {
 						h->dst_addr[j] =
 						eth_spec->dst.addr_bytes[j];
 						m->dst_addr[j] =
@@ -392,17 +397,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 				if (i)
 					t++;
-				if (eth_mask->type == UINT16_MAX) {
+				if (eth_mask->type) {
 					list[t].type = ICE_ETYPE_OL;
 					list[t].h_u.ethertype.ethtype_id =
 						eth_spec->type;
 					list[t].m_u.ethertype.ethtype_id =
-						UINT16_MAX;
+						eth_mask->type;
 					t++;
 				}
-			} else if (!eth_spec && !eth_mask) {
-				list[t].type = (tun_type == ICE_NON_TUN) ?
-					ICE_MAC_OFOS : ICE_MAC_IL;
 			}
 			break;
 
@@ -423,81 +425,68 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (ipv4_mask->hdr.type_of_service ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.type_of_service)
 						input_set |=
 							ICE_INSET_TUN_IPV4_TOS;
-					if (ipv4_mask->hdr.src_addr ==
-							UINT32_MAX)
+					if (ipv4_mask->hdr.src_addr)
 						input_set |=
 							ICE_INSET_TUN_IPV4_SRC;
-					if (ipv4_mask->hdr.dst_addr ==
-							UINT32_MAX)
+					if (ipv4_mask->hdr.dst_addr)
 						input_set |=
 							ICE_INSET_TUN_IPV4_DST;
-					if (ipv4_mask->hdr.time_to_live ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.time_to_live)
 						input_set |=
 							ICE_INSET_TUN_IPV4_TTL;
-					if (ipv4_mask->hdr.next_proto_id ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.next_proto_id)
 						input_set |=
 						ICE_INSET_TUN_IPV4_PROTO;
 				} else {
-					if (ipv4_mask->hdr.src_addr ==
-							UINT32_MAX)
+					if (ipv4_mask->hdr.src_addr)
 						input_set |= ICE_INSET_IPV4_SRC;
-					if (ipv4_mask->hdr.dst_addr ==
-							UINT32_MAX)
+					if (ipv4_mask->hdr.dst_addr)
 						input_set |= ICE_INSET_IPV4_DST;
-					if (ipv4_mask->hdr.time_to_live ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.time_to_live)
 						input_set |= ICE_INSET_IPV4_TTL;
-					if (ipv4_mask->hdr.next_proto_id ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.next_proto_id)
 						input_set |=
 						ICE_INSET_IPV4_PROTO;
-					if (ipv4_mask->hdr.type_of_service ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.type_of_service)
 						input_set |=
 							ICE_INSET_IPV4_TOS;
 				}
 				list[t].type = (tunnel_valid  == 0) ?
 					ICE_IPV4_OFOS : ICE_IPV4_IL;
-				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+				if (ipv4_mask->hdr.src_addr) {
 					list[t].h_u.ipv4_hdr.src_addr =
 						ipv4_spec->hdr.src_addr;
 					list[t].m_u.ipv4_hdr.src_addr =
-						UINT32_MAX;
+						ipv4_mask->hdr.src_addr;
 				}
-				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+				if (ipv4_mask->hdr.dst_addr) {
 					list[t].h_u.ipv4_hdr.dst_addr =
 						ipv4_spec->hdr.dst_addr;
 					list[t].m_u.ipv4_hdr.dst_addr =
-						UINT32_MAX;
+						ipv4_mask->hdr.dst_addr;
 				}
-				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+				if (ipv4_mask->hdr.time_to_live) {
 					list[t].h_u.ipv4_hdr.time_to_live =
 						ipv4_spec->hdr.time_to_live;
 					list[t].m_u.ipv4_hdr.time_to_live =
-						UINT8_MAX;
+						ipv4_mask->hdr.time_to_live;
 				}
-				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+				if (ipv4_mask->hdr.next_proto_id) {
 					list[t].h_u.ipv4_hdr.protocol =
 						ipv4_spec->hdr.next_proto_id;
 					list[t].m_u.ipv4_hdr.protocol =
-						UINT8_MAX;
+						ipv4_mask->hdr.next_proto_id;
 				}
-				if (ipv4_mask->hdr.type_of_service ==
-						UINT8_MAX) {
+				if (ipv4_mask->hdr.type_of_service) {
 					list[t].h_u.ipv4_hdr.tos =
 						ipv4_spec->hdr.type_of_service;
-					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+					list[t].m_u.ipv4_hdr.tos =
+						ipv4_mask->hdr.type_of_service;
 				}
 				t++;
-			} else if (!ipv4_spec && !ipv4_mask) {
-				list[t].type = (tunnel_valid  == 0) ?
-					ICE_IPV4_OFOS : ICE_IPV4_IL;
 			}
 			break;
 
@@ -514,51 +503,58 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (!memcmp(ipv6_mask->hdr.src_addr,
-						ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+						j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
 						input_set |=
-							ICE_INSET_TUN_IPV6_SRC;
-					if (!memcmp(ipv6_mask->hdr.dst_addr,
-						ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+						ICE_INSET_TUN_IPV6_SRC;
+						break;
+						}
+					}
+					for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+						j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
 						input_set |=
-							ICE_INSET_TUN_IPV6_DST;
-					if (ipv6_mask->hdr.proto == UINT8_MAX)
+						ICE_INSET_TUN_IPV6_DST;
+						break;
+						}
+					}
+					if (ipv6_mask->hdr.proto)
 						input_set |=
 						ICE_INSET_TUN_IPV6_NEXT_HDR;
-					if (ipv6_mask->hdr.hop_limits ==
-							UINT8_MAX)
+					if (ipv6_mask->hdr.hop_limits)
 						input_set |=
 						ICE_INSET_TUN_IPV6_HOP_LIMIT;
-					if ((ipv6_mask->hdr.vtc_flow &
+					if (ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
 						(RTE_IPV6_HDR_TC_MASK))
-							== rte_cpu_to_be_32
-							(RTE_IPV6_HDR_TC_MASK))
 						input_set |=
 							ICE_INSET_TUN_IPV6_TC;
 				} else {
-					if (!memcmp(ipv6_mask->hdr.src_addr,
-						ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+							j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
 						input_set |= ICE_INSET_IPV6_SRC;
-					if (!memcmp(ipv6_mask->hdr.dst_addr,
-						ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
-						input_set |= ICE_INSET_IPV6_DST;
-					if (ipv6_mask->hdr.proto == UINT8_MAX)
+						break;
+						}
+					}
+					for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+						j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						input_set |=
+						ICE_INSET_IPV6_DST;
+						break;
+						}
+					}
+					if (ipv6_mask->hdr.proto)
 						input_set |=
 						ICE_INSET_IPV6_NEXT_HDR;
-					if (ipv6_mask->hdr.hop_limits ==
-							UINT8_MAX)
+					if (ipv6_mask->hdr.hop_limits)
 						input_set |=
 						ICE_INSET_IPV6_HOP_LIMIT;
-					if ((ipv6_mask->hdr.vtc_flow &
+					if (ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
 						(RTE_IPV6_HDR_TC_MASK))
-							== rte_cpu_to_be_32
-							(RTE_IPV6_HDR_TC_MASK))
 						input_set |= ICE_INSET_IPV6_TC;
 				}
 				list[t].type = (tunnel_valid  == 0) ?
@@ -568,35 +564,33 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				f = &list[t].h_u.ipv6_hdr;
 				s = &list[t].m_u.ipv6_hdr;
 				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j] ==
-						UINT8_MAX) {
+					if (ipv6_mask->hdr.src_addr[j]) {
 						f->src_addr[j] =
 						ipv6_spec->hdr.src_addr[j];
 						s->src_addr[j] =
 						ipv6_mask->hdr.src_addr[j];
 					}
-					if (ipv6_mask->hdr.dst_addr[j] ==
-								UINT8_MAX) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
 						f->dst_addr[j] =
 						ipv6_spec->hdr.dst_addr[j];
 						s->dst_addr[j] =
 						ipv6_mask->hdr.dst_addr[j];
 					}
 				}
-				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+				if (ipv6_mask->hdr.proto) {
 					f->next_hdr =
 						ipv6_spec->hdr.proto;
-					s->next_hdr = UINT8_MAX;
+					s->next_hdr =
+						ipv6_mask->hdr.proto;
 				}
-				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+				if (ipv6_mask->hdr.hop_limits) {
 					f->hop_limit =
 						ipv6_spec->hdr.hop_limits;
-					s->hop_limit = UINT8_MAX;
+					s->hop_limit =
+						ipv6_mask->hdr.hop_limits;
 				}
-				if ((ipv6_mask->hdr.vtc_flow &
+				if (ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
-						(RTE_IPV6_HDR_TC_MASK))
-						== rte_cpu_to_be_32
 						(RTE_IPV6_HDR_TC_MASK)) {
 					struct ice_le_ver_tc_flow vtf;
 					vtf.u.fld.version = 0;
@@ -606,13 +600,13 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 							RTE_IPV6_HDR_TC_MASK) >>
 							RTE_IPV6_HDR_TC_SHIFT;
 					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
-					vtf.u.fld.tc = UINT8_MAX;
+					vtf.u.fld.tc = (rte_be_to_cpu_32
+						(ipv6_mask->hdr.vtc_flow) &
+							RTE_IPV6_HDR_TC_MASK) >>
+							RTE_IPV6_HDR_TC_SHIFT;
 					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
 				}
 				t++;
-			} else if (!ipv6_spec && !ipv6_mask) {
-				list[t].type = (tun_type == ICE_NON_TUN) ?
-					ICE_IPV4_OFOS : ICE_IPV4_IL;
 			}
 			break;
 
@@ -631,21 +625,17 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (udp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (udp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_TUN_UDP_SRC_PORT;
-					if (udp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (udp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_TUN_UDP_DST_PORT;
 				} else {
-					if (udp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (udp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_UDP_SRC_PORT;
-					if (udp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (udp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_UDP_DST_PORT;
 				}
@@ -654,21 +644,19 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].type = ICE_UDP_OF;
 				else
 					list[t].type = ICE_UDP_ILOS;
-				if (udp_mask->hdr.src_port == UINT16_MAX) {
+				if (udp_mask->hdr.src_port) {
 					list[t].h_u.l4_hdr.src_port =
 						udp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 						udp_mask->hdr.src_port;
 				}
-				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+				if (udp_mask->hdr.dst_port) {
 					list[t].h_u.l4_hdr.dst_port =
 						udp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 						udp_mask->hdr.dst_port;
 				}
 						t++;
-			} else if (!udp_spec && !udp_mask) {
-				list[t].type = ICE_UDP_ILOS;
 			}
 			break;
 
@@ -692,40 +680,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (tcp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (tcp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_TUN_TCP_SRC_PORT;
-					if (tcp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (tcp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_TUN_TCP_DST_PORT;
 				} else {
-					if (tcp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (tcp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_TCP_SRC_PORT;
-					if (tcp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (tcp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_TCP_DST_PORT;
 				}
 				list[t].type = ICE_TCP_IL;
-				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+				if (tcp_mask->hdr.src_port) {
 					list[t].h_u.l4_hdr.src_port =
 						tcp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 						tcp_mask->hdr.src_port;
 				}
-				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+				if (tcp_mask->hdr.dst_port) {
 					list[t].h_u.l4_hdr.dst_port =
 						tcp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 						tcp_mask->hdr.dst_port;
 				}
 				t++;
-			} else if (!tcp_spec && !tcp_mask) {
-				list[t].type = ICE_TCP_IL;
 			}
 			break;
 
@@ -743,40 +725,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (sctp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (sctp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_TUN_SCTP_SRC_PORT;
-					if (sctp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (sctp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_TUN_SCTP_DST_PORT;
 				} else {
-					if (sctp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (sctp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_SCTP_SRC_PORT;
-					if (sctp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (sctp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_SCTP_DST_PORT;
 				}
 				list[t].type = ICE_SCTP_IL;
-				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+				if (sctp_mask->hdr.src_port) {
 					list[t].h_u.sctp_hdr.src_port =
 						sctp_spec->hdr.src_port;
 					list[t].m_u.sctp_hdr.src_port =
 						sctp_mask->hdr.src_port;
 				}
-				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+				if (sctp_mask->hdr.dst_port) {
 					list[t].h_u.sctp_hdr.dst_port =
 						sctp_spec->hdr.dst_port;
 					list[t].m_u.sctp_hdr.dst_port =
 						sctp_mask->hdr.dst_port;
 				}
 				t++;
-			} else if (!sctp_spec && !sctp_mask) {
-				list[t].type = ICE_SCTP_IL;
 			}
 			break;
 
@@ -799,21 +775,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			tunnel_valid = 1;
 			if (vxlan_spec && vxlan_mask) {
 				list[t].type = ICE_VXLAN;
-				if (vxlan_mask->vni[0] == UINT8_MAX &&
-					vxlan_mask->vni[1] == UINT8_MAX &&
-					vxlan_mask->vni[2] == UINT8_MAX) {
+				if (vxlan_mask->vni[0] ||
+					vxlan_mask->vni[1] ||
+					vxlan_mask->vni[2]) {
 					list[t].h_u.tnl_hdr.vni =
 						(vxlan_spec->vni[2] << 16) |
 						(vxlan_spec->vni[1] << 8) |
 						vxlan_spec->vni[0];
 					list[t].m_u.tnl_hdr.vni =
-						UINT32_MAX;
+						(vxlan_mask->vni[2] << 16) |
+						(vxlan_mask->vni[1] << 8) |
+						vxlan_mask->vni[0];
 					input_set |=
 						ICE_INSET_TUN_VXLAN_VNI;
 				}
 				t++;
-			} else if (!vxlan_spec && !vxlan_mask) {
-				list[t].type = ICE_VXLAN;
 			}
 			break;
 
@@ -835,21 +811,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			tunnel_valid = 1;
 			if (nvgre_spec && nvgre_mask) {
 				list[t].type = ICE_NVGRE;
-				if (nvgre_mask->tni[0] == UINT8_MAX &&
-					nvgre_mask->tni[1] == UINT8_MAX &&
-					nvgre_mask->tni[2] == UINT8_MAX) {
+				if (nvgre_mask->tni[0] ||
+					nvgre_mask->tni[1] ||
+					nvgre_mask->tni[2]) {
 					list[t].h_u.nvgre_hdr.tni_flow =
 						(nvgre_spec->tni[2] << 16) |
 						(nvgre_spec->tni[1] << 8) |
 						nvgre_spec->tni[0];
 					list[t].m_u.nvgre_hdr.tni_flow =
-						UINT32_MAX;
+						(nvgre_mask->tni[2] << 16) |
+						(nvgre_mask->tni[1] << 8) |
+						nvgre_mask->tni[0];
 					input_set |=
 						ICE_INSET_TUN_NVGRE_TNI;
 				}
 				t++;
-			} else if (!nvgre_spec && !nvgre_mask) {
-				list[t].type = ICE_NVGRE;
 			}
 			break;
 
@@ -870,23 +846,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			}
 			if (vlan_spec && vlan_mask) {
 				list[t].type = ICE_VLAN_OFOS;
-				if (vlan_mask->tci == UINT16_MAX) {
+				if (vlan_mask->tci) {
 					list[t].h_u.vlan_hdr.vlan =
 						vlan_spec->tci;
 					list[t].m_u.vlan_hdr.vlan =
-						UINT16_MAX;
+						vlan_mask->tci;
 					input_set |= ICE_INSET_VLAN_OUTER;
 				}
-				if (vlan_mask->inner_type == UINT16_MAX) {
+				if (vlan_mask->inner_type) {
 					list[t].h_u.vlan_hdr.type =
 						vlan_spec->inner_type;
 					list[t].m_u.vlan_hdr.type =
-						UINT16_MAX;
+						vlan_mask->inner_type;
 					input_set |= ICE_INSET_VLAN_OUTER;
 				}
 				t++;
-			} else if (!vlan_spec && !vlan_mask) {
-				list[t].type = ICE_VLAN_OFOS;
 			}
 			break;
 
@@ -918,19 +892,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					return 0;
 				}
 				list[t].type = ICE_PPPOE;
-				if (pppoe_mask->session_id == UINT16_MAX) {
+				if (pppoe_mask->session_id) {
 					list[t].h_u.pppoe_hdr.session_id =
 						pppoe_spec->session_id;
 					list[t].m_u.pppoe_hdr.session_id =
-						UINT16_MAX;
+						pppoe_mask->session_id;
 					input_set |= ICE_INSET_PPPOE_SESSION;
 				}
 				t++;
 				pppoe_valid = 1;
-			} else if (!pppoe_spec && !pppoe_mask) {
-				list[t].type = ICE_PPPOE;
 			}
-
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
@@ -953,18 +924,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				if (pppoe_valid)
 					t--;
 				list[t].type = ICE_PPPOE;
-				if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+				if (pppoe_proto_mask->proto_id) {
 					list[t].h_u.pppoe_hdr.ppp_prot_id =
 						pppoe_proto_spec->proto_id;
 					list[t].m_u.pppoe_hdr.ppp_prot_id =
-						UINT16_MAX;
+						pppoe_proto_mask->proto_id;
 					input_set |= ICE_INSET_PPPOE_PROTO;
 				}
 				t++;
-			} else if (!pppoe_proto_spec && !pppoe_proto_mask) {
-				list[t].type = ICE_PPPOE;
 			}
-
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_VOID:
-- 
2.19.1



* [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule
  2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
                   ` (2 preceding siblings ...)
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 3/7] net/ice: change swicth parser to support flexible mask wei.zhao1@intel.com
@ 2020-03-13  1:04 ` wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 5/7] net/ice: change default tunnle type wei.zhao1@intel.com
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 12+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13  1:04 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao

This patch adds support for MAC VLAN rules:
it enables the switch filter to direct packets based on
MAC address and VLAN id.
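
For illustration, a sketch of such a rule (the MAC address and VLAN id
are placeholders):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Match destination MAC 00:11:22:33:44:55 on VLAN 100. */
struct rte_flow_item_eth eth_spec = {
	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
struct rte_flow_item_eth eth_mask = {
	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) }; /* id bits */
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &eth_spec, .mask = &eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
	  .spec = &vlan_spec, .mask = &vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

This maps to the new pattern_ethertype_vlan entries with input set
ICE_SW_INSET_MAC_VLAN.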

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index af7e9cb0b..20d0577b5 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -29,6 +29,9 @@
 
 #define ICE_SW_INSET_ETHER ( \
 	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define ICE_SW_INSET_MAC_VLAN ( \
+		ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+		ICE_INSET_VLAN_OUTER)
 #define ICE_SW_INSET_MAC_IPV4 ( \
 	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
 	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
@@ -107,6 +110,8 @@ static struct
 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 	{pattern_ethertype,
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+	{pattern_ethertype_vlan,
+			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
 			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp,
@@ -149,6 +154,8 @@ static struct
 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
 	{pattern_ethertype,
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+	{pattern_ethertype_vlan,
+			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
 	{pattern_eth_arp,
 			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
@@ -179,6 +186,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
 
 static struct
 ice_pattern_match_item ice_switch_pattern_perm[] = {
+	{pattern_ethertype_vlan,
+			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
 			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp,
-- 
2.19.1



* [dpdk-dev] [PATCH 5/7] net/ice: change default tunnle type
  2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
                   ` (3 preceding siblings ...)
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule wei.zhao1@intel.com
@ 2020-03-13  1:04 ` wei.zhao1@intel.com
  2020-03-13 16:14   ` Stillwell Jr, Paul M
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 6/7] net/ice: add action number check for swicth wei.zhao1@intel.com
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 12+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13  1:04 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye, stable, Wei Zhao

The default tunnle type for the switch filter changes to the new
definition ICE_SW_TUN_AND_NON_TUN so that the rule
will apply to more packet types.

Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 20d0577b5..7ca922602 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1097,7 +1097,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	uint16_t lkups_num = 0;
 	const struct rte_flow_item *item = pattern;
 	uint16_t item_num = 0;
-	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+	enum ice_sw_tunnel_type tun_type =
+		ICE_SW_TUN_AND_NON_TUN;
 	struct ice_pattern_match_item *pattern_match_item = NULL;
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-- 
2.19.1



* [dpdk-dev] [PATCH 6/7] net/ice: add action number check for swicth
  2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
                   ` (4 preceding siblings ...)
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 5/7] net/ice: change default tunnle type wei.zhao1@intel.com
@ 2020-03-13  1:04 ` wei.zhao1@intel.com
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item wei.zhao1@intel.com
  2020-03-13 16:12 ` [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Stillwell Jr, Paul M
  7 siblings, 0 replies; 12+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13  1:04 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye, stable, Wei Zhao

The action number can only be one for the DCF or PF
switch filter; large (multiple) actions are not supported.
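
For illustration (the queue index is a placeholder), the new check
accepts exactly one counted action, with VOID actions skipped:

#include <rte_flow.h>

struct rte_flow_action_queue queue = { .index = 0 };

/* Accepted: one fate action; VOID is not counted. */
struct rte_flow_action ok[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VOID },
	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

/* Rejected with "Invalid action number": two counted actions. */
struct rte_flow_action bad[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};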

Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 48 +++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 7ca922602..48d689deb 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1079,6 +1079,46 @@ ice_switch_parse_action(struct ice_pf *pf,
 	return -rte_errno;
 }
 
+static int
+ice_switch_check_action(const struct rte_flow_action *actions,
+			    struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VF:
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error,
+					   EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions,
+					   "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (actions_num > 1) {
+		rte_flow_error_set(error,
+				   EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions,
+				   "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
 static int
 ice_switch_parse_pattern_action(struct ice_adapter *ad,
 		struct ice_pattern_match_item *array,
@@ -1164,6 +1204,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 		goto error;
 	}
 
+	ret = ice_switch_check_action(actions, error);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Invalid input action number");
+		goto error;
+	}
+
 	if (ad->hw.dcf_enabled)
 		ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
 	else
-- 
2.19.1



* [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item
  2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
                   ` (5 preceding siblings ...)
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 6/7] net/ice: add action number check for swicth wei.zhao1@intel.com
@ 2020-03-13  1:04 ` wei.zhao1@intel.com
  2020-03-13 16:12 ` [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Stillwell Jr, Paul M
  7 siblings, 0 replies; 12+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13  1:04 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye, stable, Wei Zhao

The input set for the inner type of the VLAN item should
be ICE_INSET_ETHERTYPE, not ICE_INSET_VLAN_OUTER.
This MAC VLAN filter is also part of the DCF switch filter.
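
For illustration (values are placeholders), a VLAN item that exercises
the inner_type path this patch corrects:

#include <rte_flow.h>
#include <rte_byteorder.h>
#include <rte_ether.h>

struct rte_flow_item_vlan vlan_spec = {
	.tci = RTE_BE16(100),
	.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4),	/* 0x0800 */
};
struct rte_flow_item_vlan vlan_mask = {
	.tci = RTE_BE16(0x0fff),
	.inner_type = RTE_BE16(0xffff),
};

With the fix, matching on inner_type contributes ICE_INSET_ETHERTYPE to
the input set instead of setting ICE_INSET_VLAN_OUTER a second time.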

Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 48d689deb..ecd7c75aa 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -867,7 +867,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						vlan_spec->inner_type;
 					list[t].m_u.vlan_hdr.type =
 						vlan_mask->inner_type;
-					input_set |= ICE_INSET_VLAN_OUTER;
+					input_set |= ICE_INSET_ETHERTYPE;
 				}
 				t++;
 			}
-- 
2.19.1



* Re: [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF
  2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
                   ` (6 preceding siblings ...)
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item wei.zhao1@intel.com
@ 2020-03-13 16:12 ` Stillwell Jr, Paul M
  2020-03-15  2:19   ` Zhang, Qi Z
  7 siblings, 1 reply; 12+ messages in thread
From: Stillwell Jr, Paul M @ 2020-03-13 16:12 UTC (permalink / raw)
  To: wei.zhao1@intel.com, dev; +Cc: Zhang, Qi Z, Ye, Xiaolong

This patch set is confusing to me. Only the DCF can deal with switch rules so why is the ice PMD trying to do anything with switch rules related to DCF?

Paul

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of wei.zhao1@intel.com
> Sent: Thursday, March 12, 2020 6:04 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Ye, Xiaolong
> <xiaolong.ye@intel.com>
> Subject: [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF
> 
> A DCF (Device Config Function) framework has been added for Intel devices;
> this patch set adds switch filter support for it, and also fixes bugs which
> block this feature.
> 
> This patchset is based on:
> [1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD support
> 
> Depends-on: series-8859
> 
> 
> wei zhao (7):
>   net/ice: enable switch flow on DCF
>   net/ice: support for more PPPoE input set
>   net/ice: change swicth parser to support flexible mask
>   net/ice: add support for MAC VLAN rule
>   net/ice: change default tunnle type
>   net/ice: add action number check for swicth
>   net/ice: fix input set of VLAN item
> 
>  config/common_linux                 |   1 +
>  drivers/net/ice/ice_dcf_ethdev.c    |  10 +-
>  drivers/net/ice/ice_dcf_parent.c    |   7 +
>  drivers/net/ice/ice_fdir_filter.c   |   6 +
>  drivers/net/ice/ice_generic_flow.c  |  13 +
>  drivers/net/ice/ice_generic_flow.h  |   9 +
>  drivers/net/ice/ice_hash.c          |   6 +
>  drivers/net/ice/ice_switch_filter.c | 461 ++++++++++++++++++----------
>  8 files changed, 348 insertions(+), 165 deletions(-)
> 
> --
> 2.19.1



* Re: [dpdk-dev] [PATCH 5/7] net/ice: change default tunnle type
  2020-03-13  1:04 ` [dpdk-dev] [PATCH 5/7] net/ice: change default tunnle type wei.zhao1@intel.com
@ 2020-03-13 16:14   ` Stillwell Jr, Paul M
  0 siblings, 0 replies; 12+ messages in thread
From: Stillwell Jr, Paul M @ 2020-03-13 16:14 UTC (permalink / raw)
  To: wei.zhao1@intel.com, dev; +Cc: Zhang, Qi Z, Ye, Xiaolong, stable, Zhao1, Wei


> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of wei.zhao1@intel.com
> Sent: Thursday, March 12, 2020 6:04 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Ye, Xiaolong
> <xiaolong.ye@intel.com>; stable@dpdk.org; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [dpdk-dev] [PATCH 5/7] net/ice: change default tunnle type
> 
> The default tunnle type for the switch filter changes to the new definition

s/tunnle/tunnel

> ICE_SW_TUN_AND_NON_TUN so that the rule will apply to more
> packet types.
> 
> Cc: stable@dpdk.org
> Fixes: 47d460d63233 ("net/ice: rework switch filter")
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
>  drivers/net/ice/ice_switch_filter.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/ice/ice_switch_filter.c
> b/drivers/net/ice/ice_switch_filter.c
> index 20d0577b5..7ca922602 100644
> --- a/drivers/net/ice/ice_switch_filter.c
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -1097,7 +1097,8 @@ ice_switch_parse_pattern_action(struct
> ice_adapter *ad,
>  	uint16_t lkups_num = 0;
>  	const struct rte_flow_item *item = pattern;
>  	uint16_t item_num = 0;
> -	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
> +	enum ice_sw_tunnel_type tun_type =
> +		ICE_SW_TUN_AND_NON_TUN;
>  	struct ice_pattern_match_item *pattern_match_item = NULL;
> 
>  	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> --
> 2.19.1



* Re: [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF
  2020-03-13 16:12 ` [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Stillwell Jr, Paul M
@ 2020-03-15  2:19   ` Zhang, Qi Z
  0 siblings, 0 replies; 12+ messages in thread
From: Zhang, Qi Z @ 2020-03-15  2:19 UTC (permalink / raw)
  To: Stillwell Jr, Paul M, wei.zhao1@intel.com, dev; +Cc: Ye, Xiaolong



> -----Original Message-----
> From: Stillwell Jr, Paul M <paul.m.stillwell.jr@intel.com>
> Sent: Saturday, March 14, 2020 12:13 AM
> To: wei.zhao1@intel.com; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Ye, Xiaolong <xiaolong.ye@intel.com>
> Subject: RE: [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF
> 
> This patch set is confusing to me. Only the DCF can deal with switch rules so
> why is the ice PMD trying to do anything with switch rules related to DCF?

The ice PF PMD already implements the switch rule engine and some parsers for PF-only usage.
The ice DCF just shares the same engine and adds some new parsers for DCF usage.
And as I explained in another email, the same compiled code can support different usages at runtime.

Thanks
Qi

> 
> Paul
> 
> > -----Original Message-----
> > From: dev <dev-bounces@dpdk.org> On Behalf Of wei.zhao1@intel.com
> > Sent: Thursday, March 12, 2020 6:04 PM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Ye, Xiaolong
> > <xiaolong.ye@intel.com>
> > Subject: [dpdk-dev] [PATCH 0/7] add switch filter support for intel
> > DCF
> >
> > A DCF (Device Config Function) framework has been added for Intel
> > devices; this patch set adds switch filter support for it, and also
> > fixes bugs which block this feature.
> >
> > This patchset is based on:
> > [1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD
> > support
> >
> > Depends-on: series-8859
> >
> >
> > wei zhao (7):
> >   net/ice: enable switch flow on DCF
> >   net/ice: support for more PPPoE input set
> >   net/ice: change swicth parser to support flexible mask
> >   net/ice: add support for MAC VLAN rule
> >   net/ice: change default tunnle type
> >   net/ice: add action number check for swicth
> >   net/ice: fix input set of VLAN item
> >
> >  config/common_linux                 |   1 +
> >  drivers/net/ice/ice_dcf_ethdev.c    |  10 +-
> >  drivers/net/ice/ice_dcf_parent.c    |   7 +
> >  drivers/net/ice/ice_fdir_filter.c   |   6 +
> >  drivers/net/ice/ice_generic_flow.c  |  13 +
> >  drivers/net/ice/ice_generic_flow.h  |   9 +
> >  drivers/net/ice/ice_hash.c          |   6 +
> >  drivers/net/ice/ice_switch_filter.c | 461
> > ++++++++++++++++++----------
> >  8 files changed, 348 insertions(+), 165 deletions(-)
> >
> > --
> > 2.19.1
> 



* [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF
  2020-03-13  2:07 Wei Zhao
@ 2020-03-13  2:08 ` Wei Zhao
  0 siblings, 0 replies; 12+ messages in thread
From: Wei Zhao @ 2020-03-13  2:08 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao

DCF on CVL is a control plane VF which takes the responsibility to
configure all the PF/global resources. This patch adds support for DCF
to program forwarding rules that direct packets to VFs.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_dcf_ethdev.c    | 10 ++++++--
 drivers/net/ice/ice_dcf_parent.c    |  7 ++++++
 drivers/net/ice/ice_fdir_filter.c   |  6 +++++
 drivers/net/ice/ice_hash.c          |  6 +++++
 drivers/net/ice/ice_switch_filter.c | 39 ++++++++++++++++++++++++++++-
 5 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index f65b962d4..759d92afb 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -115,8 +115,8 @@ ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
 static int
 ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
-			__rte_unused enum rte_filter_op filter_op,
-			__rte_unused void *arg)
+			enum rte_filter_op filter_op,
+			void *arg)
 {
 	int ret = 0;
 
@@ -124,6 +124,12 @@ ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 			    filter_type);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index bca9cd34a..c2dc13936 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -314,6 +314,12 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
 	}
 	parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
 
+	err = ice_flow_init(parent_adapter);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Failed to initialize flow");
+		goto uninit_hw;
+	}
+
 	ice_dcf_update_vf_vsi_map(parent_hw,
 				  hw->num_vfs, hw->vf_vsi_map);
 
@@ -344,5 +350,6 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
 	rte_eal_alarm_cancel(ice_dcf_vsi_update_service_handler,
 			     &adapter->real_hw);
 
+	ice_flow_uninit(parent_adapter);
 	ice_dcf_uninit_parent_hw(parent_hw);
 }
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index d737c1acd..c9343c1fa 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1061,6 +1061,9 @@ ice_fdir_init(struct ice_adapter *ad)
 	struct ice_flow_parser *parser;
 	int ret;
 
+	if (ad->hw.dcf_enabled)
+		return 0;
+
 	ret = ice_fdir_setup(pf);
 	if (ret)
 		return ret;
@@ -1081,6 +1084,9 @@ ice_fdir_uninit(struct ice_adapter *ad)
 	struct ice_pf *pf = &ad->pf;
 	struct ice_flow_parser *parser;
 
+	if (ad->hw.dcf_enabled)
+		return;
+
 	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
 		parser = &ice_fdir_parser_comms;
 	else
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index d891538bd..69d805248 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -243,6 +243,9 @@ ice_hash_init(struct ice_adapter *ad)
 {
 	struct ice_flow_parser *parser = NULL;
 
+	if (ad->hw.dcf_enabled)
+		return 0;
+
 	if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
 		parser = &ice_hash_parser_os;
 	else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
@@ -560,6 +563,9 @@ ice_hash_destroy(struct ice_adapter *ad,
 static void
 ice_hash_uninit(struct ice_adapter *ad)
 {
+	if (ad->hw.dcf_enabled)
+		return;
+
 	if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
 		ice_unregister_parser(&ice_hash_parser_os, ad);
 	else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4a9356b31..c55e44e1a 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -913,6 +913,39 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	return 0;
 }
 
+static int
+ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
+			    struct rte_flow_error *error,
+			    struct ice_adv_rule_info *rule_info)
+{
+	const struct rte_flow_action_vf *act_vf;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VF:
+			rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
+			act_vf = action->conf;
+			rule_info->sw_act.vsi_handle = act_vf->id;
+			break;
+		default:
+			rte_flow_error_set(error,
+					   EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions,
+					   "Invalid action type or queue number");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
+	rule_info->rx = 1;
+	rule_info->priority = 5;
+
+	return 0;
+}
 
 static int
 ice_switch_parse_action(struct ice_pf *pf,
@@ -1081,7 +1114,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 		goto error;
 	}
 
-	ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+	if (ad->hw.dcf_enabled)
+		ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
+	else
+		ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+
 	if (ret) {
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-- 
2.19.1



end of thread

Thread overview: 12+ messages
2020-03-13  1:04 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF wei.zhao1@intel.com
2020-03-13  1:04 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF wei.zhao1@intel.com
2020-03-13  1:04 ` [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set wei.zhao1@intel.com
2020-03-13  1:04 ` [dpdk-dev] [PATCH 3/7] net/ice: change swicth parser to support flexible mask wei.zhao1@intel.com
2020-03-13  1:04 ` [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule wei.zhao1@intel.com
2020-03-13  1:04 ` [dpdk-dev] [PATCH 5/7] net/ice: change default tunnle type wei.zhao1@intel.com
2020-03-13 16:14   ` Stillwell Jr, Paul M
2020-03-13  1:04 ` [dpdk-dev] [PATCH 6/7] net/ice: add action number check for swicth wei.zhao1@intel.com
2020-03-13  1:04 ` [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item wei.zhao1@intel.com
2020-03-13 16:12 ` [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Stillwell Jr, Paul M
2020-03-15  2:19   ` Zhang, Qi Z
2020-03-13  2:07 Wei Zhao
2020-03-13  2:08 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF Wei Zhao
