DPDK patches and discussions
* [dpdk-dev] [PATCH] net/ice: enable QINQ filter in switch
@ 2020-09-02 10:01 Qiming Yang
From: Qiming Yang @ 2020-09-02 10:01 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, Qiming Yang, Wei Zhao

This patch enables the QinQ filter in the switch filter. The change
depends on a base code update.
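
For example, once this support is in place, a double-VLAN (QinQ) switch
rule could be created through testpmd roughly as follows (the port id,
VLAN TCI values and queue action below are illustrative only and are not
taken from this patch):

  flow create 0 ingress pattern eth / vlan tci is 2 / vlan tci is 3 / end actions queue index 3 / end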

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_generic_flow.c  |   8 +++
 drivers/net/ice/ice_generic_flow.h  |   1 +
 drivers/net/ice/ice_switch_filter.c | 106 +++++++++++++++++++++++++---
 3 files changed, 104 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 54b0316b9..9356bff72 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1448,6 +1448,14 @@ enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = {
 	RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
 	RTE_FLOW_ITEM_TYPE_END,
 };
+enum rte_flow_item_type pattern_eth_qinq_pppoes_proto[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_PPPOES,
+	RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+	RTE_FLOW_ITEM_TYPE_END,
+};
 enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_VLAN,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 434d2f425..887531905 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -425,6 +425,7 @@ extern enum rte_flow_item_type pattern_eth_pppoes[];
 extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
+extern enum rte_flow_item_type pattern_eth_qinq_pppoes_proto[];
 extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
 extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 24320ac7d..f9849bbf5 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -35,8 +35,8 @@
 #define ICE_SW_INSET_ETHER ( \
 	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
 #define ICE_SW_INSET_MAC_VLAN ( \
-		ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
-		ICE_INSET_VLAN_OUTER)
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+	ICE_INSET_VLAN_INNER)
 #define ICE_SW_INSET_MAC_IPV4 ( \
 	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
 	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
@@ -130,6 +130,12 @@
 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
 	ICE_SW_INSET_MAC_IPV6 | \
 	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
+#define ICE_SW_INSET_MAC_QINQ  ( \
+	ICE_SW_INSET_MAC_VLAN | ICE_INSET_VLAN_OUTER)
+#define ICE_SW_INSET_MAC_IPV4_QINQ ( \
+	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
+#define ICE_SW_INSET_MAC_IPV6_QINQ ( \
+	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
 
 struct sw_meta {
 	struct ice_adv_lkup_elem *list;
@@ -225,6 +231,20 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv6_pfcp,
 			ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_ethertype_qinq,
+			ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
+	{pattern_eth_qinq_ipv4,
+			ICE_SW_INSET_MAC_IPV4_QINQ, ICE_INSET_NONE},
+	{pattern_eth_qinq_ipv6,
+			ICE_SW_INSET_MAC_IPV6_QINQ, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes,
+			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_proto,
+			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_ipv4,
+			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_ipv6,
+			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
 };
 
 static struct
@@ -345,6 +365,20 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
 			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv6_pfcp,
 			ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_ethertype_qinq,
+			ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
+	{pattern_eth_qinq_ipv4,
+			ICE_SW_INSET_MAC_IPV4_QINQ, ICE_INSET_NONE},
+	{pattern_eth_qinq_ipv6,
+			ICE_SW_INSET_MAC_IPV6_QINQ, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes,
+			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_proto,
+			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_ipv4,
+			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_ipv6,
+			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
 };
 
 static int
@@ -477,6 +511,8 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	bool pppoe_elem_valid = 0;
 	bool pppoe_patt_valid = 0;
 	bool pppoe_prot_valid = 0;
+	bool inner_vlan_valid = 0;
+	bool outer_vlan_valid = 0;
 	bool tunnel_valid = 0;
 	bool profile_rule = 0;
 	bool nvgre_valid = 0;
@@ -1023,23 +1059,42 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					   "Invalid VLAN item");
 				return 0;
 			}
+
+			if (!outer_vlan_valid &&
+			    (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+			    *tun_type == ICE_NON_TUN_QINQ))
+			    outer_vlan_valid = 1;
+			else if (!inner_vlan_valid &&
+				(*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+				*tun_type == ICE_NON_TUN_QINQ))
+				inner_vlan_valid = 1;
+			else if (!inner_vlan_valid)
+				 inner_vlan_valid = 1;
+
 			if (vlan_spec && vlan_mask) {
-				list[t].type = ICE_VLAN_OFOS;
+				if (outer_vlan_valid &&
+				    !inner_vlan_valid) {
+					list[t].type = ICE_VLAN_EX;
+					input_set |= ICE_INSET_VLAN_OUTER;
+				} else if (inner_vlan_valid) {
+					list[t].type = ICE_VLAN_OFOS;
+					input_set |= ICE_INSET_VLAN_INNER;
+				}
+
 				if (vlan_mask->tci) {
 					list[t].h_u.vlan_hdr.vlan =
 						vlan_spec->tci;
 					list[t].m_u.vlan_hdr.vlan =
 						vlan_mask->tci;
-					input_set |= ICE_INSET_VLAN_OUTER;
 					input_set_byte += 2;
 				}
+
 				if (vlan_mask->inner_type) {
-					list[t].h_u.vlan_hdr.type =
-						vlan_spec->inner_type;
-					list[t].m_u.vlan_hdr.type =
-						vlan_mask->inner_type;
-					input_set |= ICE_INSET_ETHERTYPE;
-					input_set_byte += 2;
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid VLAN input set.");
+					return 0;
 				}
 				t++;
 			}
@@ -1341,8 +1396,27 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		}
 	}
 
+	if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
+	    inner_vlan_valid && outer_vlan_valid)
+		*tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
+	else if (*tun_type == ICE_SW_TUN_PPPOE &&
+		 inner_vlan_valid && outer_vlan_valid)
+		*tun_type = ICE_SW_TUN_PPPOE_QINQ;
+	else if (*tun_type == ICE_NON_TUN &&
+		 inner_vlan_valid && outer_vlan_valid)
+		*tun_type = ICE_NON_TUN_QINQ;
+	else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
+		 inner_vlan_valid && outer_vlan_valid)
+		*tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
+
 	if (pppoe_patt_valid && !pppoe_prot_valid) {
-		if (ipv6_valid && udp_valid)
+		if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
+		else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
+		else if (inner_vlan_valid && outer_vlan_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_QINQ;
+		else if (ipv6_valid && udp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
 		else if (ipv6_valid && tcp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
@@ -1615,6 +1689,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	uint16_t lkups_num = 0;
 	const struct rte_flow_item *item = pattern;
 	uint16_t item_num = 0;
+	uint16_t vlan_num = 0;
 	enum ice_sw_tunnel_type tun_type =
 			ICE_NON_TUN;
 	struct ice_pattern_match_item *pattern_match_item = NULL;
@@ -1630,6 +1705,10 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 			if (eth_mask->type == UINT16_MAX)
 				tun_type = ICE_SW_TUN_AND_NON_TUN;
 		}
+
+		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
+			vlan_num++;
+
 		/* reserve one more memory slot for ETH which may
 		 * consume 2 lookup items.
 		 */
@@ -1637,6 +1716,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 			item_num++;
 	}
 
+	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
+		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
+	else if (vlan_num == 2)
+		tun_type = ICE_NON_TUN_QINQ;
+
 	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
 	if (!list) {
 		rte_flow_error_set(error, EINVAL,
-- 
2.17.1

