DPDK patches and discussions
From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, xiaolong.ye@intel.com,
	Wei Zhao <wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH 3/7] net/ice: change switch parser to support flexible mask
Date: Fri, 13 Mar 2020 10:08:02 +0800	[thread overview]
Message-ID: <20200313020806.21654-4-wei.zhao1@intel.com> (raw)
In-Reply-To: <20200313020806.21654-1-wei.zhao1@intel.com>

DCF needs to support flexible mask configuration, i.e. an input set
mask is not necessarily the all-ones pattern (0xFFFF and the like).
For example, in order to direct L2/IP multicast packets, the mask for
the source IP may be 0xF0000000. This patch enables the switch filter
parser to accept such partial masks.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
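
A hedged illustration (not part of the patch): a partial IPv4 source
mask expressed through rte_flow. The addresses are hypothetical and
only show the kind of mask the reworked parser now accepts.

	#include <rte_flow.h>
	#include <rte_byteorder.h>

	/* Match any IPv4 source in 224.0.0.0/4 (the multicast range):
	 * only the top four bits of src_addr take part in the match. */
	static const struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.src_addr = RTE_BE32(0xE0000000), /* 224.0.0.0 */
	};
	static const struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.src_addr = RTE_BE32(0xF0000000), /* /4 prefix mask */
	};
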
 drivers/net/ice/ice_switch_filter.c | 298 +++++++++++++---------------
 1 file changed, 133 insertions(+), 165 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 39b5c7266..af7e9cb0b 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -326,9 +326,6 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
 	const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
 				*pppoe_proto_mask;
-	uint8_t  ipv6_addr_mask[16] = {
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 	uint64_t input_set = ICE_INSET_NONE;
 	uint16_t j, t = 0;
 	uint16_t tunnel_valid = 0;
@@ -351,19 +348,29 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			eth_spec = item->spec;
 			eth_mask = item->mask;
 			if (eth_spec && eth_mask) {
-				if (tunnel_valid &&
-				    rte_is_broadcast_ether_addr(&eth_mask->src))
-					input_set |= ICE_INSET_TUN_SMAC;
-				else if (
-				rte_is_broadcast_ether_addr(&eth_mask->src))
-					input_set |= ICE_INSET_SMAC;
-				if (tunnel_valid &&
-				    rte_is_broadcast_ether_addr(&eth_mask->dst))
-					input_set |= ICE_INSET_TUN_DMAC;
-				else if (
-				rte_is_broadcast_ether_addr(&eth_mask->dst))
-					input_set |= ICE_INSET_DMAC;
-				if (eth_mask->type == RTE_BE16(0xffff))
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j]) {
+					if (tunnel_valid)
+						input_set |=
+							ICE_INSET_TUN_SMAC;
+					else
+						input_set |=
+							ICE_INSET_SMAC;
+					break;
+					}
+				}
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->dst.addr_bytes[j]) {
+					if (tunnel_valid)
+						input_set |=
+						ICE_INSET_TUN_DMAC;
+					else
+						input_set |=
+						ICE_INSET_DMAC;
+					break;
+					}
+				}
+				if (eth_mask->type)
 					input_set |= ICE_INSET_ETHERTYPE;
 				list[t].type = (tunnel_valid  == 0) ?
 					ICE_MAC_OFOS : ICE_MAC_IL;
@@ -373,16 +380,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				h = &list[t].h_u.eth_hdr;
 				m = &list[t].m_u.eth_hdr;
 				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
-					if (eth_mask->src.addr_bytes[j] ==
-								UINT8_MAX) {
+					if (eth_mask->src.addr_bytes[j]) {
 						h->src_addr[j] =
 						eth_spec->src.addr_bytes[j];
 						m->src_addr[j] =
 						eth_mask->src.addr_bytes[j];
 						i = 1;
 					}
-					if (eth_mask->dst.addr_bytes[j] ==
-								UINT8_MAX) {
+					if (eth_mask->dst.addr_bytes[j]) {
 						h->dst_addr[j] =
 						eth_spec->dst.addr_bytes[j];
 						m->dst_addr[j] =
@@ -392,17 +397,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 				if (i)
 					t++;
-				if (eth_mask->type == UINT16_MAX) {
+				if (eth_mask->type) {
 					list[t].type = ICE_ETYPE_OL;
 					list[t].h_u.ethertype.ethtype_id =
 						eth_spec->type;
 					list[t].m_u.ethertype.ethtype_id =
-						UINT16_MAX;
+						eth_mask->type;
 					t++;
 				}
-			} else if (!eth_spec && !eth_mask) {
-				list[t].type = (tun_type == ICE_NON_TUN) ?
-					ICE_MAC_OFOS : ICE_MAC_IL;
 			}
 			break;
 
@@ -423,81 +425,68 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (ipv4_mask->hdr.type_of_service ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.type_of_service)
 						input_set |=
 							ICE_INSET_TUN_IPV4_TOS;
-					if (ipv4_mask->hdr.src_addr ==
-							UINT32_MAX)
+					if (ipv4_mask->hdr.src_addr)
 						input_set |=
 							ICE_INSET_TUN_IPV4_SRC;
-					if (ipv4_mask->hdr.dst_addr ==
-							UINT32_MAX)
+					if (ipv4_mask->hdr.dst_addr)
 						input_set |=
 							ICE_INSET_TUN_IPV4_DST;
-					if (ipv4_mask->hdr.time_to_live ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.time_to_live)
 						input_set |=
 							ICE_INSET_TUN_IPV4_TTL;
-					if (ipv4_mask->hdr.next_proto_id ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.next_proto_id)
 						input_set |=
 						ICE_INSET_TUN_IPV4_PROTO;
 				} else {
-					if (ipv4_mask->hdr.src_addr ==
-							UINT32_MAX)
+					if (ipv4_mask->hdr.src_addr)
 						input_set |= ICE_INSET_IPV4_SRC;
-					if (ipv4_mask->hdr.dst_addr ==
-							UINT32_MAX)
+					if (ipv4_mask->hdr.dst_addr)
 						input_set |= ICE_INSET_IPV4_DST;
-					if (ipv4_mask->hdr.time_to_live ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.time_to_live)
 						input_set |= ICE_INSET_IPV4_TTL;
-					if (ipv4_mask->hdr.next_proto_id ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.next_proto_id)
 						input_set |=
 						ICE_INSET_IPV4_PROTO;
-					if (ipv4_mask->hdr.type_of_service ==
-							UINT8_MAX)
+					if (ipv4_mask->hdr.type_of_service)
 						input_set |=
 							ICE_INSET_IPV4_TOS;
 				}
 				list[t].type = (tunnel_valid  == 0) ?
 					ICE_IPV4_OFOS : ICE_IPV4_IL;
-				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+				if (ipv4_mask->hdr.src_addr) {
 					list[t].h_u.ipv4_hdr.src_addr =
 						ipv4_spec->hdr.src_addr;
 					list[t].m_u.ipv4_hdr.src_addr =
-						UINT32_MAX;
+						ipv4_mask->hdr.src_addr;
 				}
-				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+				if (ipv4_mask->hdr.dst_addr) {
 					list[t].h_u.ipv4_hdr.dst_addr =
 						ipv4_spec->hdr.dst_addr;
 					list[t].m_u.ipv4_hdr.dst_addr =
-						UINT32_MAX;
+						ipv4_mask->hdr.dst_addr;
 				}
-				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+				if (ipv4_mask->hdr.time_to_live) {
 					list[t].h_u.ipv4_hdr.time_to_live =
 						ipv4_spec->hdr.time_to_live;
 					list[t].m_u.ipv4_hdr.time_to_live =
-						UINT8_MAX;
+						ipv4_mask->hdr.time_to_live;
 				}
-				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+				if (ipv4_mask->hdr.next_proto_id) {
 					list[t].h_u.ipv4_hdr.protocol =
 						ipv4_spec->hdr.next_proto_id;
 					list[t].m_u.ipv4_hdr.protocol =
-						UINT8_MAX;
+						ipv4_mask->hdr.next_proto_id;
 				}
-				if (ipv4_mask->hdr.type_of_service ==
-						UINT8_MAX) {
+				if (ipv4_mask->hdr.type_of_service) {
 					list[t].h_u.ipv4_hdr.tos =
 						ipv4_spec->hdr.type_of_service;
-					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+					list[t].m_u.ipv4_hdr.tos =
+						ipv4_mask->hdr.type_of_service;
 				}
 				t++;
-			} else if (!ipv4_spec && !ipv4_mask) {
-				list[t].type = (tunnel_valid  == 0) ?
-					ICE_IPV4_OFOS : ICE_IPV4_IL;
 			}
 			break;
 
@@ -514,51 +503,58 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (!memcmp(ipv6_mask->hdr.src_addr,
-						ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+						j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
 						input_set |=
-							ICE_INSET_TUN_IPV6_SRC;
-					if (!memcmp(ipv6_mask->hdr.dst_addr,
-						ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+						ICE_INSET_TUN_IPV6_SRC;
+						break;
+						}
+					}
+					for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+						j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
 						input_set |=
-							ICE_INSET_TUN_IPV6_DST;
-					if (ipv6_mask->hdr.proto == UINT8_MAX)
+						ICE_INSET_TUN_IPV6_DST;
+						break;
+						}
+					}
+					if (ipv6_mask->hdr.proto)
 						input_set |=
 						ICE_INSET_TUN_IPV6_NEXT_HDR;
-					if (ipv6_mask->hdr.hop_limits ==
-							UINT8_MAX)
+					if (ipv6_mask->hdr.hop_limits)
 						input_set |=
 						ICE_INSET_TUN_IPV6_HOP_LIMIT;
-					if ((ipv6_mask->hdr.vtc_flow &
+					if (ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
 						(RTE_IPV6_HDR_TC_MASK))
-							== rte_cpu_to_be_32
-							(RTE_IPV6_HDR_TC_MASK))
 						input_set |=
 							ICE_INSET_TUN_IPV6_TC;
 				} else {
-					if (!memcmp(ipv6_mask->hdr.src_addr,
-						ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+							j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
 						input_set |= ICE_INSET_IPV6_SRC;
-					if (!memcmp(ipv6_mask->hdr.dst_addr,
-						ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
-						input_set |= ICE_INSET_IPV6_DST;
-					if (ipv6_mask->hdr.proto == UINT8_MAX)
+						break;
+						}
+					}
+					for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+						j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						input_set |=
+						ICE_INSET_IPV6_DST;
+						break;
+						}
+					}
+					if (ipv6_mask->hdr.proto)
 						input_set |=
 						ICE_INSET_IPV6_NEXT_HDR;
-					if (ipv6_mask->hdr.hop_limits ==
-							UINT8_MAX)
+					if (ipv6_mask->hdr.hop_limits)
 						input_set |=
 						ICE_INSET_IPV6_HOP_LIMIT;
-					if ((ipv6_mask->hdr.vtc_flow &
+					if (ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
 						(RTE_IPV6_HDR_TC_MASK))
-							== rte_cpu_to_be_32
-							(RTE_IPV6_HDR_TC_MASK))
 						input_set |= ICE_INSET_IPV6_TC;
 				}
 				list[t].type = (tunnel_valid  == 0) ?
@@ -568,35 +564,33 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				f = &list[t].h_u.ipv6_hdr;
 				s = &list[t].m_u.ipv6_hdr;
 				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j] ==
-						UINT8_MAX) {
+					if (ipv6_mask->hdr.src_addr[j]) {
 						f->src_addr[j] =
 						ipv6_spec->hdr.src_addr[j];
 						s->src_addr[j] =
 						ipv6_mask->hdr.src_addr[j];
 					}
-					if (ipv6_mask->hdr.dst_addr[j] ==
-								UINT8_MAX) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
 						f->dst_addr[j] =
 						ipv6_spec->hdr.dst_addr[j];
 						s->dst_addr[j] =
 						ipv6_mask->hdr.dst_addr[j];
 					}
 				}
-				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+				if (ipv6_mask->hdr.proto) {
 					f->next_hdr =
 						ipv6_spec->hdr.proto;
-					s->next_hdr = UINT8_MAX;
+					s->next_hdr =
+						ipv6_mask->hdr.proto;
 				}
-				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+				if (ipv6_mask->hdr.hop_limits) {
 					f->hop_limit =
 						ipv6_spec->hdr.hop_limits;
-					s->hop_limit = UINT8_MAX;
+					s->hop_limit =
+						ipv6_mask->hdr.hop_limits;
 				}
-				if ((ipv6_mask->hdr.vtc_flow &
+				if (ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
-						(RTE_IPV6_HDR_TC_MASK))
-						== rte_cpu_to_be_32
 						(RTE_IPV6_HDR_TC_MASK)) {
 					struct ice_le_ver_tc_flow vtf;
 					vtf.u.fld.version = 0;
@@ -606,13 +600,13 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 							RTE_IPV6_HDR_TC_MASK) >>
 							RTE_IPV6_HDR_TC_SHIFT;
 					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
-					vtf.u.fld.tc = UINT8_MAX;
+					vtf.u.fld.tc = (rte_be_to_cpu_32
+						(ipv6_mask->hdr.vtc_flow) &
+							RTE_IPV6_HDR_TC_MASK) >>
+							RTE_IPV6_HDR_TC_SHIFT;
 					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
 				}
 				t++;
-			} else if (!ipv6_spec && !ipv6_mask) {
-				list[t].type = (tun_type == ICE_NON_TUN) ?
-					ICE_IPV4_OFOS : ICE_IPV4_IL;
 			}
 			break;
 
@@ -631,21 +625,17 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (udp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (udp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_TUN_UDP_SRC_PORT;
-					if (udp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (udp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_TUN_UDP_DST_PORT;
 				} else {
-					if (udp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (udp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_UDP_SRC_PORT;
-					if (udp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (udp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_UDP_DST_PORT;
 				}
@@ -654,21 +644,19 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].type = ICE_UDP_OF;
 				else
 					list[t].type = ICE_UDP_ILOS;
-				if (udp_mask->hdr.src_port == UINT16_MAX) {
+				if (udp_mask->hdr.src_port) {
 					list[t].h_u.l4_hdr.src_port =
 						udp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 						udp_mask->hdr.src_port;
 				}
-				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+				if (udp_mask->hdr.dst_port) {
 					list[t].h_u.l4_hdr.dst_port =
 						udp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 						udp_mask->hdr.dst_port;
 				}
 						t++;
-			} else if (!udp_spec && !udp_mask) {
-				list[t].type = ICE_UDP_ILOS;
 			}
 			break;
 
@@ -692,40 +680,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (tcp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (tcp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_TUN_TCP_SRC_PORT;
-					if (tcp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (tcp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_TUN_TCP_DST_PORT;
 				} else {
-					if (tcp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (tcp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_TCP_SRC_PORT;
-					if (tcp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (tcp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_TCP_DST_PORT;
 				}
 				list[t].type = ICE_TCP_IL;
-				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+				if (tcp_mask->hdr.src_port) {
 					list[t].h_u.l4_hdr.src_port =
 						tcp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 						tcp_mask->hdr.src_port;
 				}
-				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+				if (tcp_mask->hdr.dst_port) {
 					list[t].h_u.l4_hdr.dst_port =
 						tcp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 						tcp_mask->hdr.dst_port;
 				}
 				t++;
-			} else if (!tcp_spec && !tcp_mask) {
-				list[t].type = ICE_TCP_IL;
 			}
 			break;
 
@@ -743,40 +725,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				}
 
 				if (tunnel_valid) {
-					if (sctp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (sctp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_TUN_SCTP_SRC_PORT;
-					if (sctp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (sctp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_TUN_SCTP_DST_PORT;
 				} else {
-					if (sctp_mask->hdr.src_port ==
-							UINT16_MAX)
+					if (sctp_mask->hdr.src_port)
 						input_set |=
 						ICE_INSET_SCTP_SRC_PORT;
-					if (sctp_mask->hdr.dst_port ==
-							UINT16_MAX)
+					if (sctp_mask->hdr.dst_port)
 						input_set |=
 						ICE_INSET_SCTP_DST_PORT;
 				}
 				list[t].type = ICE_SCTP_IL;
-				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+				if (sctp_mask->hdr.src_port) {
 					list[t].h_u.sctp_hdr.src_port =
 						sctp_spec->hdr.src_port;
 					list[t].m_u.sctp_hdr.src_port =
 						sctp_mask->hdr.src_port;
 				}
-				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+				if (sctp_mask->hdr.dst_port) {
 					list[t].h_u.sctp_hdr.dst_port =
 						sctp_spec->hdr.dst_port;
 					list[t].m_u.sctp_hdr.dst_port =
 						sctp_mask->hdr.dst_port;
 				}
 				t++;
-			} else if (!sctp_spec && !sctp_mask) {
-				list[t].type = ICE_SCTP_IL;
 			}
 			break;
 
@@ -799,21 +775,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			tunnel_valid = 1;
 			if (vxlan_spec && vxlan_mask) {
 				list[t].type = ICE_VXLAN;
-				if (vxlan_mask->vni[0] == UINT8_MAX &&
-					vxlan_mask->vni[1] == UINT8_MAX &&
-					vxlan_mask->vni[2] == UINT8_MAX) {
+				if (vxlan_mask->vni[0] ||
+					vxlan_mask->vni[1] ||
+					vxlan_mask->vni[2]) {
 					list[t].h_u.tnl_hdr.vni =
 						(vxlan_spec->vni[2] << 16) |
 						(vxlan_spec->vni[1] << 8) |
 						vxlan_spec->vni[0];
 					list[t].m_u.tnl_hdr.vni =
-						UINT32_MAX;
+						(vxlan_mask->vni[2] << 16) |
+						(vxlan_mask->vni[1] << 8) |
+						vxlan_mask->vni[0];
 					input_set |=
 						ICE_INSET_TUN_VXLAN_VNI;
 				}
 				t++;
-			} else if (!vxlan_spec && !vxlan_mask) {
-				list[t].type = ICE_VXLAN;
 			}
 			break;
 
@@ -835,21 +811,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			tunnel_valid = 1;
 			if (nvgre_spec && nvgre_mask) {
 				list[t].type = ICE_NVGRE;
-				if (nvgre_mask->tni[0] == UINT8_MAX &&
-					nvgre_mask->tni[1] == UINT8_MAX &&
-					nvgre_mask->tni[2] == UINT8_MAX) {
+				if (nvgre_mask->tni[0] ||
+					nvgre_mask->tni[1] ||
+					nvgre_mask->tni[2]) {
 					list[t].h_u.nvgre_hdr.tni_flow =
 						(nvgre_spec->tni[2] << 16) |
 						(nvgre_spec->tni[1] << 8) |
 						nvgre_spec->tni[0];
 					list[t].m_u.nvgre_hdr.tni_flow =
-						UINT32_MAX;
+						(nvgre_mask->tni[2] << 16) |
+						(nvgre_mask->tni[1] << 8) |
+						nvgre_mask->tni[0];
 					input_set |=
 						ICE_INSET_TUN_NVGRE_TNI;
 				}
 				t++;
-			} else if (!nvgre_spec && !nvgre_mask) {
-				list[t].type = ICE_NVGRE;
 			}
 			break;
 
@@ -870,23 +846,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			}
 			if (vlan_spec && vlan_mask) {
 				list[t].type = ICE_VLAN_OFOS;
-				if (vlan_mask->tci == UINT16_MAX) {
+				if (vlan_mask->tci) {
 					list[t].h_u.vlan_hdr.vlan =
 						vlan_spec->tci;
 					list[t].m_u.vlan_hdr.vlan =
-						UINT16_MAX;
+						vlan_mask->tci;
 					input_set |= ICE_INSET_VLAN_OUTER;
 				}
-				if (vlan_mask->inner_type == UINT16_MAX) {
+				if (vlan_mask->inner_type) {
 					list[t].h_u.vlan_hdr.type =
 						vlan_spec->inner_type;
 					list[t].m_u.vlan_hdr.type =
-						UINT16_MAX;
+						vlan_mask->inner_type;
 					input_set |= ICE_INSET_VLAN_OUTER;
 				}
 				t++;
-			} else if (!vlan_spec && !vlan_mask) {
-				list[t].type = ICE_VLAN_OFOS;
 			}
 			break;
 
@@ -918,19 +892,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					return 0;
 				}
 				list[t].type = ICE_PPPOE;
-				if (pppoe_mask->session_id == UINT16_MAX) {
+				if (pppoe_mask->session_id) {
 					list[t].h_u.pppoe_hdr.session_id =
 						pppoe_spec->session_id;
 					list[t].m_u.pppoe_hdr.session_id =
-						UINT16_MAX;
+						pppoe_mask->session_id;
 					input_set |= ICE_INSET_PPPOE_SESSION;
 				}
 				t++;
 				pppoe_valid = 1;
-			} else if (!pppoe_spec && !pppoe_mask) {
-				list[t].type = ICE_PPPOE;
 			}
-
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
@@ -953,18 +924,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				if (pppoe_valid)
 					t--;
 				list[t].type = ICE_PPPOE;
-				if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+				if (pppoe_proto_mask->proto_id) {
 					list[t].h_u.pppoe_hdr.ppp_prot_id =
 						pppoe_proto_spec->proto_id;
 					list[t].m_u.pppoe_hdr.ppp_prot_id =
-						UINT16_MAX;
+						pppoe_proto_mask->proto_id;
 					input_set |= ICE_INSET_PPPOE_PROTO;
 				}
 				t++;
-			} else if (!pppoe_proto_spec && !pppoe_proto_mask) {
-				list[t].type = ICE_PPPOE;
 			}
-
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_VOID:
-- 
2.19.1
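
The recurring pattern of this rework, shown as a standalone sketch
(simplified, assumed types; the driver itself fills struct
ice_adv_lkup_elem): a field joins the input set when any of its mask
bytes is nonzero, and the user-supplied mask is copied through instead
of being forced to all-ones.

	#include <stdint.h>
	#include <stddef.h>
	#include <stdbool.h>

	/* True if the user masked any byte of this field. */
	static bool
	mask_is_used(const uint8_t *mask, size_t len)
	{
		size_t j;

		for (j = 0; j < len; j++)
			if (mask[j])
				return true;
		return false;
	}

	/* Copy spec/mask bytes for every byte the user masked, rather
	 * than requiring mask[j] == UINT8_MAX as the old parser did. */
	static void
	copy_masked_field(uint8_t *h, uint8_t *m, const uint8_t *spec,
			  const uint8_t *mask, size_t len)
	{
		size_t j;

		for (j = 0; j < len; j++) {
			if (mask[j]) {
				h[j] = spec[j];
				m[j] = mask[j];
			}
		}
	}

With this shape, a mask such as 0xF0000000 on the IPv4 source address
simply flows into the hardware lookup element, where the old parser
would have rejected anything that was not all-ones.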


Thread overview: 69+ messages
2020-03-13  2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
2020-03-13  2:08 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF Wei Zhao
2020-03-13  2:08 ` [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set Wei Zhao
2020-03-13  2:08 ` Wei Zhao [this message]
2020-03-13  2:08 ` [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule Wei Zhao
2020-03-13  2:08 ` [dpdk-dev] [PATCH 5/7] net/ice: change default tunnel type Wei Zhao
2020-03-13  2:08 ` [dpdk-dev] [PATCH 6/7] net/ice: add action number check for switch Wei Zhao
2020-03-13  2:08 ` [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item Wei Zhao
2020-04-02  6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 02/13] net/ice: support for more PPPoE input set Wei Zhao
2020-04-02  9:31     ` Lu, Nannan
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 03/13] net/ice: change switch parser to support flexible mask Wei Zhao
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
2020-04-02  9:21     ` Lu, Nannan
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 05/13] net/ice: change default tunnel type Wei Zhao
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for switch Wei Zhao
2020-04-02  8:29     ` Zhang, Qi Z
2020-04-02  8:31       ` Zhao1, Wei
2020-04-03  1:49     ` Lu, Nannan
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 08/13] net/ice: add support for PFCP Wei Zhao
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 09/13] net/ice: add support for NAT-T Wei Zhao
2020-04-02  8:45     ` Zhang, Qi Z
2020-04-02 23:37       ` Zhao1, Wei
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 10/13] net/ice: add more flow support for permit mode Wei Zhao
2020-04-02  8:45     ` Zhang, Qi Z
2020-04-02  9:41       ` Zhao1, Wei
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 11/13] net/ice: fix input set of VLAN item Wei Zhao
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 12/13] net/ice: enable flow redirect on switch Wei Zhao
2020-04-02  7:34     ` Wang, Haiyue
2020-04-02  7:38       ` Xing, Beilei
2020-04-02  6:46   ` [dpdk-dev] [PATCH v2 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-02  7:32     ` Wang, Haiyue
2020-04-03  2:43   ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 02/13] net/ice: support for more PPPoE input set Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 03/13] net/ice: change switch parser to support flexible mask Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 05/13] net/ice: change default tunnel type Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 06/13] net/ice: add action number check for switch Wei Zhao
2020-04-03  3:15       ` Zhang, Qi Z
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 08/13] net/ice: add support for PFCP Wei Zhao
2020-04-03  3:16       ` Zhang, Qi Z
2020-04-03  3:18         ` Zhao1, Wei
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 09/13] net/ice: add support for IPv6 NAT-T Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 10/13] net/ice: add more flow support for permit stage Wei Zhao
2020-04-03  3:20       ` Zhang, Qi Z
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 11/13] net/ice: fix input set of VLAN item Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 12/13] net/ice: enable flow redirect on switch Wei Zhao
2020-04-03  2:43     ` [dpdk-dev] [PATCH v3 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-03  4:45     ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-03  4:45       ` [dpdk-dev] [PATCH v4 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-03  4:45       ` [dpdk-dev] [PATCH v4 02/13] net/ice: support for more PPPoE input set Wei Zhao
2020-04-03  4:45       ` [dpdk-dev] [PATCH v4 03/13] net/ice: change switch parser to support flexible mask Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 05/13] net/ice: change default tunnel type Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 06/13] net/ice: add action number check for switch Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 08/13] net/ice: add support for PFCP Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 09/13] net/ice: add support for IPv6 NAT-T Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 10/13] net/ice: add more flow support for permission stage Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 11/13] net/ice: fix input set of VLAN item Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 12/13] net/ice: enable flow redirect on switch Wei Zhao
2020-04-03  4:46       ` [dpdk-dev] [PATCH v4 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-03  5:09       ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Zhang, Qi Z
2020-04-04  6:17         ` Ye Xiaolong