DPDK patches and discussions
From: Zhirun Yan <zhirun.yan@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com, yahui.cao@intel.com,
	xiao.w.wang@intel.com, simei.su@intel.com
Cc: Zhirun Yan <zhirun.yan@intel.com>
Subject: [dpdk-dev] [PATCH v1] net/ice: refactor flow pattern parser
Date: Tue, 17 Nov 2020 16:45:24 +0800	[thread overview]
Message-ID: <20201117084524.3610038-1-zhirun.yan@intel.com> (raw)

Distinguish inner and outer input-set fields, and avoid deeply nested
conditionals in each item type's parser: select the target input set
and filter structure once per pattern item, then write through
pointers.

Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 504 ++++++++++++++++--------------
 1 file changed, 269 insertions(+), 235 deletions(-)
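
Notes (not part of the commit message): the refactor hinges on
selecting the destination input-set bitmap once per pattern item and
writing through a pointer, instead of branching on tunnel_type inside
every case. A minimal, self-contained sketch of that selection step
follows; it is illustrative only, and the IP_SRC/IP_DST macros are
stand-ins for the driver's ICE_* input-set bits, not the real
definitions.

  /* sketch.c - toy model of the pointer-selection pattern (illustrative) */
  #include <inttypes.h>
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define IP_SRC (1ULL << 0) /* stand-in for an ICE_* input-set bit */
  #define IP_DST (1ULL << 1) /* stand-in for an ICE_* input-set bit */

  int main(void)
  {
  	uint64_t inner_input_set = 0, outer_input_set = 0;
  	uint64_t *input_set;
  	bool tunnel = true;   /* e.g. a VXLAN item was found by the pre-scan */
  	bool is_outer = true; /* flips to false once the tunnel item is hit */

  	/* outer IPv4 item: bits land in the outer bitmap */
  	input_set = (tunnel && is_outer) ? &outer_input_set : &inner_input_set;
  	*input_set |= IP_DST;

  	is_outer = false; /* tunnel header consumed */

  	/* inner IPv4 item: same code path, bits land in the inner bitmap */
  	input_set = (tunnel && is_outer) ? &outer_input_set : &inner_input_set;
  	*input_set |= IP_SRC;

  	printf("outer=0x%" PRIx64 " inner=0x%" PRIx64 "\n",
  	       outer_input_set, inner_input_set);
  	return 0;
  }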

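As usage context, a flow rule that exercises both bitmaps could be
built as below (an illustrative pattern array using the public rte_flow
API; the addresses, ports, and the split of fields between outer and
inner are arbitrary). With this patch, the outer IPv4 destination would
be recorded in outer_input_set, while the inner IPv4 source and TCP
destination port would be recorded in inner_input_set:

  #include <rte_byteorder.h>
  #include <rte_flow.h>
  #include <rte_ip.h>

  /* Illustrative only: outer IPv4 dst + VXLAN + inner IPv4 src/TCP dport. */
  static struct rte_flow_item_ipv4 out_ip = {
  	.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
  };
  static struct rte_flow_item_ipv4 out_ip_mask = {
  	.hdr.dst_addr = RTE_BE32(0xffffffff),
  };
  static struct rte_flow_item_ipv4 in_ip = {
  	.hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
  };
  static struct rte_flow_item_ipv4 in_ip_mask = {
  	.hdr.src_addr = RTE_BE32(0xffffffff),
  };
  static struct rte_flow_item_tcp in_tcp = {
  	.hdr.dst_port = RTE_BE16(80),
  };
  static struct rte_flow_item_tcp in_tcp_mask = {
  	.hdr.dst_port = RTE_BE16(0xffff),
  };

  static struct rte_flow_item pattern[] = {
  	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer, is_outer == true */
  	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
  	  .spec = &out_ip, .mask = &out_ip_mask },
  	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
  	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },	/* is_outer -> false */
  	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner from here on */
  	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
  	  .spec = &in_ip, .mask = &in_ip_mask },
  	{ .type = RTE_FLOW_ITEM_TYPE_TCP,
  	  .spec = &in_tcp, .mask = &in_tcp_mask },
  	{ .type = RTE_FLOW_ITEM_TYPE_END },
  };
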
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 175abcdd5c..b53ed30b1c 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1646,7 +1646,9 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
-	uint64_t input_set = ICE_INSET_NONE;
+	uint64_t inner_input_set = ICE_INSET_NONE;
+	uint64_t outer_input_set = ICE_INSET_NONE;
+	uint64_t *input_set;
 	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
 	uint8_t  ipv6_addr_mask[16] = {
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -1655,289 +1657,315 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	uint32_t vtc_flow_cpu;
 	uint16_t ether_type;
 	enum rte_flow_item_type next_type;
+	bool is_outer = true;
+	struct ice_fdir_extra *p_ext_data;
+	struct ice_fdir_v4 *p_v4;
+	struct ice_fdir_v6 *p_v6;
 
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
+			break;
+		}
+	}
+
+	/* This loop parses the flow pattern and distinguishes non-tunnel
+	 * and tunnel flows. For a tunnel flow, the non-tunnel structures
+	 * are reused to track the inner part.
+	 *
+	 *        is_outer tunnel_type p_input_set input_set_bit data_struct
+	 * Non-Tun    Y         N         inner        outer        origin
+	 * Tun-out    Y         Y         outer        outer        outer
+	 * Tun-in     N         Y         inner        inner        origin
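+	 *
+	 * e.g. for a VXLAN pattern eth / ipv4 / udp / vxlan / eth / ipv4,
+	 * items before the vxlan item take the Tun-out row and items after
+	 * it take the Tun-in row; a pattern with no tunnel item always
+	 * takes the Non-Tun row.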
+	 */
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
-					RTE_FLOW_ERROR_TYPE_ITEM,
-					item,
-					"Not support range");
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
 			return -rte_errno;
 		}
 		item_type = item->type;
 
+		input_set = (tunnel_type && is_outer) ?
+			    &outer_input_set :
+			    &inner_input_set;
+
+		if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+			p_v4 = (tunnel_type && is_outer) ?
+			       &filter->input.ip_outer.v4 :
+			       &filter->input.ip.v4;
+		if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+			p_v6 = (tunnel_type && is_outer) ?
+			       &filter->input.ip_outer.v6 :
+			       &filter->input.ip.v6;
+
 		switch (item_type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
+			flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
 			eth_spec = item->spec;
 			eth_mask = item->mask;
-			next_type = (item + 1)->type;
 
-			if (eth_spec && eth_mask) {
-				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
-					input_set |= ICE_INSET_DMAC;
-					rte_memcpy(&filter->input.ext_data.dst_mac,
-						   &eth_spec->dst,
-						   RTE_ETHER_ADDR_LEN);
-				}
+			if (!(eth_spec && eth_mask))
+				break;
 
-				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
-					input_set |= ICE_INSET_SMAC;
-					rte_memcpy(&filter->input.ext_data.src_mac,
-						   &eth_spec->src,
-						   RTE_ETHER_ADDR_LEN);
-				}
+			*input_set |= is_outer ? ICE_PROT_MAC_OUTER : ICE_PROT_MAC_INNER;
+			if (!rte_is_zero_ether_addr(&eth_mask->dst))
+				*input_set |= ICE_DMAC;
+			if (!rte_is_zero_ether_addr(&eth_mask->src))
+				*input_set |= ICE_SMAC;
 
-				/* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
-				if (eth_mask->type == RTE_BE16(0xffff) &&
-				    next_type == RTE_FLOW_ITEM_TYPE_END) {
-					input_set |= ICE_INSET_ETHERTYPE;
-					ether_type = rte_be_to_cpu_16(eth_spec->type);
-
-					if (ether_type == RTE_ETHER_TYPE_IPV4 ||
-					    ether_type == RTE_ETHER_TYPE_IPV6) {
-						rte_flow_error_set(error, EINVAL,
-								   RTE_FLOW_ERROR_TYPE_ITEM,
-								   item,
-								   "Unsupported ether_type.");
-						return -rte_errno;
-					}
-
-					rte_memcpy(&filter->input.ext_data.ether_type,
-						   &eth_spec->type,
-						   sizeof(eth_spec->type));
-					flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
+			next_type = (item + 1)->type;
+			/* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
+			if (eth_mask->type == RTE_BE16(0xffff) &&
+			    next_type == RTE_FLOW_ITEM_TYPE_END) {
+				*input_set |= ICE_INSET_ETHERTYPE;
+				ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+				    ether_type == RTE_ETHER_TYPE_IPV6) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item,
+							   "Unsupported ether_type.");
+					return -rte_errno;
 				}
 			}
+
+			p_ext_data = (tunnel_type && is_outer) ?
+				     &filter->input.ext_data_outer :
+				     &filter->input.ext_data;
+			rte_memcpy(&p_ext_data->src_mac,
+				   &eth_spec->src,
+				   RTE_ETHER_ADDR_LEN);
+			rte_memcpy(&p_ext_data->dst_mac,
+				   &eth_spec->dst,
+				   RTE_ETHER_ADDR_LEN);
+			rte_memcpy(&p_ext_data->ether_type,
+				   &eth_spec->type,
+				   sizeof(eth_spec->type));
+
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
 			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
 			ipv4_spec = item->spec;
 			ipv4_mask = item->mask;
 
-			if (ipv4_spec && ipv4_mask) {
-				/* Check IPv4 mask and update input set */
-				if (ipv4_mask->hdr.version_ihl ||
-				    ipv4_mask->hdr.total_length ||
-				    ipv4_mask->hdr.packet_id ||
-				    ipv4_mask->hdr.fragment_offset ||
-				    ipv4_mask->hdr.hdr_checksum) {
-					rte_flow_error_set(error, EINVAL,
+			if (!(ipv4_spec && ipv4_mask))
+				break;
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.fragment_offset ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid IPv4 mask.");
-					return -rte_errno;
-				}
-				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
-					input_set |= tunnel_type ?
-						     ICE_INSET_TUN_IPV4_SRC :
-						     ICE_INSET_IPV4_SRC;
-				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
-					input_set |= tunnel_type ?
-						     ICE_INSET_TUN_IPV4_DST :
-						     ICE_INSET_IPV4_DST;
-				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_TOS;
-				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_TTL;
-				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_PROTO;
-
-				filter->input.ip.v4.dst_ip =
-					ipv4_spec->hdr.dst_addr;
-				filter->input.ip.v4.src_ip =
-					ipv4_spec->hdr.src_addr;
-				filter->input.ip.v4.tos =
-					ipv4_spec->hdr.type_of_service;
-				filter->input.ip.v4.ttl =
-					ipv4_spec->hdr.time_to_live;
-				filter->input.ip.v4.proto =
-					ipv4_spec->hdr.next_proto_id;
+				return -rte_errno;
 			}
 
-			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+			/* handle outer L3 fields */
+			*input_set |= is_outer ? ICE_PROT_IPV4_OUTER : ICE_PROT_IPV4_INNER;
+			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+				*input_set |= ICE_IP_DST;
+			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+				*input_set |= ICE_IP_SRC;
+			if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+				*input_set |= ICE_IP_TOS;
+
+			p_v4 = (tunnel_type && is_outer) ?
+			       &filter->input.ip_outer.v4 :
+			       &filter->input.ip.v4;
+			p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
+			p_v4->src_ip = ipv4_spec->hdr.src_addr;
+			p_v4->tos = ipv4_spec->hdr.type_of_service;
+
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
 			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
 			ipv6_spec = item->spec;
 			ipv6_mask = item->mask;
 
-			if (ipv6_spec && ipv6_mask) {
-				/* Check IPv6 mask and update input set */
-				if (ipv6_mask->hdr.payload_len) {
-					rte_flow_error_set(error, EINVAL,
+			if (!(ipv6_spec && ipv6_mask))
+				break;
+
+			/* Check IPv6 mask and update input set */
+			if (ipv6_mask->hdr.payload_len) {
+				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid IPv6 mask");
-					return -rte_errno;
-				}
-
-				if (!memcmp(ipv6_mask->hdr.src_addr,
-					    ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.src_addr)))
-					input_set |= ICE_INSET_IPV6_SRC;
-				if (!memcmp(ipv6_mask->hdr.dst_addr,
-					    ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
-					input_set |= ICE_INSET_IPV6_DST;
-
-				if ((ipv6_mask->hdr.vtc_flow &
-				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
-				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
-					input_set |= ICE_INSET_IPV6_TC;
-				if (ipv6_mask->hdr.proto == UINT8_MAX)
-					input_set |= ICE_INSET_IPV6_NEXT_HDR;
-				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
-					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
-
-				rte_memcpy(filter->input.ip.v6.dst_ip,
-					   ipv6_spec->hdr.dst_addr, 16);
-				rte_memcpy(filter->input.ip.v6.src_ip,
-					   ipv6_spec->hdr.src_addr, 16);
-
-				vtc_flow_cpu =
-				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
-				filter->input.ip.v6.tc =
-					(uint8_t)(vtc_flow_cpu >>
-						  ICE_FDIR_IPV6_TC_OFFSET);
-				filter->input.ip.v6.proto =
-					ipv6_spec->hdr.proto;
-				filter->input.ip.v6.hlim =
-					ipv6_spec->hdr.hop_limits;
+				return -rte_errno;
 			}
 
-			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+			*input_set |= is_outer ? ICE_PROT_IPV6_OUTER : ICE_PROT_IPV6_INNER;
+			if (!memcmp(ipv6_mask->hdr.src_addr,
+				    ipv6_addr_mask,
+				    RTE_DIM(ipv6_mask->hdr.src_addr)))
+				*input_set |= ICE_IP_SRC;
+			if (!memcmp(ipv6_mask->hdr.dst_addr,
+				    ipv6_addr_mask,
+				    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+				*input_set |= ICE_IP_DST;
+
+			if ((ipv6_mask->hdr.vtc_flow &
+			     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+			    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+				*input_set |= ICE_IP_TOS;
+			if (ipv6_mask->hdr.proto == UINT8_MAX)
+				*input_set |= ICE_IP_PROTO;
+			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+				*input_set |= ICE_IP_TTL;
+
+			p_v6 = (tunnel_type && is_outer) ?
+			       &filter->input.ip_outer.v6 :
+			       &filter->input.ip.v6;
+			rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
+			rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
+			vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+			p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
+			p_v6->proto = ipv6_spec->hdr.proto;
+			p_v6->hlim = ipv6_spec->hdr.hop_limits;
+
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
-			tcp_spec = item->spec;
-			tcp_mask = item->mask;
-
 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
-			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
 				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
 
-			if (tcp_spec && tcp_mask) {
-				/* Check TCP mask and update input set */
-				if (tcp_mask->hdr.sent_seq ||
-				    tcp_mask->hdr.recv_ack ||
-				    tcp_mask->hdr.data_off ||
-				    tcp_mask->hdr.tcp_flags ||
-				    tcp_mask->hdr.rx_win ||
-				    tcp_mask->hdr.cksum ||
-				    tcp_mask->hdr.tcp_urp) {
-					rte_flow_error_set(error, EINVAL,
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask))
+				break;
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid TCP mask");
-					return -rte_errno;
-				}
+				return -rte_errno;
+			}
 
-				if (tcp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= tunnel_type ?
-						     ICE_INSET_TUN_TCP_SRC_PORT :
-						     ICE_INSET_TCP_SRC_PORT;
-				if (tcp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= tunnel_type ?
-						     ICE_INSET_TUN_TCP_DST_PORT :
-						     ICE_INSET_TCP_DST_PORT;
-
-				/* Get filter info */
-				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
-					filter->input.ip.v4.dst_port =
-						tcp_spec->hdr.dst_port;
-					filter->input.ip.v4.src_port =
-						tcp_spec->hdr.src_port;
-				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
-					filter->input.ip.v6.dst_port =
-						tcp_spec->hdr.dst_port;
-					filter->input.ip.v6.src_port =
-						tcp_spec->hdr.src_port;
-				}
+			*input_set |= is_outer ? ICE_PROT_TCP_OUTER : ICE_PROT_TCP_INNER;
+			if (tcp_mask->hdr.src_port == UINT16_MAX)
+				*input_set |= ICE_SPORT;
+			if (tcp_mask->hdr.dst_port == UINT16_MAX)
+				*input_set |= ICE_DPORT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				p_v4->dst_port = tcp_spec->hdr.dst_port;
+				p_v4->src_port = tcp_spec->hdr.src_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				p_v6->dst_port = tcp_spec->hdr.dst_port;
+				p_v6->src_port = tcp_spec->hdr.src_port;
 			}
+
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
-			udp_spec = item->spec;
-			udp_mask = item->mask;
-
 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
-			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
 				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
 
-			if (udp_spec && udp_mask) {
-				/* Check UDP mask and update input set*/
-				if (udp_mask->hdr.dgram_len ||
-				    udp_mask->hdr.dgram_cksum) {
-					rte_flow_error_set(error, EINVAL,
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask))
+				break;
+
+			/* Check UDP mask and update input set */
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid UDP mask");
-					return -rte_errno;
-				}
+				return -rte_errno;
+			}
 
-				if (udp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= tunnel_type ?
-						     ICE_INSET_TUN_UDP_SRC_PORT :
-						     ICE_INSET_UDP_SRC_PORT;
-				if (udp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= tunnel_type ?
-						     ICE_INSET_TUN_UDP_DST_PORT :
-						     ICE_INSET_UDP_DST_PORT;
-
-				/* Get filter info */
-				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
-					filter->input.ip.v4.dst_port =
-						udp_spec->hdr.dst_port;
-					filter->input.ip.v4.src_port =
-						udp_spec->hdr.src_port;
-				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
-					filter->input.ip.v6.src_port =
-						udp_spec->hdr.src_port;
-					filter->input.ip.v6.dst_port =
-						udp_spec->hdr.dst_port;
-				}
+			*input_set |= is_outer ? ICE_PROT_UDP_OUTER : ICE_PROT_UDP_INNER;
+			if (udp_mask->hdr.src_port == UINT16_MAX)
+				*input_set |= ICE_SPORT;
+			if (udp_mask->hdr.dst_port == UINT16_MAX)
+				*input_set |= ICE_DPORT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				p_v4->dst_port = udp_spec->hdr.dst_port;
+				p_v4->src_port = udp_spec->hdr.src_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				p_v6->src_port = udp_spec->hdr.src_port;
+				p_v6->dst_port = udp_spec->hdr.dst_port;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
-			sctp_spec = item->spec;
-			sctp_mask = item->mask;
-
 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
-			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
 				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
 
-			if (sctp_spec && sctp_mask) {
-				/* Check SCTP mask and update input set */
-				if (sctp_mask->hdr.cksum) {
-					rte_flow_error_set(error, EINVAL,
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask))
+				break;
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid UDP mask");
-					return -rte_errno;
-				}
+				return -rte_errno;
+			}
 
-				if (sctp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= tunnel_type ?
-						     ICE_INSET_TUN_SCTP_SRC_PORT :
-						     ICE_INSET_SCTP_SRC_PORT;
-				if (sctp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= tunnel_type ?
-						     ICE_INSET_TUN_SCTP_DST_PORT :
-						     ICE_INSET_SCTP_DST_PORT;
-
-				/* Get filter info */
-				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
-					filter->input.ip.v4.dst_port =
-						sctp_spec->hdr.dst_port;
-					filter->input.ip.v4.src_port =
-						sctp_spec->hdr.src_port;
-				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
-					filter->input.ip.v6.dst_port =
-						sctp_spec->hdr.dst_port;
-					filter->input.ip.v6.src_port =
-						sctp_spec->hdr.src_port;
-				}
+			*input_set |= is_outer ? ICE_PROT_SCTP_OUTER : ICE_PROT_SCTP_INNER;
+			if (sctp_mask->hdr.src_port == UINT16_MAX)
+				*input_set |= ICE_SPORT;
+			if (sctp_mask->hdr.dst_port == UINT16_MAX)
+				*input_set |= ICE_DPORT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				p_v4->dst_port = sctp_spec->hdr.dst_port;
+				p_v4->src_port = sctp_spec->hdr.src_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				p_v6->dst_port = sctp_spec->hdr.dst_port;
+				p_v6->src_port = sctp_spec->hdr.src_port;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VOID:
@@ -1946,6 +1974,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			l3 = RTE_FLOW_ITEM_TYPE_END;
 			vxlan_spec = item->spec;
 			vxlan_mask = item->mask;
+			is_outer = false;
 
 			if (vxlan_spec || vxlan_mask) {
 				rte_flow_error_set(error, EINVAL,
@@ -1955,50 +1984,54 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 				return -rte_errno;
 			}
 
-			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GTPU:
 			l3 = RTE_FLOW_ITEM_TYPE_END;
 			gtp_spec = item->spec;
 			gtp_mask = item->mask;
+			is_outer = false;
 
-			if (gtp_spec && gtp_mask) {
-				if (gtp_mask->v_pt_rsv_flags ||
-				    gtp_mask->msg_type ||
-				    gtp_mask->msg_len) {
-					rte_flow_error_set(error, EINVAL,
+			if (!(gtp_spec && gtp_mask))
+				break;
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len) {
+				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid GTP mask");
-					return -rte_errno;
-				}
+				return -rte_errno;
+			}
 
-				if (gtp_mask->teid == UINT32_MAX)
-					input_set |= ICE_INSET_GTPU_TEID;
+			if (gtp_mask->teid == UINT32_MAX)
+				*input_set |= ICE_INSET_GTPU_TEID;
 
-				filter->input.gtpu_data.teid = gtp_spec->teid;
-			}
+			filter->input.gtpu_data.teid = gtp_spec->teid;
 
 			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
 			gtp_psc_spec = item->spec;
 			gtp_psc_mask = item->mask;
+			is_outer = false;
 
-			if (gtp_psc_spec && gtp_psc_mask) {
-				if (gtp_psc_mask->qfi == UINT8_MAX)
-					input_set |= ICE_INSET_GTPU_QFI;
+			if (!(gtp_psc_spec && gtp_psc_mask))
+				break;
+
+			if (gtp_psc_mask->qfi == UINT8_MAX)
+				*input_set |= ICE_INSET_GTPU_QFI;
+
+			filter->input.gtpu_data.qfi =
+				gtp_psc_spec->qfi;
 
-				filter->input.gtpu_data.qfi =
-					gtp_psc_spec->qfi;
-			}
 			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
 			break;
 		default:
 			rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Invalid pattern item.");
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid pattern item.");
 			return -rte_errno;
 		}
 	}
@@ -2018,7 +2051,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 
 	filter->tunnel_type = tunnel_type;
 	filter->input.flow_type = flow_type;
-	filter->input_set = input_set;
+	filter->input_set = inner_input_set;
+	filter->outer_input_set = outer_input_set;
 
 	return 0;
 }
-- 
2.25.1


Thread overview: 4+ messages
2020-11-17  8:45 Zhirun Yan [this message]
2020-11-17 15:31 ` Cao, Yahui
2020-11-18  3:15   ` Yan, Zhirun
2020-11-18  4:16     ` Cao, Yahui
