From: Qiming Yang
To: qabuild@intel.com
Cc: Qiming Yang, stable@dpdk.org
Date: Mon, 15 Jul 2019 17:38:05 +0800
Message-Id: <20190715093807.128101-2-qiming.yang@intel.com>
X-Mailer: git-send-email 2.9.5
In-Reply-To: <20190715093807.128101-1-qiming.yang@intel.com>
References: <20190715093807.128101-1-qiming.yang@intel.com>
Subject: [dpdk-stable] [DPDK] net/ice: fix tunnel rule not recognized

Previously, whether a field of the input set belonged to the outer or the
inner header was decided by counting whether the item appeared once or
twice in the pattern. This does not work when the user does not configure
the outer input set at all. Fix the issue by setting a tunnel flag once a
VXLAN or NVGRE item is reached and using that flag to classify the items
that follow.

Fixes: d76116a4678f ("net/ice: add generic flow API")
Cc: stable@dpdk.org

Signed-off-by: Qiming Yang
---
(An illustrative rte_flow pattern for the failure mode is appended after the patch.)

 drivers/net/ice/ice_generic_flow.c | 80 ++++++++++++++++++--------------------
 1 file changed, 38 insertions(+), 42 deletions(-)

diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index e6a2c4b..05a1678 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -209,8 +209,7 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 	uint64_t input_set = ICE_INSET_NONE;
-	bool outer_ip = true;
-	bool outer_l4 = true;
+	bool is_tunnel = false;
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
@@ -259,27 +258,26 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_ip) {
+			if (is_tunnel) {
 				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
-					input_set |= ICE_INSET_IPV4_SRC;
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
-					input_set |= ICE_INSET_IPV4_DST;
-				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_TOS;
+					input_set |= ICE_INSET_TUN_IPV4_DST;
 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_TTL;
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_PROTO;
-				outer_ip = false;
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
 			} else {
 				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
-					input_set |= ICE_INSET_TUN_IPV4_SRC;
+					input_set |= ICE_INSET_IPV4_SRC;
 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
-					input_set |= ICE_INSET_TUN_IPV4_DST;
+					input_set |= ICE_INSET_IPV4_DST;
 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
-					input_set |= ICE_INSET_TUN_IPV4_TTL;
+					input_set |= ICE_INSET_IPV4_TTL;
 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
-					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+					input_set |= ICE_INSET_IPV4_PROTO;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -302,33 +300,32 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_ip) {
+			if (is_tunnel) {
 				if (!memcmp(ipv6_mask->hdr.src_addr,
 					    ipv6_addr_mask,
 					    RTE_DIM(ipv6_mask->hdr.src_addr)))
-					input_set |= ICE_INSET_IPV6_SRC;
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
 				if (!memcmp(ipv6_mask->hdr.dst_addr,
 					    ipv6_addr_mask,
 					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
-					input_set |= ICE_INSET_IPV6_DST;
+					input_set |= ICE_INSET_TUN_IPV6_DST;
 				if (ipv6_mask->hdr.proto == UINT8_MAX)
-					input_set |= ICE_INSET_IPV6_PROTO;
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
-					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
-				outer_ip = false;
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
 			} else {
 				if (!memcmp(ipv6_mask->hdr.src_addr,
 					    ipv6_addr_mask,
 					    RTE_DIM(ipv6_mask->hdr.src_addr)))
-					input_set |= ICE_INSET_TUN_IPV6_SRC;
+					input_set |= ICE_INSET_IPV6_SRC;
 				if (!memcmp(ipv6_mask->hdr.dst_addr,
 					    ipv6_addr_mask,
 					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
-					input_set |= ICE_INSET_TUN_IPV6_DST;
+					input_set |= ICE_INSET_IPV6_DST;
 				if (ipv6_mask->hdr.proto == UINT8_MAX)
-					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+					input_set |= ICE_INSET_IPV6_PROTO;
 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
-					input_set |= ICE_INSET_TUN_IPV6_TTL;
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
 			}
 			break;
 
@@ -353,17 +350,16 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_l4) {
+			if (is_tunnel) {
 				if (udp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_SRC_PORT;
+					input_set |= ICE_INSET_TUN_SRC_PORT;
 				if (udp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_DST_PORT;
-				outer_l4 = false;
+					input_set |= ICE_INSET_TUN_DST_PORT;
 			} else {
 				if (udp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_SRC_PORT;
+					input_set |= ICE_INSET_SRC_PORT;
 				if (udp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_DST_PORT;
+					input_set |= ICE_INSET_DST_PORT;
 			}
 			break;
 
@@ -393,17 +389,16 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_l4) {
+			if (is_tunnel) {
 				if (tcp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_SRC_PORT;
+					input_set |= ICE_INSET_TUN_SRC_PORT;
 				if (tcp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_DST_PORT;
-				outer_l4 = false;
+					input_set |= ICE_INSET_TUN_DST_PORT;
 			} else {
 				if (tcp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_SRC_PORT;
+					input_set |= ICE_INSET_SRC_PORT;
 				if (tcp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_DST_PORT;
+					input_set |= ICE_INSET_DST_PORT;
 			}
 			break;
 
@@ -427,17 +422,16 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_l4) {
+			if (is_tunnel) {
 				if (sctp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_SRC_PORT;
+					input_set |= ICE_INSET_TUN_SRC_PORT;
 				if (sctp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_DST_PORT;
-				outer_l4 = false;
+					input_set |= ICE_INSET_TUN_DST_PORT;
 			} else {
 				if (sctp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_SRC_PORT;
+					input_set |= ICE_INSET_SRC_PORT;
 				if (sctp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_DST_PORT;
+					input_set |= ICE_INSET_DST_PORT;
 			}
 			break;
 
@@ -486,6 +480,7 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 					   "Invalid VXLAN item");
 				return 0;
 			}
+			is_tunnel = 1;
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
@@ -503,6 +498,7 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 					   "Invalid NVGRE item");
 				return 0;
 			}
+			is_tunnel = 1;
 			break;
 
 		default:
-- 
2.9.5
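
As an aside for reviewers, below is a minimal, hypothetical rte_flow pattern
(not part of this patch; the helper name and field choice are illustrative)
showing the case the commit message describes: the outer ETH/IPV4/UDP items
carry no spec/mask, so with the old code the outer IPv4 item never flipped
outer_ip and the inner IPv4 destination was reported as the outer
ICE_INSET_IPV4_DST bit. With this fix, the VXLAN item sets is_tunnel first,
so the same pattern yields ICE_INSET_TUN_IPV4_DST.

#include <string.h>
#include <stdint.h>
#include <rte_flow.h>

/* Hypothetical helper, for illustration only: build a VXLAN pattern whose
 * outer items have no spec/mask, so the inner IPv4 destination is the only
 * field in the input set.
 */
static void
build_inner_ipv4_dst_pattern(struct rte_flow_item pattern[7],
			     struct rte_flow_item_ipv4 *inner_spec,
			     struct rte_flow_item_ipv4 *inner_mask,
			     uint32_t inner_dst_be)	/* network byte order */
{
	memset(pattern, 0, 7 * sizeof(pattern[0]));
	memset(inner_spec, 0, sizeof(*inner_spec));
	memset(inner_mask, 0, sizeof(*inner_mask));

	inner_spec->hdr.dst_addr = inner_dst_be;
	inner_mask->hdr.dst_addr = UINT32_MAX;		/* match inner dst only */

	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;	/* outer L2, no spec/mask */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;	/* outer L3, no spec/mask */
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;	/* outer L4, no spec/mask */
	pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;	/* tunnel item, sets is_tunnel */
	pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;	/* inner L2 */
	pattern[5].type = RTE_FLOW_ITEM_TYPE_IPV4;	/* inner L3, the only configured item */
	pattern[5].spec = inner_spec;
	pattern[5].mask = inner_mask;
	pattern[6].type = RTE_FLOW_ITEM_TYPE_END;
}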