From mboxrd@z Thu Jan  1 00:00:00 1970
From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: stable@dpdk.org, qi.z.zhang@intel.com, nannan.lu@intel.com,
 Wei Zhao <wei.zhao1@intel.com>
Date: Sun, 28 Jun 2020 13:28:57 +0800
Message-Id: <20200628052857.67428-5-wei.zhao1@intel.com>
X-Mailer: git-send-email 2.19.1
In-Reply-To: <20200628052857.67428-1-wei.zhao1@intel.com>
References: <20200628050145.72810-1-wei.zhao1@intel.com>
 <20200628052857.67428-1-wei.zhao1@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v4 4/4] net/ice: add input set byte number check

This patch adds a check on the total number of input set bytes, as the
hardware supports at most 32 bytes of input set per switch rule. Patterns
whose accumulated match fields exceed this limit are now rejected.
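As an illustration (not part of this patch), the sketch below builds a
rule whose input set is too large and checks that validation now fails.
The port id, queue index, UDP port values and the helper name are made-up
assumptions; whether a given pattern is routed to the switch filter also
depends on the rest of the rule:

#include <string.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static int
try_oversized_input_set(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv6 ipv6_spec, ipv6_mask;
	struct rte_flow_item_udp udp_spec, udp_mask;
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_error err;

	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	memset(&udp_spec, 0, sizeof(udp_spec));
	memset(&udp_mask, 0, sizeof(udp_mask));

	/* Match both IPv6 addresses in full: 16 + 16 = 32 input set bytes. */
	memset(&ipv6_mask.hdr.src_addr, 0xff, sizeof(ipv6_mask.hdr.src_addr));
	memset(&ipv6_mask.hdr.dst_addr, 0xff, sizeof(ipv6_mask.hdr.dst_addr));

	/* Match UDP src/dst ports as well: 2 + 2 more, 36 bytes in total. */
	udp_spec.hdr.src_port = rte_cpu_to_be_16(4789);
	udp_spec.hdr.dst_port = rte_cpu_to_be_16(4789);
	udp_mask.hdr.src_port = RTE_BE16(0xffff);
	udp_mask.hdr.dst_port = RTE_BE16(0xffff);

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
		  .spec = &ipv6_spec, .mask = &ipv6_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* 36 > MAX_INPUT_SET_BYTE (32): with this patch the call is
	 * expected to fail instead of programming an unusable rule. */
	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}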
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Cc: stable@dpdk.org

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 43 +++++++++++++++++++++++++++--
 1 file changed, 40 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index c1ea74c73..d399c5a2e 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -25,7 +25,8 @@
 
 #include "ice_generic_flow.h"
 
-#define MAX_QGRP_NUM_TYPE 7
+#define MAX_QGRP_NUM_TYPE 7
+#define MAX_INPUT_SET_BYTE 32
 #define ICE_PPP_IPV4_PROTO	0x0021
 #define ICE_PPP_IPV6_PROTO	0x0057
 #define ICE_IPV4_PROTO_NVGRE	0x002F
@@ -473,6 +474,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = ICE_INSET_NONE;
+	uint16_t input_set_byte = 0;
 	uint16_t tunnel_valid = 0;
 	bool pppoe_elem_valid = 0;
 	bool pppoe_patt_valid = 0;
@@ -540,6 +542,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					m->src_addr[j] =
 					eth_mask->src.addr_bytes[j];
 					i = 1;
+					input_set_byte++;
 				}
 				if (eth_mask->dst.addr_bytes[j]) {
 					h->dst_addr[j] =
@@ -547,6 +550,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					m->dst_addr[j] =
 					eth_mask->dst.addr_bytes[j];
 					i = 1;
+					input_set_byte++;
 				}
 			}
 			if (i)
@@ -557,6 +561,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					eth_spec->type;
 				list[t].m_u.ethertype.ethtype_id =
 					eth_mask->type;
+				input_set_byte += 2;
 				t++;
 			}
 		}
@@ -616,24 +621,28 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv4_spec->hdr.src_addr;
 				list[t].m_u.ipv4_hdr.src_addr =
 					ipv4_mask->hdr.src_addr;
+				input_set_byte += 2;
 			}
 			if (ipv4_mask->hdr.dst_addr) {
 				list[t].h_u.ipv4_hdr.dst_addr =
 					ipv4_spec->hdr.dst_addr;
 				list[t].m_u.ipv4_hdr.dst_addr =
 					ipv4_mask->hdr.dst_addr;
+				input_set_byte += 2;
 			}
 			if (ipv4_mask->hdr.time_to_live) {
 				list[t].h_u.ipv4_hdr.time_to_live =
 					ipv4_spec->hdr.time_to_live;
 				list[t].m_u.ipv4_hdr.time_to_live =
 					ipv4_mask->hdr.time_to_live;
+				input_set_byte++;
 			}
 			if (ipv4_mask->hdr.next_proto_id) {
 				list[t].h_u.ipv4_hdr.protocol =
 					ipv4_spec->hdr.next_proto_id;
 				list[t].m_u.ipv4_hdr.protocol =
 					ipv4_mask->hdr.next_proto_id;
+				input_set_byte++;
 			}
 			if ((ipv4_spec->hdr.next_proto_id &
 				ipv4_mask->hdr.next_proto_id) ==
@@ -644,6 +653,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv4_spec->hdr.type_of_service;
 				list[t].m_u.ipv4_hdr.tos =
 					ipv4_mask->hdr.type_of_service;
+				input_set_byte++;
 			}
 			t++;
 		}
@@ -721,12 +731,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv6_spec->hdr.src_addr[j];
 					s->src_addr[j] =
 						ipv6_mask->hdr.src_addr[j];
+					input_set_byte++;
 				}
 				if (ipv6_mask->hdr.dst_addr[j]) {
 					f->dst_addr[j] =
 						ipv6_spec->hdr.dst_addr[j];
 					s->dst_addr[j] =
 						ipv6_mask->hdr.dst_addr[j];
+					input_set_byte++;
 				}
 			}
 			if (ipv6_mask->hdr.proto) {
@@ -734,12 +746,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv6_spec->hdr.proto;
 				s->next_hdr =
 					ipv6_mask->hdr.proto;
+				input_set_byte++;
 			}
 			if (ipv6_mask->hdr.hop_limits) {
 				f->hop_limit =
 					ipv6_spec->hdr.hop_limits;
 				s->hop_limit =
 					ipv6_mask->hdr.hop_limits;
+				input_set_byte++;
 			}
 			if (ipv6_mask->hdr.vtc_flow &
 				rte_cpu_to_be_32
@@ -757,6 +771,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					RTE_IPV6_HDR_TC_MASK) >>
 					RTE_IPV6_HDR_TC_SHIFT;
 				s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
+				input_set_byte += 4;
 			}
 			t++;
 		}
@@ -802,14 +817,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					udp_spec->hdr.src_port;
 				list[t].m_u.l4_hdr.src_port =
 					udp_mask->hdr.src_port;
+				input_set_byte += 2;
 			}
 			if (udp_mask->hdr.dst_port) {
 				list[t].h_u.l4_hdr.dst_port =
 					udp_spec->hdr.dst_port;
 				list[t].m_u.l4_hdr.dst_port =
 					udp_mask->hdr.dst_port;
+				input_set_byte += 2;
 			}
-				t++;
+			t++;
 		}
 		break;
 
@@ -854,12 +871,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					tcp_spec->hdr.src_port;
 				list[t].m_u.l4_hdr.src_port =
 					tcp_mask->hdr.src_port;
+				input_set_byte += 2;
 			}
 			if (tcp_mask->hdr.dst_port) {
 				list[t].h_u.l4_hdr.dst_port =
 					tcp_spec->hdr.dst_port;
 				list[t].m_u.l4_hdr.dst_port =
 					tcp_mask->hdr.dst_port;
+				input_set_byte += 2;
 			}
 			t++;
 		}
@@ -899,12 +918,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					sctp_spec->hdr.src_port;
 				list[t].m_u.sctp_hdr.src_port =
 					sctp_mask->hdr.src_port;
+				input_set_byte += 2;
 			}
 			if (sctp_mask->hdr.dst_port) {
 				list[t].h_u.sctp_hdr.dst_port =
 					sctp_spec->hdr.dst_port;
 				list[t].m_u.sctp_hdr.dst_port =
 					sctp_mask->hdr.dst_port;
+				input_set_byte += 2;
 			}
 			t++;
 		}
@@ -942,6 +963,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						vxlan_mask->vni[0];
 					input_set |=
 						ICE_INSET_TUN_VXLAN_VNI;
+					input_set_byte += 2;
 				}
 				t++;
 			}
@@ -978,6 +1000,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						nvgre_mask->tni[0];
 					input_set |=
 						ICE_INSET_TUN_NVGRE_TNI;
+					input_set_byte += 2;
 				}
 				t++;
 			}
@@ -1006,6 +1029,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.vlan_hdr.vlan =
 					vlan_mask->tci;
 				input_set |= ICE_INSET_VLAN_OUTER;
+				input_set_byte += 2;
 			}
 			if (vlan_mask->inner_type) {
 				list[t].h_u.vlan_hdr.type =
@@ -1013,6 +1037,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.vlan_hdr.type =
 					vlan_mask->inner_type;
 				input_set |= ICE_INSET_ETHERTYPE;
+				input_set_byte += 2;
 			}
 			t++;
 		}
@@ -1053,6 +1078,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.pppoe_hdr.session_id =
 					pppoe_mask->session_id;
 				input_set |= ICE_INSET_PPPOE_SESSION;
+				input_set_byte += 2;
 			}
 			t++;
 			pppoe_elem_valid = 1;
@@ -1085,7 +1111,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.pppoe_hdr.ppp_prot_id =
 					pppoe_proto_mask->proto_id;
 				input_set |= ICE_INSET_PPPOE_PROTO;
-
+				input_set_byte += 2;
 				pppoe_prot_valid = 1;
 			}
 			if ((pppoe_proto_mask->proto_id &
@@ -1142,6 +1168,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.esp_hdr.spi =
 					esp_mask->hdr.spi;
 				input_set |= ICE_INSET_ESP_SPI;
+				input_set_byte += 4;
 				t++;
 			}
 
@@ -1198,6 +1225,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.ah_hdr.spi =
 					ah_mask->spi;
 				input_set |= ICE_INSET_AH_SPI;
+				input_set_byte += 4;
 				t++;
 			}
 
@@ -1237,6 +1265,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.l2tpv3_sess_hdr.session_id =
 					l2tp_mask->session_id;
 				input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
+				input_set_byte += 4;
 				t++;
 			}
 
@@ -1342,6 +1371,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			*tun_type = ICE_SW_IPV6_UDP;
 	}
 
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"too much input set");
+		return -ENOTSUP;
+	}
+
 	*lkups_num = t;
 
 	return input_set;
-- 
2.19.1
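For reference, the accounting the diff implements reduces to the sketch
below (illustrative only; MAX_INPUT_SET_BYTE is the only name taken from
the patch, input_set_fits is a hypothetical helper). Each masked field
contributes its width in bytes, and the running total must stay within
the 32-byte hardware budget:

#include <stdbool.h>
#include <stdint.h>

#define MAX_INPUT_SET_BYTE 32

/* Sum the widths of all matched fields and compare against the 32-byte
 * input set budget, mirroring the check added at the end of
 * ice_switch_inset_get(). */
static bool
input_set_fits(const uint8_t field_bytes[], int n_fields)
{
	uint16_t input_set_byte = 0;
	int i;

	for (i = 0; i < n_fields; i++)
		input_set_byte += field_bytes[i];
	return input_set_byte <= MAX_INPUT_SET_BYTE;
}

/* Worked example: ethertype (2) + IPv6 src (16) + UDP dst (2) = 20 bytes
 * fits; eth src/dst (6 + 6) + IPv6 src/dst (16 + 16) = 44 bytes does not,
 * and such a rule is now rejected with "too much input set". */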