From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 747F2A04C0; Mon, 28 Sep 2020 08:37:55 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id BF36D1D5FC; Mon, 28 Sep 2020 08:37:22 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id EA0C71BFDA for ; Mon, 28 Sep 2020 08:37:19 +0200 (CEST) IronPort-SDR: /LRfvvcqxvb4p2cM9C/sICn4LHSz5OyQh0Gx7D3+lx7vT+wzIYwfMw6gazWqQ1Qag7azYhQ6UW zm8kiWovTnpg== X-IronPort-AV: E=McAfee;i="6000,8403,9757"; a="159293726" X-IronPort-AV: E=Sophos;i="5.77,312,1596524400"; d="scan'208";a="159293726" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga002.jf.intel.com ([10.7.209.21]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Sep 2020 23:37:19 -0700 IronPort-SDR: QqhmMqP0JALxCu3xEWA+YG7XHf2WcBtrOq36xiveq7cyO84D5nLtozP9xSEizRHwGVXQep97oH SX6LonQqCVlg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,312,1596524400"; d="scan'208";a="324188100" Received: from dpdk-zhirun-dev.sh.intel.com ([10.67.118.201]) by orsmga002.jf.intel.com with ESMTP; 27 Sep 2020 23:37:16 -0700 From: Zhirun Yan To: qi.z.zhang@intel.com, dev@dpdk.org Cc: yahui.cao@intel.com, xiao.w.wang@intel.com, simei.su@intel.com, junfeng.guo@intel.com, Zhirun Yan Date: Mon, 28 Sep 2020 14:31:46 +0800 Message-Id: <20200928063146.668003-3-zhirun.yan@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20200928063146.668003-1-zhirun.yan@intel.com> References: <20200928063146.668003-1-zhirun.yan@intel.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Subject: [dpdk-dev] [PATCH v1 2/2] net/ice: support inner/outer L2/L3 field for FDIR X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , 
Errors-To: dev-bounces@dpdk.org Sender: "dev" Distinguish inner/outer fields for parse pattern. So FDIR for tunnel can be more flexible. Enable VXLAN inner/outer L3/L4 different fields for FDIR. Signed-off-by: Zhirun Yan --- drivers/net/ice/ice_fdir_filter.c | 59 +++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 3 deletions(-) diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index 175abcdd5..961528d17 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -56,6 +56,11 @@ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT) #define ICE_FDIR_INSET_VXLAN_IPV4 (\ + ICE_FDIR_INSET_ETH | \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_IPV4_TOS | \ + ICE_INSET_UDP_DST_PORT | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_SMAC | \ ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST) #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\ @@ -907,6 +912,7 @@ ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field) }; static const struct ice_inset_map ice_inset_map[] = { {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA}, + {ICE_INSET_SMAC, ICE_FLOW_FIELD_IDX_ETH_SA}, {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE}, {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA}, {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA}, @@ -1655,6 +1661,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, uint32_t vtc_flow_cpu; uint16_t ether_type; enum rte_flow_item_type next_type; + bool is_outer_part = true; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) { + tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN; + break; + } + } for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { if (item->last) { @@ -1672,7 +1686,25 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, eth_mask = item->mask; next_type = (item + 1)->type; - if (eth_spec && eth_mask) { + if (!(eth_spec && eth_mask)) + break; + + /* handle outer L2 fields */ 
+ if (is_outer_part && tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN) { + if (!rte_is_zero_ether_addr(&eth_mask->dst)) { + filter->outer_input_set |= ICE_INSET_DMAC; + rte_memcpy(&filter->input.ext_data_outer.dst_mac, + &eth_spec->dst, + RTE_ETHER_ADDR_LEN); + } + + if (!rte_is_zero_ether_addr(&eth_mask->src)) { + filter->outer_input_set |= ICE_INSET_SMAC; + rte_memcpy(&filter->input.ext_data_outer.src_mac, + &eth_spec->src, + RTE_ETHER_ADDR_LEN); + } + } else { if (!rte_is_zero_ether_addr(&eth_mask->dst)) { input_set |= ICE_INSET_DMAC; rte_memcpy(&filter->input.ext_data.dst_mac, @@ -1714,7 +1746,27 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, ipv4_spec = item->spec; ipv4_mask = item->mask; - if (ipv4_spec && ipv4_mask) { + if (!(ipv4_spec && ipv4_mask)) + break; + + /* handle outer L3 fields */ + if (is_outer_part && tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN) { + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) { + filter->outer_input_set |= ICE_INSET_IPV4_DST; + filter->input.ip_outer.v4.dst_ip = + ipv4_spec->hdr.dst_addr; + } + if (ipv4_mask->hdr.src_addr == UINT32_MAX) { + filter->outer_input_set |= ICE_INSET_IPV4_SRC; + filter->input.ip_outer.v4.src_ip = + ipv4_spec->hdr.src_addr; + } + if (ipv4_mask->hdr.type_of_service == UINT8_MAX) { + filter->outer_input_set |= ICE_INSET_IPV4_TOS; + filter->input.ip_outer.v4.tos = + ipv4_spec->hdr.type_of_service; + } + } else { /* Check IPv4 mask and update input set */ if (ipv4_mask->hdr.version_ihl || ipv4_mask->hdr.total_length || @@ -1944,6 +1996,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, break; case RTE_FLOW_ITEM_TYPE_VXLAN: l3 = RTE_FLOW_ITEM_TYPE_END; + is_outer_part = false; + vxlan_spec = item->spec; vxlan_mask = item->mask; @@ -1955,7 +2009,6 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, return -rte_errno; } - tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN; break; case RTE_FLOW_ITEM_TYPE_GTPU: l3 = RTE_FLOW_ITEM_TYPE_END; -- 2.25.1