From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jeff Guo <jia.guo@intel.com>
To: jingjing.wu@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, jia.guo@intel.com
Date: Wed, 16 Sep 2020 12:29:46 +0800
Message-Id: <20200916042946.36105-1-jia.guo@intel.com>
X-Mailer: git-send-email 2.20.1
In-Reply-To: <20200909082031.28299-1-jia.guo@intel.com>
References: <20200909082031.28299-1-jia.guo@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v3] net/iavf: support gtpu outer and inner co-exist
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

Although currently only the GTPU inner hash is enabled and the GTPU outer
hash is not, the outer protocol still needs to co-exist with the inner
protocol when a GTPU inner hash rule is configured, so that the GTPU inner
hash can be supported for different outer protocols.
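To illustrate the intended behavior, the sketch below mirrors the new
outer/inner classification outside the driver: the first IPv4/IPv6 item in
a pattern marks the outer layer, and a second L3 item sets the matching
outer/inner hint bit. This is a minimal standalone sketch only; the
enum item_type, the PHINT_* macros and the main() harness are local
stand-ins for the rte_flow item types and the IAVF_PHINT_* definitions,
not DPDK code.

    /*
     * Standalone sketch of the outer/inner classification added to
     * iavf_hash_parse_pattern(). All names here are illustrative
     * stand-ins, not the driver's definitions.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PHINT_OUTER_IPV4_INNER_IPV4 (1ULL << 4)
    #define PHINT_OUTER_IPV4_INNER_IPV6 (1ULL << 5)
    #define PHINT_OUTER_IPV6_INNER_IPV4 (1ULL << 6)
    #define PHINT_OUTER_IPV6_INNER_IPV6 (1ULL << 7)

    enum item_type { ITEM_ETH, ITEM_IPV4, ITEM_IPV6, ITEM_UDP, ITEM_GTPU, ITEM_END };

    static uint64_t classify(const enum item_type *items)
    {
        bool outer_ipv4 = false, outer_ipv6 = false;
        uint64_t phint = 0;

        for (; *items != ITEM_END; items++) {
            switch (*items) {
            case ITEM_IPV4:
                if (outer_ipv4)        /* second L3 item: this is the inner header */
                    phint |= PHINT_OUTER_IPV4_INNER_IPV4;
                else if (outer_ipv6)
                    phint |= PHINT_OUTER_IPV6_INNER_IPV4;
                else                   /* first L3 item: remember the outer type */
                    outer_ipv4 = true;
                break;
            case ITEM_IPV6:
                if (outer_ipv4)
                    phint |= PHINT_OUTER_IPV4_INNER_IPV6;
                else if (outer_ipv6)
                    phint |= PHINT_OUTER_IPV6_INNER_IPV6;
                else
                    outer_ipv6 = true;
                break;
            default:
                break;
            }
        }
        return phint;
    }

    int main(void)
    {
        /* eth / ipv4 / udp / gtpu / ipv6: outer IPv4, inner IPv6 */
        enum item_type pattern[] = {
            ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_GTPU, ITEM_IPV6, ITEM_END
        };

        printf("phint = 0x%llx\n", (unsigned long long)classify(pattern));
        return 0;
    }

With an outer IPv4 / inner IPv6 GTPU pattern this prints the
OUTER_IPV4_INNER_IPV6 bit, which is the hint the driver later uses to pick
the outer header type it prepends.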
Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
v3->v2: delete unused param
---
 drivers/net/iavf/iavf_hash.c | 52 +++++++++++++++++++++++++++++++-----
 1 file changed, 45 insertions(+), 7 deletions(-)

diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 3152218dc..7ffaf42d1 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -29,11 +29,21 @@
 #define IAVF_PHINT_GTPU_EH_DWN		BIT_ULL(2)
 #define IAVF_PHINT_GTPU_EH_UP		BIT_ULL(3)
 
+#define IAVF_PHINT_OUTER_IPV4_INNER_IPV4	BIT_ULL(4)
+#define IAVF_PHINT_OUTER_IPV4_INNER_IPV6	BIT_ULL(5)
+#define IAVF_PHINT_OUTER_IPV6_INNER_IPV4	BIT_ULL(6)
+#define IAVF_PHINT_OUTER_IPV6_INNER_IPV6	BIT_ULL(7)
+
 #define IAVF_PHINT_GTPU_MSK	(IAVF_PHINT_GTPU	| \
				 IAVF_PHINT_GTPU_EH	| \
				 IAVF_PHINT_GTPU_EH_DWN	| \
				 IAVF_PHINT_GTPU_EH_UP)
 
+#define IAVF_PHINT_LAYERS_MSK	(IAVF_PHINT_OUTER_IPV4_INNER_IPV4 | \
+				 IAVF_PHINT_OUTER_IPV4_INNER_IPV6 | \
+				 IAVF_PHINT_OUTER_IPV6_INNER_IPV4 | \
+				 IAVF_PHINT_OUTER_IPV6_INNER_IPV6)
+
 #define IAVF_GTPU_EH_DWNLINK	0
 #define IAVF_GTPU_EH_UPLINK	1
@@ -499,12 +509,13 @@ iavf_hash_init(struct iavf_adapter *ad)
 }
 
 static int
-iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
-			const struct rte_flow_item pattern[], uint64_t *phint,
+iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
			struct rte_flow_error *error)
 {
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_gtp_psc *psc;
+	bool outer_ipv4 = false;
+	bool outer_ipv6 = false;
 
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
@@ -515,6 +526,22 @@ iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
		}
 
		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (outer_ipv4)
+				*phint |= IAVF_PHINT_OUTER_IPV4_INNER_IPV4;
+			else if (outer_ipv6)
+				*phint |= IAVF_PHINT_OUTER_IPV6_INNER_IPV4;
+			else
+				outer_ipv4 = true;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			if (outer_ipv4)
+				*phint |= IAVF_PHINT_OUTER_IPV4_INNER_IPV6;
+			else if (outer_ipv6)
+				*phint |= IAVF_PHINT_OUTER_IPV6_INNER_IPV6;
+			else
+				outer_ipv6 = true;
+			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			*phint |= IAVF_PHINT_GTPU;
			break;
@@ -533,9 +560,6 @@
		}
	}
 
-	/* update and restore pattern hint */
-	*phint |= *(uint64_t *)(pattern_match_item->meta);
-
	return 0;
 }
 
@@ -712,6 +736,7 @@ static void
 iavf_refine_proto_hdrs_by_pattern(struct virtchnl_proto_hdrs *proto_hdrs,
				  uint64_t phint)
 {
+	struct virtchnl_proto_hdr *hdr_outer;
	struct virtchnl_proto_hdr *hdr1;
	struct virtchnl_proto_hdr *hdr2;
	int i;
@@ -720,6 +745,20 @@ iavf_refine_proto_hdrs_by_pattern(struct virtchnl_proto_hdrs *proto_hdrs,
		return;
 
	if (proto_hdrs->tunnel_level == TUNNEL_LEVEL_INNER) {
+		if (phint & IAVF_PHINT_LAYERS_MSK) {
+			/* adding gtpu outer header */
+			hdr_outer = &proto_hdrs->proto_hdr[proto_hdrs->count];
+			hdr_outer->field_selector = 0;
+			proto_hdrs->count++;
+
+			if (phint & (IAVF_PHINT_OUTER_IPV4_INNER_IPV4 |
+				     IAVF_PHINT_OUTER_IPV4_INNER_IPV6))
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_outer, IPV4);
+			else if (phint & (IAVF_PHINT_OUTER_IPV6_INNER_IPV4 |
+					  IAVF_PHINT_OUTER_IPV6_INNER_IPV6))
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_outer, IPV6);
+		}
+
		/* shift headers 1 layer */
		for (i = proto_hdrs->count; i > 0; i--) {
			hdr1 = &proto_hdrs->proto_hdr[i];
@@ -908,8 +947,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
		goto error;
	}
 
-	ret = iavf_hash_parse_pattern(pattern_match_item, pattern, &phint,
-				      error);
+	ret = iavf_hash_parse_pattern(pattern, &phint, error);
	if (ret)
		goto error;
 
-- 
2.20.1
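
A note on the design choice: the outer header appended in
iavf_refine_proto_hdrs_by_pattern() carries field_selector = 0, so it only
pins the outer protocol type for the rule while selecting none of its
fields for the RSS hash, which is consistent with the commit message (the
GTPU outer hash itself is not enabled yet). The standalone sketch below
illustrates that idea; struct proto_hdr / proto_hdrs and their field
layout are local stand-ins, not the real virtchnl definitions.

    /*
     * Sketch of appending an outer protocol header that selects no hash
     * fields, as done for the IAVF_PHINT_OUTER_* hints. Stand-in types
     * only; not the virtchnl structures.
     */
    #include <stdint.h>
    #include <stdio.h>

    enum hdr_type { HDR_NONE, HDR_IPV4, HDR_IPV6, HDR_UDP, HDR_GTPU };

    struct proto_hdr {
        enum hdr_type type;
        uint32_t field_selector;    /* one bit per hashed field; 0 = none */
    };

    struct proto_hdrs {
        int count;
        struct proto_hdr proto_hdr[8];
    };

    /* Append an outer IPv4/IPv6 header whose fields are excluded from the hash. */
    static void add_outer_header(struct proto_hdrs *hdrs, enum hdr_type outer)
    {
        struct proto_hdr *hdr_outer = &hdrs->proto_hdr[hdrs->count];

        hdr_outer->field_selector = 0;  /* outer header co-exists, but is not hashed */
        hdr_outer->type = outer;
        hdrs->count++;
    }

    int main(void)
    {
        struct proto_hdrs hdrs = { 0 };

        /* inner IPv6 hashed on two of its fields (bits 0 and 1 here) */
        hdrs.proto_hdr[hdrs.count].type = HDR_IPV6;
        hdrs.proto_hdr[hdrs.count].field_selector = 0x3;
        hdrs.count++;

        add_outer_header(&hdrs, HDR_IPV4);

        for (int i = 0; i < hdrs.count; i++)
            printf("hdr %d: type %d selector 0x%x\n",
                   i, hdrs.proto_hdr[i].type, hdrs.proto_hdr[i].field_selector);
        return 0;
    }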