DPDK patches and discussions
From: Jie Wang <jie1x.wang@intel.com>
To: dev@dpdk.org
Cc: stevex.yang@intel.com, orika@nvidia.com,
	aman.deep.singh@intel.com, ferruh.yigit@intel.com,
	thomas@monjalon.net, andrew.rybchenko@oktetlabs.ru,
	jingjing.wu@intel.com, beilei.xing@intel.com,
	qi.z.zhang@intel.com, Jie Wang <jie1x.wang@intel.com>
Subject: [PATCH 5/6] net/iavf: support L2TPv2 for AVF HASH
Date: Mon, 24 Jan 2022 14:56:12 +0800	[thread overview]
Message-ID: <20220124065613.2197436-6-jie1x.wang@intel.com> (raw)
In-Reply-To: <20220124065613.2197436-1-jie1x.wang@intel.com>

Add RSS hash support for the L2TPv2 protocol, including PPP over
L2TPv2 over UDP, hashing on the outer MAC source address and the
L2TPv2 session ID. When the L2TPv2 item spec has the L (length) bit
set in flags_version, the hash input is the session ID field of the
header variant that carries the optional length field
(L2TPV2_LEN_SESS_ID); otherwise the plain session ID field is used.

Patterns are listed below:
eth/ipv4/udp/l2tpv2
eth/ipv4/udp/l2tpv2/ppp
eth/ipv6/udp/l2tpv2
eth/ipv6/udp/l2tpv2/ppp
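
As a usage sketch only (not part of this patch), an application could
request this hash through rte_flow roughly as follows. This is a
fragment for the application's setup path; the port id, attributes and
error handling are illustrative, and RTE_ETH_RSS_L2TPV2 comes from
patch 1/6 of this series:

  #include <rte_ethdev.h>
  #include <rte_flow.h>

  /* Match eth/ipv4/udp/l2tpv2 and spread flows by outer MAC source
   * address plus L2TPv2 session ID.
   */
  struct rte_flow_attr attr = { .ingress = 1 };
  struct rte_flow_item pattern[] = {
          { .type = RTE_FLOW_ITEM_TYPE_ETH },
          { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
          { .type = RTE_FLOW_ITEM_TYPE_UDP },
          { .type = RTE_FLOW_ITEM_TYPE_L2TPV2 },
          { .type = RTE_FLOW_ITEM_TYPE_END },
  };
  struct rte_flow_action_rss rss_conf = {
          .types = RTE_ETH_RSS_L2TPV2 | RTE_ETH_RSS_ETH |
                   RTE_ETH_RSS_L2_SRC_ONLY,
  };
  struct rte_flow_action actions[] = {
          { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
          { .type = RTE_FLOW_ACTION_TYPE_END },
  };
  struct rte_flow_error error;
  struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
                                          actions, &error);

The equivalent testpmd rule should look roughly like
  flow create 0 ingress pattern eth / ipv4 / udp / l2tpv2 / end
       actions rss types l2tpv2 eth l2-src-only end queues end / end
with the exact token names depending on the testpmd changes made
elsewhere in this series.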

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 doc/guides/rel_notes/release_22_03.rst |  5 ++
 drivers/net/iavf/iavf.h                |  2 +
 drivers/net/iavf/iavf_generic_flow.c   | 34 +++++++++++
 drivers/net/iavf/iavf_generic_flow.h   |  6 ++
 drivers/net/iavf/iavf_hash.c           | 83 ++++++++++++++++++++++++--
 5 files changed, 126 insertions(+), 4 deletions(-)

diff --git a/doc/guides/rel_notes/release_22_03.rst b/doc/guides/rel_notes/release_22_03.rst
index 30695f11b7..472005e74c 100644
--- a/doc/guides/rel_notes/release_22_03.rst
+++ b/doc/guides/rel_notes/release_22_03.rst
@@ -60,6 +60,11 @@ New Features
   Added macros RTE_ETH_RSS_L2TPV2, now L2TPv2 field can be used as
   input set for RSS.
 
+* **Updated Intel iavf driver.**
+
+  Added L2TPv2 (including PPP over L2TPv2) RSS hash, distributing packets
+  based on the outer MAC source address and the L2TPv2 session ID.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 0bb5698583..a01d18e61b 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -93,6 +93,8 @@
 
 #define IAVF_VLAN_TAG_PCP_OFFSET 13
 
+#define IAVF_L2TPV2_FLAGS_LEN	0x4000
+
 struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 2befa125ac..7f411a764c 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1611,6 +1611,40 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_udp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_l2tpv2[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_L2TPV2,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_l2tpv2_ppp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_L2TPV2,
+	RTE_FLOW_ITEM_TYPE_PPP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_L2TPV2,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_L2TPV2,
+	RTE_FLOW_ITEM_TYPE_PPP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 /* PPPoL2TPv2oUDP */
 enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 3681a96b31..107bbc1a23 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -410,6 +410,12 @@ extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_tcp[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_udp[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_udp[];
 
+/* L2TPv2 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_l2tpv2[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_l2tpv2_ppp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp[];
+
 /* PPPoL2TPv2oUDP */
 extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6[];
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 5e0888ea68..c3b1bfe270 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -36,6 +36,7 @@
 #define IAVF_PHINT_MID_IPV6			BIT_ULL(8)
 /* L2TPv2 */
 #define IAVF_PHINT_L2TPV2			BIT_ULL(9)
+#define IAVF_PHINT_L2TPV2_LEN			BIT_ULL(10)
 
 #define IAVF_PHINT_GTPU_MSK	(IAVF_PHINT_GTPU	| \
 				 IAVF_PHINT_GTPU_EH	| \
@@ -167,7 +168,9 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID), {BUFF_NOUSED} }
 
 #define proto_hdr_l2tpv2 { \
-	VIRTCHNL_PROTO_HDR_L2TPV2, 0, {BUFF_NOUSED} }
+	VIRTCHNL_PROTO_HDR_L2TPV2, \
+	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID) | \
+	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID), {BUFF_NOUSED} }
 
 #define proto_hdr_ppp { \
 	VIRTCHNL_PROTO_HDR_PPP, 0, {BUFF_NOUSED} }
@@ -392,6 +395,40 @@ struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tcp_tmplt = {
 	 proto_hdr_tcp}
 };
 
+struct virtchnl_proto_hdrs ipv4_l2tpv2_tmplt = {
+	TUNNEL_LEVEL_OUTER, 4,
+	{proto_hdr_eth,
+	 proto_hdr_ipv4,
+	 proto_hdr_udp,
+	 proto_hdr_l2tpv2}
+};
+
+struct virtchnl_proto_hdrs ipv6_l2tpv2_tmplt = {
+	TUNNEL_LEVEL_OUTER, 4,
+	{proto_hdr_eth,
+	 proto_hdr_ipv6,
+	 proto_hdr_udp,
+	 proto_hdr_l2tpv2}
+};
+
+struct virtchnl_proto_hdrs ipv4_l2tpv2_ppp_tmplt = {
+	TUNNEL_LEVEL_OUTER, 5,
+	{proto_hdr_eth,
+	 proto_hdr_ipv4,
+	 proto_hdr_udp,
+	 proto_hdr_l2tpv2,
+	 proto_hdr_ppp}
+};
+
+struct virtchnl_proto_hdrs ipv6_l2tpv2_ppp_tmplt = {
+	TUNNEL_LEVEL_OUTER, 5,
+	{proto_hdr_eth,
+	 proto_hdr_ipv6,
+	 proto_hdr_udp,
+	 proto_hdr_l2tpv2,
+	 proto_hdr_ppp}
+};
+
 /* rss type super set */
 
 /* IPv4 outer */
@@ -480,6 +517,11 @@ struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tcp_tmplt = {
 #define IAVF_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
 #define IAVF_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
+/* L2TPv2 control */
+#define IAVF_RSS_TYPE_ETH_L2TPV2	(RTE_ETH_RSS_L2TPV2 | \
+					 RTE_ETH_RSS_ETH | \
+					 RTE_ETH_RSS_L2_SRC_ONLY)
+
 /**
  * Supported pattern for hash.
  * The first member is pattern item type,
@@ -547,6 +589,8 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4_udp,	IAVF_RSS_TYPE_INNER_IPV4_UDP, &inner_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv4_udp,	IAVF_RSS_TYPE_INNER_IPV4_UDP, &inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_udp_l2tpv2,		IAVF_RSS_TYPE_ETH_L2TPV2,	&ipv4_l2tpv2_tmplt},
+	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp,		IAVF_RSS_TYPE_ETH_L2TPV2,	&ipv4_l2tpv2_ppp_tmplt},
 	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4,	IAVF_RSS_TYPE_INNER_IPV4,	&udp_l2tpv2_ppp_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4_udp,	IAVF_RSS_TYPE_INNER_IPV4_UDP,	&udp_l2tpv2_ppp_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP,	&udp_l2tpv2_ppp_ipv4_tcp_tmplt},
@@ -614,6 +658,8 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6_udp,	IAVF_RSS_TYPE_INNER_IPV6_UDP, &inner_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv6_udp,	IAVF_RSS_TYPE_INNER_IPV6_UDP, &inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_udp_l2tpv2,		IAVF_RSS_TYPE_ETH_L2TPV2,	&ipv6_l2tpv2_tmplt},
+	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp,		IAVF_RSS_TYPE_ETH_L2TPV2,	&ipv6_l2tpv2_ppp_tmplt},
 	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6,	IAVF_RSS_TYPE_INNER_IPV6,	&udp_l2tpv2_ppp_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6_udp,	IAVF_RSS_TYPE_INNER_IPV6_UDP,	&udp_l2tpv2_ppp_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP,	&udp_l2tpv2_ppp_ipv6_tcp_tmplt},
@@ -744,6 +790,8 @@ iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
 	const struct rte_flow_item_gtp_psc *psc;
 	const struct rte_flow_item_ecpri *ecpri;
 	struct rte_ecpri_common_hdr ecpri_common;
+	const struct rte_flow_item_l2tpv2 *l2tpv2;
+	uint16_t flags_version;
 
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
@@ -802,7 +850,18 @@ iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
 			*phint |= IAVF_PHINT_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_L2TPV2:
-			*phint |= IAVF_PHINT_L2TPV2;
+			l2tpv2 = item->spec;
+
+			if (l2tpv2) {
+				flags_version = rte_be_to_cpu_16(
+					    l2tpv2->hdr.common.flags_version);
+				if (flags_version & IAVF_L2TPV2_FLAGS_LEN)
+					*phint |= IAVF_PHINT_L2TPV2_LEN;
+				else
+					*phint |= IAVF_PHINT_L2TPV2;
+			} else {
+				*phint |= IAVF_PHINT_L2TPV2;
+			}
 			break;
 		default:
 			break;
@@ -1024,6 +1083,10 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			if (!(rss_type & RTE_ETH_RSS_ECPRI))
 				hdr->field_selector = 0;
 			break;
+		case VIRTCHNL_PROTO_HDR_L2TPV2:
+			if (!(rss_type & RTE_ETH_RSS_L2TPV2))
+				hdr->field_selector = 0;
+			break;
 		default:
 			break;
 		}
@@ -1130,10 +1193,10 @@ static void
 iavf_refine_proto_hdrs_l2tpv2(struct virtchnl_proto_hdrs *proto_hdrs,
 			      uint64_t phint)
 {
-	struct virtchnl_proto_hdr *hdr1;
+	struct virtchnl_proto_hdr *hdr, *hdr1;
 	int i;
 
-	if (!(phint & IAVF_PHINT_L2TPV2))
+	if (!(phint & IAVF_PHINT_L2TPV2) && !(phint & IAVF_PHINT_L2TPV2_LEN))
 		return;
 
 	if (proto_hdrs->tunnel_level == TUNNEL_LEVEL_INNER) {
@@ -1150,7 +1213,19 @@ iavf_refine_proto_hdrs_l2tpv2(struct virtchnl_proto_hdrs *proto_hdrs,
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4);
 		else if (phint & IAVF_PHINT_OUTER_IPV6)
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV6);
+	} else {
+		for (i = 0; i < proto_hdrs->count; i++) {
+			hdr = &proto_hdrs->proto_hdr[i];
+			if (hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2) {
+				if (phint & IAVF_PHINT_L2TPV2) {
+					REFINE_PROTO_FLD(DEL, L2TPV2_LEN_SESS_ID);
+				} else if (phint & IAVF_PHINT_L2TPV2_LEN) {
+					REFINE_PROTO_FLD(DEL, L2TPV2_SESS_ID);
+				}
+			}
+		}
 	}
+
 }
 
 static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
-- 
2.25.1


Thread overview: 9+ messages
2022-01-24  6:56 [PATCH 0/6] support L2TPv2 for AVF RSS hash and FDIR Jie Wang
2022-01-24  6:56 ` [PATCH 1/6] ethdev: add L2TPv2 RSS offload type Jie Wang
2022-01-24 16:35   ` Ori Kam
2022-01-25  1:54     ` Wang, Jie1X
2022-01-24  6:56 ` [PATCH 2/6] net: fix L2TPv2 common header Jie Wang
2022-01-24  6:56 ` [PATCH 3/6] app/testpmd: add 6 types of L2TPv2 message Jie Wang
2022-01-24  6:56 ` [PATCH 4/6] common/iavf: add session ID fields for L2TPv2 Jie Wang
2022-01-24  6:56 ` Jie Wang [this message]
2022-01-24  6:56 ` [PATCH 6/6] net/iavf: support L2TPv2 for AVF FDIR Jie Wang
