DPDK patches and discussions
* [dpdk-dev] [PATCH 0/2] support GTPU inner IPv4/IPv6 for AVF FDIR
@ 2021-02-22 10:09 Junfeng Guo
  2021-02-22 10:09 ` [dpdk-dev] [PATCH 1/2] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
  2021-02-22 10:09 ` [dpdk-dev] [PATCH 2/2] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
  0 siblings, 2 replies; 27+ messages in thread
From: Junfeng Guo @ 2021-02-22 10:09 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo

Support GTPU_(EH)_IPV4/IPV6 inner L3 and L4 fields matching for AVF FDIR.

[PATCH 1/2] support GTPU inner IPv4 for FDIR.
[PATCH 2/2] support GTPU inner IPv6 for FDIR.

Junfeng Guo (2):
  net/iavf: support GTPU inner IPv4 for FDIR
  net/iavf: support GTPU inner IPv6 for FDIR

 drivers/net/iavf/iavf_fdir.c         | 61 ++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h | 31 ++++++++++++++
 2 files changed, 92 insertions(+)

-- 
2.25.1



* [dpdk-dev] [PATCH 1/2] net/iavf: support GTPU inner IPv4 for FDIR
  2021-02-22 10:09 [dpdk-dev] [PATCH 0/2] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
@ 2021-02-22 10:09 ` Junfeng Guo
  2021-03-26 10:42   ` [dpdk-dev] [PATCH v2 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
  2021-02-22 10:09 ` [dpdk-dev] [PATCH 2/2] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
  1 sibling, 1 reply; 27+ messages in thread
From: Junfeng Guo @ 2021-02-22 10:09 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo

Support GTPU_(EH)_IPV4 inner L3 and L4 fields matching for AVF FDIR.
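
For illustration only (not part of the patch), this is roughly how an
application could exercise the new eth/ipv4/gtpu/ipv4 pattern through the
generic rte_flow API; the port id, queue index and inner destination
address below are placeholder assumptions, and the testpmd command in the
comment is likewise only a sketch.

#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

/*
 * Sketch: FDIR rule matching GTPU inner IPv4 destination 10.0.0.1 and
 * steering it to queue 3. Assumed testpmd equivalent:
 *   flow create 0 ingress pattern eth / ipv4 / gtpu / ipv4 dst is 10.0.0.1
 *   / end actions queue index 3 / end
 */
static struct rte_flow *
create_gtpu_inner_ipv4_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 inner_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
	};
	struct rte_flow_item_ipv4 inner_mask = {
		.hdr.dst_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* outer IPv4, any */
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,	/* inner IPv4 */
		  .spec = &inner_spec, .mask = &inner_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}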

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         | 37 ++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h | 21 ++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 4e864b4b9c..a15574c9ea 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -75,6 +75,19 @@
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID)
 
+#define IAVF_FDIR_INSET_GTPU_IPV4 (\
+	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
+	IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
+	IAVF_INSET_TUN_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
+	IAVF_FDIR_INSET_GTPU_IPV4 | \
+	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
+	IAVF_FDIR_INSET_GTPU_IPV4 | \
+	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)
+
 #define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
@@ -121,7 +134,13 @@ static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_IPV4_GTPU,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,    IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu,		IAVF_FDIR_INSET_IPV6_GTPU,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu_eh,		IAVF_FDIR_INSET_IPV6_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
@@ -534,6 +553,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 	enum rte_flow_item_type next_type;
 	uint16_t ether_type;
 
+	u8 tun_inner = 0;
 	int layer = 0;
 	struct virtchnl_proto_hdr *hdr;
 
@@ -650,6 +670,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_IPV4_OUTER;
+					input_set |= IAVF_PROT_IPV4_INNER;
+				}
+
 				rte_memcpy(hdr->buffer,
 					&ipv4_spec->hdr,
 					sizeof(ipv4_spec->hdr));
@@ -736,6 +761,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_UDP_OUTER;
+					input_set |= IAVF_PROT_UDP_INNER;
+				}
+
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 					rte_memcpy(hdr->buffer,
 						&udp_spec->hdr,
@@ -780,6 +810,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_TCP_OUTER;
+					input_set |= IAVF_PROT_TCP_INNER;
+				}
+
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 					rte_memcpy(hdr->buffer,
 						&tcp_spec->hdr,
@@ -858,6 +893,8 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					gtp_spec, sizeof(*gtp_spec));
 			}
 
+			tun_inner = 1;
+
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 0ccf5901b4..f7bdd094e1 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -95,6 +95,17 @@
 #define IAVF_INSET_IPV6_TC \
 	(IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
 
+#define IAVF_INSET_TUN_IPV4_SRC \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_SRC)
+#define IAVF_INSET_TUN_IPV4_DST \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_DST)
+#define IAVF_INSET_TUN_IPV4_TOS \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_TOS)
+#define IAVF_INSET_TUN_IPV4_PROTO \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_PROTO)
+#define IAVF_INSET_TUN_IPV4_TTL \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_TTL)
+
 #define IAVF_INSET_TCP_SRC_PORT \
 	(IAVF_PROT_TCP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_TCP_DST_PORT \
@@ -103,6 +114,16 @@
 	(IAVF_PROT_UDP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_UDP_DST_PORT \
 	(IAVF_PROT_UDP_OUTER | IAVF_DPORT)
+
+#define IAVF_INSET_TUN_TCP_SRC_PORT \
+	(IAVF_PROT_TCP_INNER | IAVF_SPORT)
+#define IAVF_INSET_TUN_TCP_DST_PORT \
+	(IAVF_PROT_TCP_INNER | IAVF_DPORT)
+#define IAVF_INSET_TUN_UDP_SRC_PORT \
+	(IAVF_PROT_UDP_INNER | IAVF_SPORT)
+#define IAVF_INSET_TUN_UDP_DST_PORT \
+	(IAVF_PROT_UDP_INNER | IAVF_DPORT)
+
 #define IAVF_INSET_SCTP_SRC_PORT \
 	(IAVF_PROT_SCTP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_SCTP_DST_PORT \
-- 
2.25.1



* [dpdk-dev] [PATCH 2/2] net/iavf: support GTPU inner IPv6 for FDIR
  2021-02-22 10:09 [dpdk-dev] [PATCH 0/2] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
  2021-02-22 10:09 ` [dpdk-dev] [PATCH 1/2] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
@ 2021-02-22 10:09 ` Junfeng Guo
  1 sibling, 0 replies; 27+ messages in thread
From: Junfeng Guo @ 2021-02-22 10:09 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo

Support GTPU_(EH)_IPV6 inner L3 and L4 fields matching for AVF FDIR.
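
On the application side only the inner L3 item changes compared with the
IPv4 case of the previous patch. A minimal sketch, assuming <string.h>
and <rte_flow.h> are included and the caller supplies the inner
destination address:

static void
fill_inner_ipv6_item(struct rte_flow_item *item,
		     struct rte_flow_item_ipv6 *spec,
		     struct rte_flow_item_ipv6 *mask,
		     const uint8_t dst[16])
{
	memset(item, 0, sizeof(*item));
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));
	memcpy(spec->hdr.dst_addr, dst, 16);	/* inner IPv6 destination */
	memset(mask->hdr.dst_addr, 0xff, 16);	/* match all 128 bits */
	item->type = RTE_FLOW_ITEM_TYPE_IPV6;
	item->spec = spec;
	item->mask = mask;
}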

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         | 24 ++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h | 10 ++++++++++
 2 files changed, 34 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index a15574c9ea..94ea2d959e 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -96,6 +96,19 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_GTPU_TEID)
 
+#define IAVF_FDIR_INSET_GTPU_IPV6 (\
+	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
+	IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
+	IAVF_INSET_TUN_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
+	IAVF_FDIR_INSET_GTPU_IPV6 | \
+	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
+	IAVF_FDIR_INSET_GTPU_IPV6 | \
+	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)
+
 #define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
@@ -137,10 +150,16 @@ static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,    IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6,    IAVF_FDIR_INSET_GTPU_IPV6,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp,IAVF_FDIR_INSET_GTPU_IPV6_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp,IAVF_FDIR_INSET_GTPU_IPV6_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu,		IAVF_FDIR_INSET_IPV6_GTPU,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu_eh,		IAVF_FDIR_INSET_IPV6_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
@@ -727,6 +746,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_IPV6_OUTER;
+					input_set |= IAVF_PROT_IPV6_INNER;
+				}
+
 				rte_memcpy(hdr->buffer,
 					&ipv6_spec->hdr,
 					sizeof(ipv6_spec->hdr));
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index f7bdd094e1..005eeb3553 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -105,6 +105,16 @@
 	(IAVF_PROT_IPV4_INNER | IAVF_IP_PROTO)
 #define IAVF_INSET_TUN_IPV4_TTL \
 	(IAVF_PROT_IPV4_INNER | IAVF_IP_TTL)
+#define IAVF_INSET_TUN_IPV6_SRC \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_SRC)
+#define IAVF_INSET_TUN_IPV6_DST \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_DST)
+#define IAVF_INSET_TUN_IPV6_NEXT_HDR \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_PROTO)
+#define IAVF_INSET_TUN_IPV6_HOP_LIMIT \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_TTL)
+#define IAVF_INSET_TUN_IPV6_TC \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_TOS)
 
 #define IAVF_INSET_TCP_SRC_PORT \
 	(IAVF_PROT_TCP_OUTER | IAVF_SPORT)
-- 
2.25.1



* Re: [dpdk-dev] [PATCH v2 3/3] doc: add release notes for 21.05
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 3/3] doc: add release notes for 21.05 Junfeng Guo
@ 2021-03-26  6:08       ` Zhang, Qi Z
  0 siblings, 0 replies; 27+ messages in thread
From: Zhang, Qi Z @ 2021-03-26  6:08 UTC (permalink / raw)
  To: Guo, Junfeng, Wu, Jingjing, Xing, Beilei; +Cc: dev



> -----Original Message-----
> From: Guo, Junfeng <junfeng.guo@intel.com>
> Sent: Friday, March 26, 2021 6:42 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Guo, Junfeng <junfeng.guo@intel.com>
> Subject: [PATCH v2 3/3] doc: add release notes for 21.05
> 
> Add support for FDIR GTPU_(EH)_IPv4/IPv6 inner L3/L4 fields matching.
> 
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> ---
>  doc/guides/rel_notes/release_21_05.rst | 6 ++++++
>  1 file changed, 6 insertions(+)
> 
> diff --git a/doc/guides/rel_notes/release_21_05.rst
> b/doc/guides/rel_notes/release_21_05.rst
> index 22aa80a15b..f4ec406a6e 100644
> --- a/doc/guides/rel_notes/release_21_05.rst
> +++ b/doc/guides/rel_notes/release_21_05.rst
> @@ -117,6 +117,12 @@ New Features
>    * Added command to display Rx queue used descriptor count.
>      ``show port (port_id) rxq (queue_id) desc used count``
> 
> +* **Updated Intel iavf driver.**
> +
> +  Updated the Intel iavf driver with new features and improvements,
> including:
> +
> +  * Added support for FDIR GTPU_(EH)_IPv4/IPv6 inner L3/L4 fields
> matching.

How about 
"Added flow filter to support GTPU inner L3/L4 fields matching."


> +
> 
>  Removed Items
>  -------------
> --
> 2.25.1



* Re: [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR.
  2021-03-26 14:29       ` [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
@ 2021-03-26  6:41         ` Zhang, Qi Z
  2021-03-31 14:54           ` Ferruh Yigit
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
                           ` (2 subsequent siblings)
  3 siblings, 1 reply; 27+ messages in thread
From: Zhang, Qi Z @ 2021-03-26  6:41 UTC (permalink / raw)
  To: Guo, Junfeng, Wu, Jingjing, Xing, Beilei; +Cc: dev, Wang, Haiyue, Zhang, Yuying



> -----Original Message-----
> From: Guo, Junfeng <junfeng.guo@intel.com>
> Sent: Friday, March 26, 2021 10:30 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Zhang, Yuying
> <yuying.zhang@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> Subject: [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR.
> 
> Support GTPU_(EH)_IPV4/IPV6 inner L3 and L4 fields matching for AVF FDIR.
> 
> v2:
> * add release notes for 21.05.
> v3:
> * update release notes for 21.05.
> 
> [PATCH v3 1/3] support GTPU inner IPv4 for FDIR.
> [PATCH v3 2/3] support GTPU inner IPv6 for FDIR.
> [PATCH v3 3/3] add release notes for 21.05.
> 
> Junfeng Guo (3):
>   net/iavf: support GTPU inner IPv4 for FDIR
>   net/iavf: support GTPU inner IPv6 for FDIR
>   doc: add release notes for 21.05
> 
>  doc/guides/rel_notes/release_21_05.rst |  6 +++
>  drivers/net/iavf/iavf_fdir.c           | 61 ++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_generic_flow.h   | 31 +++++++++++++
>  3 files changed, 98 insertions(+)
> 
> --
> 2.25.1

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi



* [dpdk-dev] [PATCH v2 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR.
  2021-02-22 10:09 ` [dpdk-dev] [PATCH 1/2] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
@ 2021-03-26 10:42   ` Junfeng Guo
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
                       ` (2 more replies)
  0 siblings, 3 replies; 27+ messages in thread
From: Junfeng Guo @ 2021-03-26 10:42 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo

Support GTPU_(EH)_IPV4/IPV6 inner L3 and L4 fields matching for AVF FDIR.

v2:
* add release notes for 21.05.

[PATCH v2 1/3] support GTPU inner IPv4 for FDIR.
[PATCH v2 2/3] support GTPU inner IPv6 for FDIR.
[PATCH v2 3/3] add release notes for 21.05.

Junfeng Guo (3):
  net/iavf: support GTPU inner IPv4 for FDIR
  net/iavf: support GTPU inner IPv6 for FDIR
  doc: add release notes for 21.05

 doc/guides/rel_notes/release_21_05.rst |  6 +++
 drivers/net/iavf/iavf_fdir.c           | 61 ++++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h   | 31 +++++++++++++
 3 files changed, 98 insertions(+)

-- 
2.25.1



* [dpdk-dev] [PATCH v2 1/3] net/iavf: support GTPU inner IPv4 for FDIR
  2021-03-26 10:42   ` [dpdk-dev] [PATCH v2 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
@ 2021-03-26 10:42     ` Junfeng Guo
  2021-03-26 14:29       ` [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 2/3] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 3/3] doc: add release notes for 21.05 Junfeng Guo
  2 siblings, 1 reply; 27+ messages in thread
From: Junfeng Guo @ 2021-03-26 10:42 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo

Support GTPU_(EH)_IPV4 inner L3 and L4 fields matching for AVF FDIR.

+------------------------------+---------------------------------+
|           Pattern            |            Input Set            |
+------------------------------+---------------------------------+
| eth/ipv4/gtpu/ipv4           | inner: src/dst ip               |
| eth/ipv4/gtpu/ipv4/udp       | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/ipv4/tcp       | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh/ipv4        | inner: src/dst ip               |
| eth/ipv4/gtpu/eh/ipv4/udp    | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh/ipv4/tcp    | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(0)/ipv4     | inner: src/dst ip               |
| eth/ipv4/gtpu/eh(0)/ipv4/udp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(0)/ipv4/tcp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(1)/ipv4     | inner: src/dst ip               |
| eth/ipv4/gtpu/eh(1)/ipv4/udp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(1)/ipv4/tcp | inner: src/dst ip, src/dst port |
+------------------------------+---------------------------------+
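
The eh(0)/eh(1) rows above refer to the PDU type value matched in the GTP
PSC extension header. A sketch of the corresponding item, assuming the
21.05-era rte_flow_item_gtp_psc layout (later releases moved these fields
into a hdr sub-structure):

struct rte_flow_item_gtp_psc psc_spec = {
	.pdu_type = 1,		/* eh(1); use 0 for the eh(0) rows */
	.qfi = 0,
};
struct rte_flow_item_gtp_psc psc_mask = {
	.pdu_type = 0xff,	/* match the PDU type, ignore the QFI */
	.qfi = 0,
};
struct rte_flow_item gtp_psc_item = {
	.type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
	.spec = &psc_spec,
	.mask = &psc_mask,
};
/* Placed between the GTPU item and the inner IPv4 item of the pattern. */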

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         | 37 ++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h | 21 ++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 4e864b4b9c..a15574c9ea 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -75,6 +75,19 @@
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID)
 
+#define IAVF_FDIR_INSET_GTPU_IPV4 (\
+	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
+	IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
+	IAVF_INSET_TUN_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
+	IAVF_FDIR_INSET_GTPU_IPV4 | \
+	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
+	IAVF_FDIR_INSET_GTPU_IPV4 | \
+	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)
+
 #define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
@@ -121,7 +134,13 @@ static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_IPV4_GTPU,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,    IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu,		IAVF_FDIR_INSET_IPV6_GTPU,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu_eh,		IAVF_FDIR_INSET_IPV6_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
@@ -534,6 +553,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 	enum rte_flow_item_type next_type;
 	uint16_t ether_type;
 
+	u8 tun_inner = 0;
 	int layer = 0;
 	struct virtchnl_proto_hdr *hdr;
 
@@ -650,6 +670,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_IPV4_OUTER;
+					input_set |= IAVF_PROT_IPV4_INNER;
+				}
+
 				rte_memcpy(hdr->buffer,
 					&ipv4_spec->hdr,
 					sizeof(ipv4_spec->hdr));
@@ -736,6 +761,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_UDP_OUTER;
+					input_set |= IAVF_PROT_UDP_INNER;
+				}
+
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 					rte_memcpy(hdr->buffer,
 						&udp_spec->hdr,
@@ -780,6 +810,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_TCP_OUTER;
+					input_set |= IAVF_PROT_TCP_INNER;
+				}
+
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 					rte_memcpy(hdr->buffer,
 						&tcp_spec->hdr,
@@ -858,6 +893,8 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					gtp_spec, sizeof(*gtp_spec));
 			}
 
+			tun_inner = 1;
+
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 0ccf5901b4..f7bdd094e1 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -95,6 +95,17 @@
 #define IAVF_INSET_IPV6_TC \
 	(IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
 
+#define IAVF_INSET_TUN_IPV4_SRC \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_SRC)
+#define IAVF_INSET_TUN_IPV4_DST \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_DST)
+#define IAVF_INSET_TUN_IPV4_TOS \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_TOS)
+#define IAVF_INSET_TUN_IPV4_PROTO \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_PROTO)
+#define IAVF_INSET_TUN_IPV4_TTL \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_TTL)
+
 #define IAVF_INSET_TCP_SRC_PORT \
 	(IAVF_PROT_TCP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_TCP_DST_PORT \
@@ -103,6 +114,16 @@
 	(IAVF_PROT_UDP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_UDP_DST_PORT \
 	(IAVF_PROT_UDP_OUTER | IAVF_DPORT)
+
+#define IAVF_INSET_TUN_TCP_SRC_PORT \
+	(IAVF_PROT_TCP_INNER | IAVF_SPORT)
+#define IAVF_INSET_TUN_TCP_DST_PORT \
+	(IAVF_PROT_TCP_INNER | IAVF_DPORT)
+#define IAVF_INSET_TUN_UDP_SRC_PORT \
+	(IAVF_PROT_UDP_INNER | IAVF_SPORT)
+#define IAVF_INSET_TUN_UDP_DST_PORT \
+	(IAVF_PROT_UDP_INNER | IAVF_DPORT)
+
 #define IAVF_INSET_SCTP_SRC_PORT \
 	(IAVF_PROT_SCTP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_SCTP_DST_PORT \
-- 
2.25.1



* [dpdk-dev] [PATCH v2 2/3] net/iavf: support GTPU inner IPv6 for FDIR
  2021-03-26 10:42   ` [dpdk-dev] [PATCH v2 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
@ 2021-03-26 10:42     ` Junfeng Guo
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 3/3] doc: add release notes for 21.05 Junfeng Guo
  2 siblings, 0 replies; 27+ messages in thread
From: Junfeng Guo @ 2021-03-26 10:42 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo

Support GTPU_(EH)_IPV6 inner L3 and L4 fields matching for AVF FDIR.

+------------------------------+---------------------------------+
|           Pattern            |            Input Set            |
+------------------------------+---------------------------------+
| eth/ipv4/gtpu/ipv6           | inner: src/dst ip               |
| eth/ipv4/gtpu/ipv6/udp       | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/ipv6/tcp       | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh/ipv6        | inner: src/dst ip               |
| eth/ipv4/gtpu/eh/ipv6/udp    | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh/ipv6/tcp    | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(0)/ipv6     | inner: src/dst ip               |
| eth/ipv4/gtpu/eh(0)/ipv6/udp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(0)/ipv6/tcp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(1)/ipv6     | inner: src/dst ip               |
| eth/ipv4/gtpu/eh(1)/ipv6/udp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(1)/ipv6/tcp | inner: src/dst ip, src/dst port |
+------------------------------+---------------------------------+

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         | 24 ++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h | 10 ++++++++++
 2 files changed, 34 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index a15574c9ea..94ea2d959e 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -96,6 +96,19 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_GTPU_TEID)
 
+#define IAVF_FDIR_INSET_GTPU_IPV6 (\
+	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
+	IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
+	IAVF_INSET_TUN_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
+	IAVF_FDIR_INSET_GTPU_IPV6 | \
+	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
+	IAVF_FDIR_INSET_GTPU_IPV6 | \
+	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)
+
 #define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
@@ -137,10 +150,16 @@ static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,    IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6,    IAVF_FDIR_INSET_GTPU_IPV6,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp,IAVF_FDIR_INSET_GTPU_IPV6_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp,IAVF_FDIR_INSET_GTPU_IPV6_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu,		IAVF_FDIR_INSET_IPV6_GTPU,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu_eh,		IAVF_FDIR_INSET_IPV6_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
@@ -727,6 +746,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_IPV6_OUTER;
+					input_set |= IAVF_PROT_IPV6_INNER;
+				}
+
 				rte_memcpy(hdr->buffer,
 					&ipv6_spec->hdr,
 					sizeof(ipv6_spec->hdr));
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index f7bdd094e1..005eeb3553 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -105,6 +105,16 @@
 	(IAVF_PROT_IPV4_INNER | IAVF_IP_PROTO)
 #define IAVF_INSET_TUN_IPV4_TTL \
 	(IAVF_PROT_IPV4_INNER | IAVF_IP_TTL)
+#define IAVF_INSET_TUN_IPV6_SRC \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_SRC)
+#define IAVF_INSET_TUN_IPV6_DST \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_DST)
+#define IAVF_INSET_TUN_IPV6_NEXT_HDR \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_PROTO)
+#define IAVF_INSET_TUN_IPV6_HOP_LIMIT \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_TTL)
+#define IAVF_INSET_TUN_IPV6_TC \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_TOS)
 
 #define IAVF_INSET_TCP_SRC_PORT \
 	(IAVF_PROT_TCP_OUTER | IAVF_SPORT)
-- 
2.25.1



* [dpdk-dev] [PATCH v2 3/3] doc: add release notes for 21.05
  2021-03-26 10:42   ` [dpdk-dev] [PATCH v2 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 2/3] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
@ 2021-03-26 10:42     ` Junfeng Guo
  2021-03-26  6:08       ` Zhang, Qi Z
  2 siblings, 1 reply; 27+ messages in thread
From: Junfeng Guo @ 2021-03-26 10:42 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo

Add support for FDIR GTPU_(EH)_IPv4/IPv6 inner L3/L4 fields matching.

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/rel_notes/release_21_05.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_05.rst b/doc/guides/rel_notes/release_21_05.rst
index 22aa80a15b..f4ec406a6e 100644
--- a/doc/guides/rel_notes/release_21_05.rst
+++ b/doc/guides/rel_notes/release_21_05.rst
@@ -117,6 +117,12 @@ New Features
   * Added command to display Rx queue used descriptor count.
     ``show port (port_id) rxq (queue_id) desc used count``
 
+* **Updated Intel iavf driver.**
+
+  Updated the Intel iavf driver with new features and improvements, including:
+
+  * Added support for FDIR GTPU_(EH)_IPv4/IPv6 inner L3/L4 fields matching.
+
 
 Removed Items
 -------------
-- 
2.25.1



* [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR.
  2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
@ 2021-03-26 14:29       ` Junfeng Guo
  2021-03-26  6:41         ` Zhang, Qi Z
                           ` (3 more replies)
  0 siblings, 4 replies; 27+ messages in thread
From: Junfeng Guo @ 2021-03-26 14:29 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing
  Cc: dev, haiyue.wang, yuying.zhang, junfeng.guo

Support GTPU_(EH)_IPV4/IPV6 inner L3 and L4 fields matching for AVF FDIR.

v2:
* add release notes for 21.05.
v3:
* update release notes for 21.05.

[PATCH v3 1/3] support GTPU inner IPv4 for FDIR.
[PATCH v3 2/3] support GTPU inner IPv6 for FDIR.
[PATCH v3 3/3] add release notes for 21.05.

Junfeng Guo (3):
  net/iavf: support GTPU inner IPv4 for FDIR
  net/iavf: support GTPU inner IPv6 for FDIR
  doc: add release notes for 21.05

 doc/guides/rel_notes/release_21_05.rst |  6 +++
 drivers/net/iavf/iavf_fdir.c           | 61 ++++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h   | 31 +++++++++++++
 3 files changed, 98 insertions(+)

-- 
2.25.1



* [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR
  2021-03-26 14:29       ` [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
  2021-03-26  6:41         ` Zhang, Qi Z
@ 2021-03-26 14:29         ` Junfeng Guo
  2021-03-29  7:50           ` [dpdk-dev] rte_flow ageing David Bouyeure
                             ` (2 more replies)
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 2/3] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 3/3] doc: add release notes for 21.05 Junfeng Guo
  3 siblings, 3 replies; 27+ messages in thread
From: Junfeng Guo @ 2021-03-26 14:29 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing
  Cc: dev, haiyue.wang, yuying.zhang, junfeng.guo

Support GTPU_(EH)_IPV4 inner L3 and L4 fields matching for AVF FDIR.

+------------------------------+---------------------------------+
|           Pattern            |            Input Set            |
+------------------------------+---------------------------------+
| eth/ipv4/gtpu/ipv4           | inner: src/dst ip               |
| eth/ipv4/gtpu/ipv4/udp       | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/ipv4/tcp       | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh/ipv4        | inner: src/dst ip               |
| eth/ipv4/gtpu/eh/ipv4/udp    | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh/ipv4/tcp    | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(0)/ipv4     | inner: src/dst ip               |
| eth/ipv4/gtpu/eh(0)/ipv4/udp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(0)/ipv4/tcp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(1)/ipv4     | inner: src/dst ip               |
| eth/ipv4/gtpu/eh(1)/ipv4/udp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(1)/ipv4/tcp | inner: src/dst ip, src/dst port |
+------------------------------+---------------------------------+

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         | 37 ++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h | 21 ++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 4e864b4b9c..a15574c9ea 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -75,6 +75,19 @@
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID)
 
+#define IAVF_FDIR_INSET_GTPU_IPV4 (\
+	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
+	IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
+	IAVF_INSET_TUN_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
+	IAVF_FDIR_INSET_GTPU_IPV4 | \
+	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
+	IAVF_FDIR_INSET_GTPU_IPV4 | \
+	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)
+
 #define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
@@ -121,7 +134,13 @@ static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_IPV4_GTPU,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,    IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu,		IAVF_FDIR_INSET_IPV6_GTPU,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu_eh,		IAVF_FDIR_INSET_IPV6_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
@@ -534,6 +553,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 	enum rte_flow_item_type next_type;
 	uint16_t ether_type;
 
+	u8 tun_inner = 0;
 	int layer = 0;
 	struct virtchnl_proto_hdr *hdr;
 
@@ -650,6 +670,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_IPV4_OUTER;
+					input_set |= IAVF_PROT_IPV4_INNER;
+				}
+
 				rte_memcpy(hdr->buffer,
 					&ipv4_spec->hdr,
 					sizeof(ipv4_spec->hdr));
@@ -736,6 +761,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_UDP_OUTER;
+					input_set |= IAVF_PROT_UDP_INNER;
+				}
+
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 					rte_memcpy(hdr->buffer,
 						&udp_spec->hdr,
@@ -780,6 +810,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_TCP_OUTER;
+					input_set |= IAVF_PROT_TCP_INNER;
+				}
+
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 					rte_memcpy(hdr->buffer,
 						&tcp_spec->hdr,
@@ -858,6 +893,8 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					gtp_spec, sizeof(*gtp_spec));
 			}
 
+			tun_inner = 1;
+
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 0ccf5901b4..f7bdd094e1 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -95,6 +95,17 @@
 #define IAVF_INSET_IPV6_TC \
 	(IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
 
+#define IAVF_INSET_TUN_IPV4_SRC \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_SRC)
+#define IAVF_INSET_TUN_IPV4_DST \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_DST)
+#define IAVF_INSET_TUN_IPV4_TOS \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_TOS)
+#define IAVF_INSET_TUN_IPV4_PROTO \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_PROTO)
+#define IAVF_INSET_TUN_IPV4_TTL \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_TTL)
+
 #define IAVF_INSET_TCP_SRC_PORT \
 	(IAVF_PROT_TCP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_TCP_DST_PORT \
@@ -103,6 +114,16 @@
 	(IAVF_PROT_UDP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_UDP_DST_PORT \
 	(IAVF_PROT_UDP_OUTER | IAVF_DPORT)
+
+#define IAVF_INSET_TUN_TCP_SRC_PORT \
+	(IAVF_PROT_TCP_INNER | IAVF_SPORT)
+#define IAVF_INSET_TUN_TCP_DST_PORT \
+	(IAVF_PROT_TCP_INNER | IAVF_DPORT)
+#define IAVF_INSET_TUN_UDP_SRC_PORT \
+	(IAVF_PROT_UDP_INNER | IAVF_SPORT)
+#define IAVF_INSET_TUN_UDP_DST_PORT \
+	(IAVF_PROT_UDP_INNER | IAVF_DPORT)
+
 #define IAVF_INSET_SCTP_SRC_PORT \
 	(IAVF_PROT_SCTP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_SCTP_DST_PORT \
-- 
2.25.1



* [dpdk-dev] [PATCH v3 2/3] net/iavf: support GTPU inner IPv6 for FDIR
  2021-03-26 14:29       ` [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
  2021-03-26  6:41         ` Zhang, Qi Z
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
@ 2021-03-26 14:29         ` Junfeng Guo
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 3/3] doc: add release notes for 21.05 Junfeng Guo
  3 siblings, 0 replies; 27+ messages in thread
From: Junfeng Guo @ 2021-03-26 14:29 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing
  Cc: dev, haiyue.wang, yuying.zhang, junfeng.guo

Support GTPU_(EH)_IPV6 inner L3 and L4 fields matching for AVF FDIR.

+------------------------------+---------------------------------+
|           Pattern            |            Input Set            |
+------------------------------+---------------------------------+
| eth/ipv4/gtpu/ipv6           | inner: src/dst ip               |
| eth/ipv4/gtpu/ipv6/udp       | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/ipv6/tcp       | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh/ipv6        | inner: src/dst ip               |
| eth/ipv4/gtpu/eh/ipv6/udp    | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh/ipv6/tcp    | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(0)/ipv6     | inner: src/dst ip               |
| eth/ipv4/gtpu/eh(0)/ipv6/udp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(0)/ipv6/tcp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(1)/ipv6     | inner: src/dst ip               |
| eth/ipv4/gtpu/eh(1)/ipv6/udp | inner: src/dst ip, src/dst port |
| eth/ipv4/gtpu/eh(1)/ipv6/tcp | inner: src/dst ip, src/dst port |
+------------------------------+---------------------------------+

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         | 24 ++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.h | 10 ++++++++++
 2 files changed, 34 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index a15574c9ea..94ea2d959e 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -96,6 +96,19 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_GTPU_TEID)
 
+#define IAVF_FDIR_INSET_GTPU_IPV6 (\
+	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
+	IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
+	IAVF_INSET_TUN_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
+	IAVF_FDIR_INSET_GTPU_IPV6 | \
+	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
+	IAVF_FDIR_INSET_GTPU_IPV6 | \
+	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)
+
 #define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
@@ -137,10 +150,16 @@ static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,    IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6,    IAVF_FDIR_INSET_GTPU_IPV6,              IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp,IAVF_FDIR_INSET_GTPU_IPV6_UDP,          IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp,IAVF_FDIR_INSET_GTPU_IPV6_TCP,          IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu,		IAVF_FDIR_INSET_IPV6_GTPU,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu_eh,		IAVF_FDIR_INSET_IPV6_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
@@ -727,6 +746,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_IPV6_OUTER;
+					input_set |= IAVF_PROT_IPV6_INNER;
+				}
+
 				rte_memcpy(hdr->buffer,
 					&ipv6_spec->hdr,
 					sizeof(ipv6_spec->hdr));
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index f7bdd094e1..005eeb3553 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -105,6 +105,16 @@
 	(IAVF_PROT_IPV4_INNER | IAVF_IP_PROTO)
 #define IAVF_INSET_TUN_IPV4_TTL \
 	(IAVF_PROT_IPV4_INNER | IAVF_IP_TTL)
+#define IAVF_INSET_TUN_IPV6_SRC \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_SRC)
+#define IAVF_INSET_TUN_IPV6_DST \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_DST)
+#define IAVF_INSET_TUN_IPV6_NEXT_HDR \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_PROTO)
+#define IAVF_INSET_TUN_IPV6_HOP_LIMIT \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_TTL)
+#define IAVF_INSET_TUN_IPV6_TC \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_TOS)
 
 #define IAVF_INSET_TCP_SRC_PORT \
 	(IAVF_PROT_TCP_OUTER | IAVF_SPORT)
-- 
2.25.1



* [dpdk-dev] [PATCH v3 3/3] doc: add release notes for 21.05
  2021-03-26 14:29       ` [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
                           ` (2 preceding siblings ...)
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 2/3] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
@ 2021-03-26 14:29         ` Junfeng Guo
  3 siblings, 0 replies; 27+ messages in thread
From: Junfeng Guo @ 2021-03-26 14:29 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, beilei.xing
  Cc: dev, haiyue.wang, yuying.zhang, junfeng.guo

Add support for FDIR GTPU_(EH)_IPv4/IPv6 inner L3/L4 fields matching.

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/rel_notes/release_21_05.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_05.rst b/doc/guides/rel_notes/release_21_05.rst
index 22aa80a15b..0e4a1b6f57 100644
--- a/doc/guides/rel_notes/release_21_05.rst
+++ b/doc/guides/rel_notes/release_21_05.rst
@@ -117,6 +117,12 @@ New Features
   * Added command to display Rx queue used descriptor count.
     ``show port (port_id) rxq (queue_id) desc used count``
 
+* **Updated Intel iavf driver.**
+
+  Updated the Intel iavf driver with new features and improvements, including:
+
+  * Added flow filter to support GTPU inner L3/L4 fields matching.
+
 
 Removed Items
 -------------
-- 
2.25.1



* [dpdk-dev] rte_flow ageing
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
@ 2021-03-29  7:50           ` David Bouyeure
  2021-03-29  8:32             ` David Bouyeure
  2021-03-31 14:53           ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Ferruh Yigit
  2021-12-28 14:45           ` [dpdk-dev] net/mlx5: rte_flow_item_gtp restricted to GTPU David Bouyeure
  2 siblings, 1 reply; 27+ messages in thread
From: David Bouyeure @ 2021-03-29  7:50 UTC (permalink / raw)
  To: dev

Hi,


I've found the pretty useful, experimental, brand-new flow ageing API
implemented in the mlx5 PMD.

I'm trying it (rte_eth_dev_callback_register(RTE_ETH_EVENT_FLOW_AGED),
RTE_FLOW_ACTION_TYPE_AGE) to recover any flow that I previously offloaded.

The DPDK version is 20.08 and the Mellanox (ConnectX-6) OFED drivers are
5.1-2.5.8.0.

I eventually don't see the usefulness of the callback, since it is
actually triggered indirectly by us (the DPDK application) when calling
rte_flow_get_aged_flows(). If we don't call it, the callback is called
only once.

Also, calling rte_flow_get_aged_flows() from the callback won't trigger
it the next time (MLX5_AGE_TRIGGER is reset after the callback call).

Furthermore, I don't see the point of computing ageing flows in
mlx5_flow.c::mlx5_flow_aging_check() if the client callback isn't called.

So far, I can handle the flow ageing from the same thread as the one
that handles the flow rules (rte_flow); it even avoids thread
synchronization. But, in the future, I may need to be notified as soon
as possible of a single flow ageing, and thus handle this flow logic
from the ageing callback.
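
For reference, my usage is roughly the following sketch (error handling
omitted; the timeout, queue index and array size are placeholders):

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Invoked when the PMD reports that at least one flow has aged out. */
static int
aged_flow_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	void *contexts[64];
	int i, n;

	(void)event; (void)cb_arg; (void)ret_param;
	/* Collect the aged-out flows; this should also re-arm the next event. */
	n = rte_flow_get_aged_flows(port_id, contexts, 64, NULL);
	for (i = 0; i < n; i++) {
		/* contexts[i] is the rte_flow_action_age.context set below. */
	}
	return 0;
}

static struct rte_flow *
create_aged_flow(uint16_t port_id, const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[], void *flow_ctx,
		 struct rte_flow_error *err)
{
	struct rte_flow_action_age age = { .timeout = 30, .context = flow_ctx };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_FLOW_AGED,
				      aged_flow_cb, NULL);
	return rte_flow_create(port_id, attr, pattern, actions, err);
}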


I may misunderstand the whole ageing API... Thanks a lot for any 
clarification.



* Re: [dpdk-dev] rte_flow ageing
  2021-03-29  7:50           ` [dpdk-dev] rte_flow ageing David Bouyeure
@ 2021-03-29  8:32             ` David Bouyeure
  0 siblings, 0 replies; 27+ messages in thread
From: David Bouyeure @ 2021-03-29  8:32 UTC (permalink / raw)
  To: dev

Sorry for the below topic confusion (in-reply-to).

On 3/29/21 9:50 AM, David Bouyeure wrote:
> Hi,
>
>
> I've found out the pretty useful experimental brand new flow ageing 
> API implemented in the mlx5 PMD.
>
> I'm trying it (rte_eth_dev_callback_register(RTE_ETH_EVENT_FLOW_AGED), 
> RTE_FLOW_ACTION_TYPE_AGE) to recover any flow that I previously 
> offloaded.
>
> The DPDK version is 20.08 and Mellanox(Connect-X6) OFED drivers are 
> 5.1-2.5.8.0.
>
> I eventually don't see the usefulness of the callback since it's 
> actually triggered indirectly by us(the DPDK application) when calling 
> rte_flow_get_aged_flows(). If we don't call it, the callback is called 
> only once.
>
> And, calling rte_flow_get_aged_flows() from the callback won't trigger 
> it next time(MLX5_AGE_TRIGGER is reset after the callback call)
>
> Furthermore, I don't see the point of computing ageing flows in 
> mlx5_fow.c::mlx5_flow_aging_check() if the client callback isn't called.
>
> So far, I can handle the flow ageing from the same thread as the one 
> which is handling the flow direction(rte_flow), it even avoid threads 
> synchronization. But, in the future, I may need to be noticed as soon 
> as possible of a single flow ageing, and thus handle this flow logic 
> from the ageing callback.
>
>
> I may misunderstand the whole ageing API... Thanks a lot for any 
> clarification.
>


* Re: [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
  2021-03-29  7:50           ` [dpdk-dev] rte_flow ageing David Bouyeure
@ 2021-03-31 14:53           ` Ferruh Yigit
  2021-12-28 14:45           ` [dpdk-dev] net/mlx5: rte_flow_item_gtp restricted to GTPU David Bouyeure
  2 siblings, 0 replies; 27+ messages in thread
From: Ferruh Yigit @ 2021-03-31 14:53 UTC (permalink / raw)
  To: Junfeng Guo, qi.z.zhang, jingjing.wu, beilei.xing
  Cc: dev, haiyue.wang, yuying.zhang

On 3/26/2021 2:29 PM, Junfeng Guo wrote:
> Support GTPU_(EH)_IPV4 inner L3 and L4 fields matching for AVF FDIR.
> 
> +------------------------------+---------------------------------+
> |           Pattern            |            Input Set            |
> +------------------------------+---------------------------------+
> | eth/ipv4/gtpu/ipv4           | inner: src/dst ip               |
> | eth/ipv4/gtpu/ipv4/udp       | inner: src/dst ip, src/dst port |
> | eth/ipv4/gtpu/ipv4/tcp       | inner: src/dst ip, src/dst port |
> | eth/ipv4/gtpu/eh/ipv4        | inner: src/dst ip               |
> | eth/ipv4/gtpu/eh/ipv4/udp    | inner: src/dst ip, src/dst port |
> | eth/ipv4/gtpu/eh/ipv4/tcp    | inner: src/dst ip, src/dst port |
> | eth/ipv4/gtpu/eh(0)/ipv4     | inner: src/dst ip               |
> | eth/ipv4/gtpu/eh(0)/ipv4/udp | inner: src/dst ip, src/dst port |
> | eth/ipv4/gtpu/eh(0)/ipv4/tcp | inner: src/dst ip, src/dst port |
> | eth/ipv4/gtpu/eh(1)/ipv4     | inner: src/dst ip               |
> | eth/ipv4/gtpu/eh(1)/ipv4/udp | inner: src/dst ip, src/dst port |
> | eth/ipv4/gtpu/eh(1)/ipv4/tcp | inner: src/dst ip, src/dst port |
> +------------------------------+---------------------------------+
> 
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>

<...>

> @@ -121,7 +134,13 @@ static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
>   	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
>   	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
>   	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_IPV4_GTPU,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},
>   	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,    IAVF_FDIR_INSET_GTPU_IPV4,              IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,IAVF_FDIR_INSET_GTPU_IPV4_UDP,          IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,IAVF_FDIR_INSET_GTPU_IPV4_TCP,          IAVF_INSET_NONE},

Syntax updated to fix checkpatch warning in next-net: space added after ','
and additional tab removed before the third field.


* Re: [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR.
  2021-03-26  6:41         ` Zhang, Qi Z
@ 2021-03-31 14:54           ` Ferruh Yigit
  0 siblings, 0 replies; 27+ messages in thread
From: Ferruh Yigit @ 2021-03-31 14:54 UTC (permalink / raw)
  To: Zhang, Qi Z, Guo, Junfeng, Wu, Jingjing, Xing, Beilei
  Cc: dev, Wang, Haiyue, Zhang, Yuying

On 3/26/2021 6:41 AM, Zhang, Qi Z wrote:
> 
> 
>> -----Original Message-----
>> From: Guo, Junfeng <junfeng.guo@intel.com>
>> Sent: Friday, March 26, 2021 10:30 PM
>> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
>> Xing, Beilei <beilei.xing@intel.com>
>> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Zhang, Yuying
>> <yuying.zhang@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
>> Subject: [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR.
>>
>> Support GTPU_(EH)_IPV4/IPV6 inner L3 and L4 fields matching for AVF FDIR.
>>
>> v2:
>> * add release notes for 21.05.
>> v3:
>> * update release notes for 21.05.
>>
>> [PATCH v3 1/3] support GTPU inner IPv4 for FDIR.
>> [PATCH v3 2/3] support GTPU inner IPv6 for FDIR.
>> [PATCH v3 3/3] add release notes for 21.05.
>>
>> Junfeng Guo (3):
>>    net/iavf: support GTPU inner IPv4 for FDIR
>>    net/iavf: support GTPU inner IPv6 for FDIR
>>    doc: add release notes for 21.05
>>
>>   doc/guides/rel_notes/release_21_05.rst |  6 +++
>>   drivers/net/iavf/iavf_fdir.c           | 61 ++++++++++++++++++++++++++
>>   drivers/net/iavf/iavf_generic_flow.h   | 31 +++++++++++++
>>   3 files changed, 98 insertions(+)
>>
>> --
>> 2.25.1
> 
> Acked-by: Qi Zhang <qi.z.zhang@intel.com>
> 
> Applied to dpdk-next-net-intel.
> 

Release notes update patch merged to actual patch in next-net.



* [dpdk-dev] net/mlx5: rte_flow_item_gtp restricted to GTPU
  2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
  2021-03-29  7:50           ` [dpdk-dev] rte_flow ageing David Bouyeure
  2021-03-31 14:53           ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Ferruh Yigit
@ 2021-12-28 14:45           ` David Bouyeure
  2 siblings, 0 replies; 27+ messages in thread
From: David Bouyeure @ 2021-12-28 14:45 UTC (permalink / raw)
  To: dev

Hi everybody,


I've implemented some flow offloading through rte_flow, and I'm just
finding out that my GTPC packets aren't handled the same way as my GTPU
ones by a Mellanox ConnectX-6 (FLEX_PARSER_PROFILE_ENABLE==3).

In other words, the GTP rules that I set ignore GTPC packets. As far as
I know, the DPDK API documentation doesn't say anything about a
GTP-C/GTP-U distinction, only that there's no way to set a filter
against a GTPv2 (GTP-C) header.

There's a reference to RTE_GTPU_UDP_PORT in
dpdk.20.11.2/drivers/net/mlx5/mlx5_flow_dv.c::flow_dv_translate_item_gtp(),
and none to RTE_GTPC_UDP_PORT.

Can you confirm that the mlx5 rte_flow implementation definitely ignores
GTPC packets? And if so, for what reason?
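
In the meantime, the only workaround I can think of is to steer GTP-C
traffic by matching the UDP destination port directly instead of using
the GTP item. A rough sketch of the rule I would try (the queue index is
a placeholder, and I haven't verified that this behaves as expected on
the ConnectX-6):

#include <rte_flow.h>
#include <rte_gtp.h>		/* RTE_GTPC_UDP_PORT (2123) */
#include <rte_byteorder.h>

static struct rte_flow *
steer_gtpc(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(RTE_GTPC_UDP_PORT),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}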


Thanks a lot.


Regards.





* Re: [dpdk-dev] rte_flow ageing
  2021-04-08  9:52             ` Matan Azrad
@ 2021-04-08 16:45               ` David Bouyeure
  0 siblings, 0 replies; 27+ messages in thread
From: David Bouyeure @ 2021-04-08 16:45 UTC (permalink / raw)
  To: Matan Azrad, Asaf Penso, dev; +Cc: Jack Min

As discussed with Matan, there is indeed a bug that prevents scheduling a
later age event (with rte_flow_get_aged_flows()) from the user's callback itself.


I could suggest the following patch:

--- dpdk-21.02/drivers/net/mlx5/mlx5.c    2021-02-14 10:58:34.000000000 +0100
+++ dpdk-21.02/drivers/net/mlx5/mlx5.c.new    2021-04-08 18:35:44.124965176 +0200
@@ -554,7 +554,7 @@
              rte_eth_dev_callback_process
(&rte_eth_devices[sh->port[i].devx_ih_port_id],
                  RTE_ETH_EVENT_FLOW_AGED, NULL);
-        age_info->flags = 0;
+        age_info->flags &= ~MLX5_AGE_EVENT_NEW;
      }
  }

Thanks.

On 4/8/21 11:52 AM, Matan Azrad wrote:
>
> Yes, you right for the meaning of MLX5_AGE_TRIGGER.
>
> But why you said it is erased when the callback return?
>
> It is erased when a new aged-out flow is detected by the driver….
>
> Do you have an issue with option 1?
>
> Matan
>
> *From:*David Bouyeure <david.bouyeure@fraudbuster.mobi>
> *Sent:* Thursday, April 8, 2021 10:50 AM
> *To:* Matan Azrad <matan@nvidia.com>; Asaf Penso <asafp@nvidia.com>; 
> dev@dpdk.org
> *Cc:* Jack Min <jackmin@nvidia.com>
> *Subject:* Re: [dpdk-dev] rte_flow ageing
>
> *External email: Use caution opening links or attachments*
>
> Hi Matan,
>
> below are my comments.
>
> Thank you.
>
> On 4/7/21 8:09 PM, Matan Azrad wrote:
>
>     Yes you can call it from the event callback.
>
> Sure, but it won't trigger the event callback as it would for the next 
> aged-out flow(s) if called from outside the callback.
>
>     Yes, MLX5_AGE_TRIGGER probably means that event was sent and no
>     need to send it again in the next aged-out flow.
>
> I don't think so. MLX5_AGE_TRIGGER means 'Do call the callback for 
> next aged-out flow(s)'.
>
>     Erasing it cause new event to be sent in the next aged-out flow...
>
> No, I think it's the reverse.
>
>     I don't understand what is the issue for you in option 1...
>
>     Get Outlook for Android
>     <https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Faka.ms%2FAAb9ysg&data=04%7C01%7Cmatan%40nvidia.com%7C5f8447e5e9a0419d028408d8fa62f35c%7C43083d15727340c1b7db39efd9ccc17a%7C0%7C0%7C637534650206068346%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=vfiLHcxWtpQEh9jqE7wIsGhs%2FZlMN5r4aJtSrvE7OgY%3D&reserved=0>
>
>     ------------------------------------------------------------------------
>
>     *From:*David Bouyeure <david.bouyeure@fraudbuster.mobi>
>     <mailto:david.bouyeure@fraudbuster.mobi>
>     *Sent:* Wednesday, April 7, 2021 7:19:34 PM
>     *To:* Matan Azrad <matan@nvidia.com> <mailto:matan@nvidia.com>;
>     Asaf Penso <asafp@nvidia.com> <mailto:asafp@nvidia.com>;
>     dev@dpdk.org <mailto:dev@dpdk.org> <dev@dpdk.org>
>     <mailto:dev@dpdk.org>
>     *Cc:* Jack Min <jackmin@nvidia.com> <mailto:jackmin@nvidia.com>
>     *Subject:* Re: [dpdk-dev] rte_flow ageing
>
>     *External email: Use caution opening links or attachments*
>
>     Hi Matan, and thanks a lot,
>
>     regarding the mode *1*, I still have a doubt:
>
>          1. Register the AGE event -> in event time to query the
>             aged-out flows by the rte_flow_get_aged_flows API, this
>             call will trigger a new event when new aged-out flow will
>             be detected for the port.(if you don’t call
>             rte_flow_get_aged_flows the event will not be retriggered.)
>
>     You meant calling rte_flow_get_aged_flows() from the event
>     callback I guess...?
>
>     I think this is not working because MLX5_AGE_TRIGGER is erased
>     when the callback returns.
>
>     Anyway, the polling mode is enough to me so far.
>
>     Thanks again.
>
>     Regards.
>
>     On 4/5/21 12:23 PM, Matan Azrad wrote:
>
> -->
>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] rte_flow ageing
  2021-04-08  7:50           ` David Bouyeure
@ 2021-04-08  9:52             ` Matan Azrad
  2021-04-08 16:45               ` David Bouyeure
  0 siblings, 1 reply; 27+ messages in thread
From: Matan Azrad @ 2021-04-08  9:52 UTC (permalink / raw)
  To: David Bouyeure, Asaf Penso, dev; +Cc: Jack Min

Yes, you're right about the meaning of MLX5_AGE_TRIGGER.

But why do you say it is erased when the callback returns?

It is erased when a new aged-out flow is detected by the driver….

Do you have an issue with option 1?

Matan

From: David Bouyeure <david.bouyeure@fraudbuster.mobi>
Sent: Thursday, April 8, 2021 10:50 AM
To: Matan Azrad <matan@nvidia.com>; Asaf Penso <asafp@nvidia.com>; dev@dpdk.org
Cc: Jack Min <jackmin@nvidia.com>
Subject: Re: [dpdk-dev] rte_flow ageing

External email: Use caution opening links or attachments


Hi Matan,



below are my comments.



Thank you.


On 4/7/21 8:09 PM, Matan Azrad wrote:

Yes you can call it from the event callback.
Sure, but it won't trigger the event callback as it would for the next aged-out flow(s) if called from outside the callback.


Yes, MLX5_AGE_TRIGGER probably means that event was sent and no need to send it again in the next aged-out flow.
I don't think so. MLX5_AGE_TRIGGER means 'Do call the callback for next aged-out flow(s)'.

Erasing it cause new event to be sent in the next aged-out flow...
No, I think it's the reverse.


I don't understand what is the issue for you in option 1...
השג את ‏Outlook עבור Android‏<https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Faka.ms%2FAAb9ysg&data=04%7C01%7Cmatan%40nvidia.com%7C5f8447e5e9a0419d028408d8fa62f35c%7C43083d15727340c1b7db39efd9ccc17a%7C0%7C0%7C637534650206068346%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=vfiLHcxWtpQEh9jqE7wIsGhs%2FZlMN5r4aJtSrvE7OgY%3D&reserved=0>

________________________________
From: David Bouyeure <david.bouyeure@fraudbuster.mobi><mailto:david.bouyeure@fraudbuster.mobi>
Sent: Wednesday, April 7, 2021 7:19:34 PM
To: Matan Azrad <matan@nvidia.com><mailto:matan@nvidia.com>; Asaf Penso <asafp@nvidia.com><mailto:asafp@nvidia.com>; dev@dpdk.org<mailto:dev@dpdk.org> <dev@dpdk.org><mailto:dev@dpdk.org>
Cc: Jack Min <jackmin@nvidia.com><mailto:jackmin@nvidia.com>
Subject: Re: [dpdk-dev] rte_flow ageing

External email: Use caution opening links or attachments


Hi Matan, and thanks a lot,
regarding the mode 1, I still have a doubt:

  1.  Register the AGE event -> in event time to query the aged-out flows by the rte_flow_get_aged_flows API, this call will trigger a new event when new aged-out flow will be detected for the port.(if you don’t call rte_flow_get_aged_flows the event will not be retriggered.)

You meant calling rte_flow_get_aged_flows() from the event callback I guess...?

I think this is not working because MLX5_AGE_TRIGGER is erased when the callback returns.



Anyway, the polling mode is enough to me so far.

Thanks again.



Regards.


On 4/5/21 12:23 PM, Matan Azrad wrote:
-->

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] rte_flow ageing
  2021-04-07 18:09         ` Matan Azrad
@ 2021-04-08  7:50           ` David Bouyeure
  2021-04-08  9:52             ` Matan Azrad
  0 siblings, 1 reply; 27+ messages in thread
From: David Bouyeure @ 2021-04-08  7:50 UTC (permalink / raw)
  To: Matan Azrad, Asaf Penso, dev; +Cc: Jack Min

Hi Matan,


below are my comments.


Thank you.


On 4/7/21 8:09 PM, Matan Azrad wrote:
>
> Yes you can call it from the event callback.
Sure, but it won't trigger the event callback as it would for the next 
aged-out flow(s) if called from outside the callback.
>
> Yes, MLX5_AGE_TRIGGER probably means that event was sent and no need 
> to send it again in the next aged-out flow.
I don't think so. MLX5_AGE_TRIGGER means 'Do call the callback for the next 
aged-out flow(s)'.
> Erasing it cause new event to be sent in the next aged-out flow...
No, I think it's the reverse.
>
> I don't understand what is the issue for you in option 1...
>
> Get Outlook for Android <https://aka.ms/AAb9ysg>
>
> ------------------------------------------------------------------------
> *From:* David Bouyeure <david.bouyeure@fraudbuster.mobi>
> *Sent:* Wednesday, April 7, 2021 7:19:34 PM
> *To:* Matan Azrad <matan@nvidia.com>; Asaf Penso <asafp@nvidia.com>; 
> dev@dpdk.org <dev@dpdk.org>
> *Cc:* Jack Min <jackmin@nvidia.com>
> *Subject:* Re: [dpdk-dev] rte_flow ageing
> *External email: Use caution opening links or attachments*
>
>
> Hi Matan, and thanks a lot,
>
> regarding the mode *1*, I still have a doubt:
>>
>>  1. Register the AGE event -> in event time to query the aged-out
>>     flows by the rte_flow_get_aged_flows API, this call will trigger
>>     a new event when new aged-out flow will be detected for the
>>     port.(if you don’t call rte_flow_get_aged_flows the event will
>>     not be retriggered.)
>>
> You meant calling rte_flow_get_aged_flows() from the event callback I 
> guess...?
>
> I think this is not working because MLX5_AGE_TRIGGER is erased when 
> the callback returns.
>
>
> Anyway, the polling mode is enough to me so far.
>
> Thanks again.
>
>
> Regards.
>
>
> On 4/5/21 12:23 PM, Matan Azrad wrote:

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] rte_flow ageing
  2021-04-07 16:19       ` David Bouyeure
@ 2021-04-07 18:09         ` Matan Azrad
  2021-04-08  7:50           ` David Bouyeure
  0 siblings, 1 reply; 27+ messages in thread
From: Matan Azrad @ 2021-04-07 18:09 UTC (permalink / raw)
  To: Asaf Penso, dev, David Bouyeure; +Cc: Jack Min


Yes, you can call it from the event callback.

Yes, MLX5_AGE_TRIGGER probably means that the event was sent and there is no need to send it again for the next aged-out flow.
Erasing it causes a new event to be sent for the next aged-out flow...

I don't understand what the issue is for you with option 1...

Get Outlook for Android<https://aka.ms/AAb9ysg>

________________________________
From: David Bouyeure <david.bouyeure@fraudbuster.mobi>
Sent: Wednesday, April 7, 2021 7:19:34 PM
To: Matan Azrad <matan@nvidia.com>; Asaf Penso <asafp@nvidia.com>; dev@dpdk.org <dev@dpdk.org>
Cc: Jack Min <jackmin@nvidia.com>
Subject: Re: [dpdk-dev] rte_flow ageing

External email: Use caution opening links or attachments


Hi Matan, and thanks a lot,

regarding the mode 1, I still have a doubt:

  1.  Register the AGE event -> in event time to query the aged-out flows by the rte_flow_get_aged_flows API, this call will trigger a new event when new aged-out flow will be detected for the port.(if you don’t call rte_flow_get_aged_flows the event will not be retriggered.)

You meant calling rte_flow_get_aged_flows() from the event callback I guess...?

I think this is not working because MLX5_AGE_TRIGGER is erased when the callback returns.


Anyway, the polling mode is enough to me so far.

Thanks again.


Regards.


On 4/5/21 12:23 PM, Matan Azrad wrote:

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] rte_flow ageing
  2021-04-05 10:23     ` Matan Azrad
@ 2021-04-07 16:19       ` David Bouyeure
  2021-04-07 18:09         ` Matan Azrad
  0 siblings, 1 reply; 27+ messages in thread
From: David Bouyeure @ 2021-04-07 16:19 UTC (permalink / raw)
  To: Matan Azrad, Asaf Penso, dev; +Cc: Jack Min

Hi Matan, and thanks a lot,

regarding the mode *1*, I still have a doubt:
>
>  1. Register the AGE event -> in event time to query the aged-out
>     flows by the rte_flow_get_aged_flows API, this call will trigger a
>     new event when new aged-out flow will be detected for the port.(if
>     you don’t call rte_flow_get_aged_flows the event will not be
>     retriggered.)
>
You meant calling rte_flow_get_aged_flows() from the event callback I 
guess...?

I think this is not working because MLX5_AGE_TRIGGER is erased when the 
callback returns.


Anyway, the polling mode is enough for me so far.

Thanks again.


Regards.


On 4/5/21 12:23 PM, Matan Azrad wrote:
>
> Hi
>
> I will try to answer inline with prefix [MA].
>
> *From:* David Bouyeure <david.bouyeure@fraudbuster.mobi>
> *Sent:* Tuesday, March 30, 2021 6:46 PM
> *To:* Asaf Penso <asafp@nvidia.com>; dev@dpdk.org
> *Cc:* Matan Azrad <matan@nvidia.com>; Jack Min <jackmin@nvidia.com>
> *Subject:* Re: [dpdk-dev] rte_flow ageing
>
> *External email: Use caution opening links or attachments*
>
> Thanks a lot Asaf, for your answer, so fast.
>
> depending on the feature we want, the table you mentioned in the doc 
> may give different combinations. Mine, DPDK-20.08/OFED 5.1-2, is part 
> of the list.
>
> Anyway, my question is more about the API design. Please, find my 
> comments below.
>
> On 3/29/21 8:02 PM, Asaf Penso wrote:
>
>     Hello David,
>
>     Thanks for reaching out, I'll try to answer as best as I know and I added Matan who will be able to provide further info during next week.
>
>     First, according to our pmd documentation (http://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads  <https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fdoc.dpdk.org%2Fguides%2Fnics%2Fmlx5.html%23supported-hardware-offloads&data=04%7C01%7Cmatan%40nvidia.com%7Cdfc24177f1fa4209c81f08d8f392e4c2%7C43083d15727340c1b7db39efd9ccc17a%7C0%7C0%7C637527159538915512%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8xI9xcx8uhHTBDr22aZi986oXyTHTN8E6NKsx%2BYMqAQ%3D&reserved=0>) we recommend using DPDK20.11 and OFED5.2, and not the combo you are referring to.
>
>     Second, we can always improve our documentation and I appreciate your queries.
>
>     Please see my comments inline.
>
>     Regards,
>
>     Asaf Penso
>
>         -----Original Message-----
>
>         From: dev<dev-bounces@dpdk.org>  <mailto:dev-bounces@dpdk.org>  On Behalf Of David Bouyeure
>
>         Sent: Monday, March 29, 2021 11:35 AM
>
>         To:dev@dpdk.org  <mailto:dev@dpdk.org>
>
>         Subject: [dpdk-dev] rte_flow ageing
>
>         Hi,
>
>         I've found out the pretty useful experimental brand new flow ageing API
>
>         implemented in the mlx5 PMD.
>
>     It is useful and I hope you'll fully understand at the end why😊
>
>       
>
>         I'm trying it (rte_eth_dev_callback_register(RTE_ETH_EVENT_FLOW_AGED),
>
>         RTE_FLOW_ACTION_TYPE_AGE) to recover any flow that I previously
>
>         offloaded.
>
>         The DPDK version is 20.08 and Mellanox(Connect-X6) OFED drivers are 5.1-
>
>         2.5.8.0.
>
>     See above the suggested versions for this feature
>
>         I eventually don't see the usefulness of the callback since it's actually triggered
>
>         indirectly by us(the DPDK application) when calling
>
>         rte_flow_get_aged_flows().
>
>     The main intention is to offload the aging logic from the application level to the pmd level.
>
>     There is so saving of cpu cycles, and the gain here is with simplicity.
>
>     The application doesn't need to have complex logic of comparison between counters or other HW info that can be retrieve.
>
>     Now, the pmd hides all of that and leaves the application only to decide what to do with the flows that are aged out.
>
>     Please note, the pmd does not delete any flow, just provide the list of all the flows that are aged.
>
> I fully understand that and this is a very very useful feature to us.
>
>         If we don't call it, the callback is called only once.
>
>         And, calling rte_flow_get_aged_flows() from the callback won't trigger it next
>
>         time(MLX5_AGE_TRIGGER is reset after the callback call)
>
>     Once you call the function the pmd will not trigger more events. Now it's up to the application to decide what to do.
>
>     Doing it differently, will cause an interrupt storm and the pmd avoids that.If new flows are aged then the pmd will trigger a new event.
>
> Sorry, I wasn't realizing that the callback isn't called for each flow 
> but rather for each port, though it's clear in the PMD code. But, the 
> fact that we can register several RTE_ETH_EVENT_FLOW_AGED event 
> handlers is surprising.
>
> [MA] Yes you can register the event for each port support aging if you 
> want your callback will be called for “new” aged flows.
>
> So, you suggest to use the callback as an indicator to later retrieve 
> the aged-out flows, that's it?
>
> [MA] the user has 2 options:
>
>  1. Register the AGE event -> in event time to query the aged-out
>     flows by the rte_flow_get_aged_flows API, this call will trigger a
>     new event when new aged-out flow will be detected for the port.(if
>     you don’t call rte_flow_get_aged_flows the event will not be
>     retriggered.)
>  2. Just call rte_flow_get_aged_flows from time to time(application
>     polling).
>
> Wouldn't calling rte_flow_get_aged_flows with NULL param just to get 
> the number of aged_flows do the same, without the need to un/register 
> a callback, and DPDK to call it?
>
> [MA]
>
> Here, application need to do polling all the time (option 2), in 
> option 1 application invest effort only when aged-out flows are detected.
>
> In option 1, you can call it with NULL also in order to know what is 
> the array size you need for the actual call.
>
> Another thing, the explanation here 
> http://doc.dpdk.org/api/rte__flow_8h.html#a43763e0794d2696b18b6272619aafc2a 
> <https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fdoc.dpdk.org%2Fapi%2Frte__flow_8h.html%23a43763e0794d2696b18b6272619aafc2a&data=04%7C01%7Cmatan%40nvidia.com%7Cdfc24177f1fa4209c81f08d8f392e4c2%7C43083d15727340c1b7db39efd9ccc17a%7C0%7C0%7C637527159538925502%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=h0Vp1jtf9NKmgywkL4LLOuSDxLR4VzqPH6mS6aD0%2FyI%3D&reserved=0> 
> *"...to get the aged flows usynchronously from the event callback..."* 
> seems wrong to me because age_info->flags is set to 0 just after the 
> callback, thus ML5_AGE_TRIGGER is canceled and no event will be 
> triggered before we'll call rte_flow_get_aged_flows() outside of the 
> callback.
>
> [MA] It just say you can choose one of the options *usynchronously 
> (option 1), synchronously (option 2).*
>
> Matan
>
>         Furthermore, I don't see the point of computing ageing flows in
>
>         mlx5_fow.c::mlx5_flow_aging_check() if the client callback isn't called.
>
>     Can you elaborate? I'm not sure I understand your intention.
>
> Please forgot :-)
>
>         So far, I can handle the flow ageing from the same thread as the one which is
>
>         handling the flow direction(rte_flow), it even avoid threads synchronization.
>
>         But, in the future, I may need to be noticed as soon as possible of a single flow
>
>         ageing, and thus handle this flow logic from the ageing callback.
>
>         I may misunderstand the whole ageing API... Thanks a lot for any clarification.
>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] rte_flow ageing
  2021-03-30 15:45   ` David Bouyeure
@ 2021-04-05 10:23     ` Matan Azrad
  2021-04-07 16:19       ` David Bouyeure
  0 siblings, 1 reply; 27+ messages in thread
From: Matan Azrad @ 2021-04-05 10:23 UTC (permalink / raw)
  To: David Bouyeure, Asaf Penso, dev; +Cc: Jack Min

Hi

I will try to answer inline with prefix [MA].

From: David Bouyeure <david.bouyeure@fraudbuster.mobi>
Sent: Tuesday, March 30, 2021 6:46 PM
To: Asaf Penso <asafp@nvidia.com>; dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>; Jack Min <jackmin@nvidia.com>
Subject: Re: [dpdk-dev] rte_flow ageing

External email: Use caution opening links or attachments


Thanks a lot Asaf, for your answer, so fast.

depending on the feature we want, the table you mentioned in the doc may give different combinations. Mine, DPDK-20.08/OFED 5.1-2, is part of the list.

Anyway, my question is more about the API design. Please, find my comments below.
On 3/29/21 8:02 PM, Asaf Penso wrote:

Hello David,



Thanks for reaching out, I'll try to answer as best as I know and I added Matan who will be able to provide further info during next week.

First, according to our pmd documentation (http://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads<https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fdoc.dpdk.org%2Fguides%2Fnics%2Fmlx5.html%23supported-hardware-offloads&data=04%7C01%7Cmatan%40nvidia.com%7Cdfc24177f1fa4209c81f08d8f392e4c2%7C43083d15727340c1b7db39efd9ccc17a%7C0%7C0%7C637527159538915512%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8xI9xcx8uhHTBDr22aZi986oXyTHTN8E6NKsx%2BYMqAQ%3D&reserved=0>) we recommend using DPDK20.11 and OFED5.2, and not the combo you are referring to.

Second, we can always improve our documentation and I appreciate your queries.



Please see my comments inline.



Regards,

Asaf Penso



-----Original Message-----
From: dev <dev-bounces@dpdk.org><mailto:dev-bounces@dpdk.org> On Behalf Of David Bouyeure
Sent: Monday, March 29, 2021 11:35 AM
To: dev@dpdk.org<mailto:dev@dpdk.org>
Subject: [dpdk-dev] rte_flow ageing

Hi,

I've found out the pretty useful experimental brand new flow ageing API
implemented in the mlx5 PMD.

It is useful and I hope you'll fully understand at the end why 😊

I'm trying it (rte_eth_dev_callback_register(RTE_ETH_EVENT_FLOW_AGED),
RTE_FLOW_ACTION_TYPE_AGE) to recover any flow that I previously
offloaded.

The DPDK version is 20.08 and Mellanox(Connect-X6) OFED drivers are 5.1-
2.5.8.0.

See above the suggested versions for this feature


I eventually don't see the usefulness of the callback since it's actually triggered
indirectly by us(the DPDK application) when calling
rte_flow_get_aged_flows().

The main intention is to offload the aging logic from the application level to the pmd level.
There is so saving of cpu cycles, and the gain here is with simplicity.
The application doesn't need to have complex logic of comparison between counters or other HW info that can be retrieve.
Now, the pmd hides all of that and leaves the application only to decide what to do with the flows that are aged out.
Please note, the pmd does not delete any flow, just provide the list of all the flows that are aged.

I fully understand that and this is a very very useful feature to us.

If we don't call it, the callback is called only once.

And, calling rte_flow_get_aged_flows() from the callback won't trigger it next
time(MLX5_AGE_TRIGGER is reset after the callback call)

Once you call the function the pmd will not trigger more events. Now it's up to the application to decide what to do.
Doing it differently, will cause an interrupt storm and the pmd avoids that.If new flows are aged then the pmd will trigger a new event.

Sorry, I wasn't realizing that the callback isn't called for each flow but rather for each port, though it's clear in the PMD code. But, the fact that we can register several RTE_ETH_EVENT_FLOW_AGED event handlers is surprising.

[MA] Yes you can register the event for each port support aging if you want your callback will be called for “new” aged flows.

So, you suggest to use the callback as an indicator to later retrieve the aged-out flows, that's it?

[MA] the user has 2 options:

  1.  Register the AGE event -> in event time to query the aged-out flows by the rte_flow_get_aged_flows API, this call will trigger a new event when new aged-out flow will be detected for the port.(if you don’t call rte_flow_get_aged_flows the event will not be retriggered.)
  2.  Just call rte_flow_get_aged_flows from time to time(application polling).



Wouldn't calling rte_flow_get_aged_flows with NULL param just to get the number of aged_flows do the same, without the need to un/register a callback, and DPDK to call it?



[MA]

Here, application need to do polling all the time (option 2), in option 1 application invest effort only when aged-out flows are detected.

In option 1, you can call it with NULL also in order to know what is the array size you need for the actual call.
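
For illustration only, a rough sketch of option 1 (untested; the
handle_aged_flow() helper and the error handling are just examples, not an
existing API), registering the event and draining the aged-out flows from
inside the callback so the next aged-out flow can retrigger the event:

#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Placeholder for the application's per-flow handling. */
static void
handle_aged_flow(uint16_t port_id, void *age_ctx)
{
	/* age_ctx is whatever was put in rte_flow_action_age.context. */
	printf("port %u: flow context %p aged out\n", port_id, age_ctx);
}

static int
flow_aged_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	struct rte_flow_error error;
	void **contexts;
	int nb, i;

	(void)event;
	(void)cb_arg;
	(void)ret_param;

	/* First call with NULL only returns how many flows aged out. */
	nb = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	if (nb <= 0)
		return 0;
	contexts = calloc(nb, sizeof(*contexts));
	if (contexts == NULL)
		return 0;
	/* Second call fetches the AGE contexts; per option 1 this is also what
	 * allows a new event to be sent for the next aged-out flows. */
	nb = rte_flow_get_aged_flows(port_id, contexts, nb, &error);
	for (i = 0; i < nb; i++)
		handle_aged_flow(port_id, contexts[i]);
	free(contexts);
	return 0;
}

/* Registration, once per port that uses RTE_FLOW_ACTION_TYPE_AGE: */
static int
enable_age_event(uint16_t port_id)
{
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_FLOW_AGED,
					     flow_aged_cb, NULL);
}
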
Another thing, the explanation here http://doc.dpdk.org/api/rte__flow_8h.html#a43763e0794d2696b18b6272619aafc2a<https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fdoc.dpdk.org%2Fapi%2Frte__flow_8h.html%23a43763e0794d2696b18b6272619aafc2a&data=04%7C01%7Cmatan%40nvidia.com%7Cdfc24177f1fa4209c81f08d8f392e4c2%7C43083d15727340c1b7db39efd9ccc17a%7C0%7C0%7C637527159538925502%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=h0Vp1jtf9NKmgywkL4LLOuSDxLR4VzqPH6mS6aD0%2FyI%3D&reserved=0> "...to get the aged flows usynchronously from the event callback..." seems wrong to me because age_info->flags is set to 0 just after the callback, thus ML5_AGE_TRIGGER is canceled and no event will be triggered before we'll call rte_flow_get_aged_flows() outside of the callback.

[MA] It just say you can choose one of the options usynchronously (option 1), synchronously (option 2).

Matan






Furthermore, I don't see the point of computing ageing flows in
mlx5_fow.c::mlx5_flow_aging_check() if the client callback isn't called.

Can you elaborate? I'm not sure I understand your intention.
Please forgot :-)

So far, I can handle the flow ageing from the same thread as the one which is
handling the flow direction(rte_flow), it even avoid threads synchronization.
But, in the future, I may need to be noticed as soon as possible of a single flow
ageing, and thus handle this flow logic from the ageing callback.

I may misunderstand the whole ageing API... Thanks a lot for any clarification.



^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] rte_flow ageing
  2021-03-29 18:02 ` Asaf Penso
@ 2021-03-30 15:45   ` David Bouyeure
  2021-04-05 10:23     ` Matan Azrad
  0 siblings, 1 reply; 27+ messages in thread
From: David Bouyeure @ 2021-03-30 15:45 UTC (permalink / raw)
  To: Asaf Penso, dev; +Cc: Matan Azrad, Jack Min

Thanks a lot, Asaf, for your fast answer.

Depending on the feature we want, the table you mentioned in the doc may 
give different combinations. Mine, DPDK-20.08/OFED 5.1-2, is part of the 
list.

Anyway, my question is more about the API design. Please find my 
comments below.

On 3/29/21 8:02 PM, Asaf Penso wrote:
> Hello David,
>
> Thanks for reaching out, I'll try to answer as best as I know and I added Matan who will be able to provide further info during next week.
> First, according to our pmd documentation (http://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads) we recommend using DPDK20.11 and OFED5.2, and not the combo you are referring to.
> Second, we can always improve our documentation and I appreciate your queries.
>
> Please see my comments inline.
>
> Regards,
> Asaf Penso
>
>> -----Original Message-----
>> From: dev <dev-bounces@dpdk.org> On Behalf Of David Bouyeure
>> Sent: Monday, March 29, 2021 11:35 AM
>> To: dev@dpdk.org
>> Subject: [dpdk-dev] rte_flow ageing
>>
>> Hi,
>>
>>
>> I've found out the pretty useful experimental brand new flow ageing API
>> implemented in the mlx5 PMD.
> It is useful and I hope you'll fully understand at the end why 😊
>   
>
>> I'm trying it (rte_eth_dev_callback_register(RTE_ETH_EVENT_FLOW_AGED),
>> RTE_FLOW_ACTION_TYPE_AGE) to recover any flow that I previously
>> offloaded.
>>
>> The DPDK version is 20.08 and Mellanox(Connect-X6) OFED drivers are 5.1-
>> 2.5.8.0.
>>
> See above the suggested versions for this feature
>
>> I eventually don't see the usefulness of the callback since it's actually triggered
>> indirectly by us(the DPDK application) when calling
>> rte_flow_get_aged_flows().
> The main intention is to offload the aging logic from the application level to the pmd level.
> There is so saving of cpu cycles, and the gain here is with simplicity.
> The application doesn't need to have complex logic of comparison between counters or other HW info that can be retrieve.
> Now, the pmd hides all of that and leaves the application only to decide what to do with the flows that are aged out.
> Please note, the pmd does not delete any flow, just provide the list of all the flows that are aged.
I fully understand that, and this is a very useful feature for us.
>> If we don't call it, the callback is called only once.
>>
>> And, calling rte_flow_get_aged_flows() from the callback won't trigger it next
>> time(MLX5_AGE_TRIGGER is reset after the callback call)
> Once you call the function the pmd will not trigger more events. Now it's up to the application to decide what to do.
> Doing it differently, will cause an interrupt storm and the pmd avoids that.If new flows are aged then the pmd will trigger a new event.

Sorry, I hadn't realized that the callback isn't called for each flow 
but rather for each port, though it's clear in the PMD code. But the 
fact that we can register several RTE_ETH_EVENT_FLOW_AGED event handlers 
is surprising.

So, you suggest using the callback as an indicator to later retrieve 
the aged-out flows, is that it?

Wouldn't calling rte_flow_get_aged_flows with a NULL parameter, just to get 
the number of aged flows, do the same, without the need to register/unregister 
a callback and have DPDK call it?

Another thing: the explanation here 
http://doc.dpdk.org/api/rte__flow_8h.html#a43763e0794d2696b18b6272619aafc2a 
*"...to get the aged flows asynchronously from the event callback..."* 
seems wrong to me, because age_info->flags is set to 0 just after the 
callback, thus MLX5_AGE_TRIGGER is cleared and no event will be 
triggered before we call rte_flow_get_aged_flows() outside of the 
callback.
>> Furthermore, I don't see the point of computing ageing flows in
>> mlx5_fow.c::mlx5_flow_aging_check() if the client callback isn't called.
>>
> Can you elaborate? I'm not sure I understand your intention.
Please forget it :-)
>
>> So far, I can handle the flow ageing from the same thread as the one which is
>> handling the flow direction(rte_flow), it even avoid threads synchronization.
>> But, in the future, I may need to be noticed as soon as possible of a single flow
>> ageing, and thus handle this flow logic from the ageing callback.
>>
>>
>> I may misunderstand the whole ageing API... Thanks a lot for any clarification.

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] rte_flow ageing
  2021-03-29  8:34 [dpdk-dev] rte_flow ageing David Bouyeure
@ 2021-03-29 18:02 ` Asaf Penso
  2021-03-30 15:45   ` David Bouyeure
  0 siblings, 1 reply; 27+ messages in thread
From: Asaf Penso @ 2021-03-29 18:02 UTC (permalink / raw)
  To: David Bouyeure, dev; +Cc: Matan Azrad, Jack Min

Hello David,

Thanks for reaching out. I'll try to answer as best as I can, and I've added Matan, who will be able to provide further info next week.
First, according to our pmd documentation (http://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads) we recommend using DPDK20.11 and OFED5.2, and not the combo you are referring to.
Second, we can always improve our documentation and I appreciate your queries. 

Please see my comments inline.

Regards,
Asaf Penso

>-----Original Message-----
>From: dev <dev-bounces@dpdk.org> On Behalf Of David Bouyeure
>Sent: Monday, March 29, 2021 11:35 AM
>To: dev@dpdk.org
>Subject: [dpdk-dev] rte_flow ageing
>
>Hi,
>
>
>I've found out the pretty useful experimental brand new flow ageing API
>implemented in the mlx5 PMD.

It is useful and I hope you'll fully understand at the end why 😊
 

>
>I'm trying it (rte_eth_dev_callback_register(RTE_ETH_EVENT_FLOW_AGED),
>RTE_FLOW_ACTION_TYPE_AGE) to recover any flow that I previously
>offloaded.
>
>The DPDK version is 20.08 and Mellanox(Connect-X6) OFED drivers are 5.1-
>2.5.8.0.
>

See above the suggested versions for this feature

>I eventually don't see the usefulness of the callback since it's actually triggered
>indirectly by us(the DPDK application) when calling
>rte_flow_get_aged_flows().

The main intention is to offload the aging logic from the application level to the pmd level.
There is some saving of CPU cycles, and the gain here is simplicity. 
The application doesn't need complex logic for comparing counters or other HW info that can be retrieved.
Now, the pmd hides all of that and leaves the application only to decide what to do with the flows that are aged out.
Please note, the pmd does not delete any flow, it just provides the list of all the flows that are aged.

> If we don't call it, the callback is called only once.
>
>And, calling rte_flow_get_aged_flows() from the callback won't trigger it next
>time(MLX5_AGE_TRIGGER is reset after the callback call)

Once you call the function the pmd will not trigger more events. Now it's up to the application to decide what to do.
Doing it differently would cause an interrupt storm, and the pmd avoids that.
If new flows are aged then the pmd will trigger a new event.

>
>Furthermore, I don't see the point of computing ageing flows in
>mlx5_fow.c::mlx5_flow_aging_check() if the client callback isn't called.
>

Can you elaborate? I'm not sure I understand your intention.

>So far, I can handle the flow ageing from the same thread as the one which is
>handling the flow direction(rte_flow), it even avoid threads synchronization.
>But, in the future, I may need to be noticed as soon as possible of a single flow
>ageing, and thus handle this flow logic from the ageing callback.
>
>
>I may misunderstand the whole ageing API... Thanks a lot for any clarification.


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] rte_flow ageing
@ 2021-03-29  8:34 David Bouyeure
  2021-03-29 18:02 ` Asaf Penso
  0 siblings, 1 reply; 27+ messages in thread
From: David Bouyeure @ 2021-03-29  8:34 UTC (permalink / raw)
  To: dev

Hi,


I've found the pretty useful, brand new experimental flow ageing API 
implemented in the mlx5 PMD.

I'm trying it (rte_eth_dev_callback_register(RTE_ETH_EVENT_FLOW_AGED), 
RTE_FLOW_ACTION_TYPE_AGE) to recover any flow that I previously offloaded.
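
To give some context, here is a stripped-down sketch of how I attach the
ageing action (simplified and illustrative: the my_flow record, the pattern
argument and the queue index are only examples, not my real code):

#include <rte_flow.h>

struct my_flow {                   /* my own bookkeeping record (example) */
	struct rte_flow *handle;
};

static struct rte_flow *
offload_with_age(uint16_t port_id, struct my_flow *rec,
		 const struct rte_flow_item pattern[],
		 struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_age age = {
		.timeout = 30,   /* seconds without traffic before the flow ages out */
		.context = rec,  /* reported back by rte_flow_get_aged_flows() */
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	rec->handle = rte_flow_create(port_id, &attr, pattern, actions, err);
	return rec->handle;
}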

The DPDK version is 20.08 and the Mellanox (ConnectX-6) OFED drivers are 
5.1-2.5.8.0.

I don't really see the usefulness of the callback, since it's 
actually triggered indirectly by us (the DPDK application) when calling 
rte_flow_get_aged_flows(). If we don't call it, the callback is called 
only once.

And calling rte_flow_get_aged_flows() from the callback won't trigger 
it next time (MLX5_AGE_TRIGGER is reset after the callback call).

Furthermore, I don't see the point of computing ageing flows in 
mlx5_flow.c::mlx5_flow_aging_check() if the client callback isn't called.

So far, I can handle the flow ageing from the same thread as the one 
which is handling the flow direction (rte_flow); it even avoids thread 
synchronization. But, in the future, I may need to be notified as soon as 
possible of a single flow's ageing, and thus handle this flow logic from 
the ageing callback.


I may misunderstand the whole ageing API... Thanks a lot for any 
clarification.


^ permalink raw reply	[flat|nested] 27+ messages in thread

end of thread, other threads:[~2021-12-28 14:45 UTC | newest]

Thread overview: 27+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-02-22 10:09 [dpdk-dev] [PATCH 0/2] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
2021-02-22 10:09 ` [dpdk-dev] [PATCH 1/2] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
2021-03-26 10:42   ` [dpdk-dev] [PATCH v2 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
2021-03-26 14:29       ` [dpdk-dev] [PATCH v3 0/3] support GTPU inner IPv4/IPv6 for AVF FDIR Junfeng Guo
2021-03-26  6:41         ` Zhang, Qi Z
2021-03-31 14:54           ` Ferruh Yigit
2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Junfeng Guo
2021-03-29  7:50           ` [dpdk-dev] rte_flow ageing David Bouyeure
2021-03-29  8:32             ` David Bouyeure
2021-03-31 14:53           ` [dpdk-dev] [PATCH v3 1/3] net/iavf: support GTPU inner IPv4 for FDIR Ferruh Yigit
2021-12-28 14:45           ` [dpdk-dev] net/mlx5: rte_flow_item_gtp restricted to GTPU David Bouyeure
2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 2/3] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
2021-03-26 14:29         ` [dpdk-dev] [PATCH v3 3/3] doc: add release notes for 21.05 Junfeng Guo
2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 2/3] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
2021-03-26 10:42     ` [dpdk-dev] [PATCH v2 3/3] doc: add release notes for 21.05 Junfeng Guo
2021-03-26  6:08       ` Zhang, Qi Z
2021-02-22 10:09 ` [dpdk-dev] [PATCH 2/2] net/iavf: support GTPU inner IPv6 for FDIR Junfeng Guo
2021-03-29  8:34 [dpdk-dev] rte_flow ageing David Bouyeure
2021-03-29 18:02 ` Asaf Penso
2021-03-30 15:45   ` David Bouyeure
2021-04-05 10:23     ` Matan Azrad
2021-04-07 16:19       ` David Bouyeure
2021-04-07 18:09         ` Matan Azrad
2021-04-08  7:50           ` David Bouyeure
2021-04-08  9:52             ` Matan Azrad
2021-04-08 16:45               ` David Bouyeure

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).