DPDK patches and discussions
* [dpdk-dev] [dpdk-dev 0/4] add RSS configuration for iavf
@ 2020-03-18 17:03 Jeff Guo
  2020-03-18 17:03 ` [dpdk-dev] [dpdk-dev 1/4] ethdev: add new RSS offload types Jeff Guo
                   ` (3 more replies)
  0 siblings, 4 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-18 17:03 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

The VF must be capable of configuring RSS, so add RSS configuration
support for iavf. The supported protocol RSS input sets are listed below,
followed by an illustrative testpmd example.

eth-src-only/
eth-dst-only/
svlan/
cvlan/
ipv4/
ipv6/
l3_src_only/
l3_dst_only/
l2tpv3/
esp/
ah/
pfcp/
gtpu down/
gtpu up/
udp/
tcp/
sctp/
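
As an illustrative sketch only (not part of the series): once patches 3/4
are applied, one of these input sets could be exercised from testpmd on a
hypothetical port 0 with a command such as the following; the exact
accepted syntax depends on the final version of these patches.

  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only end queues end / end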

This patchset depends on the following patch sets:
(1)http://patches.dpdk.org/project/dpdk/list/?series=8961
	net/iavf: support FDIR capability
(2)http://patches.dpdk.org/project/dpdk/list/?series=8950
	add generic filter support for iavf

Jeff Guo (4):
  ethdev: add new RSS offload types
  net/iavf: add RSS configuration for VFs
  app/testpmd: support GTP PDU type
  app/testpmd: add new types to RSS hash commands

 app/test-pmd/cmdline.c         |   24 +-
 app/test-pmd/cmdline_flow.c    |   11 +-
 app/test-pmd/config.c          |    8 +
 drivers/net/iavf/Makefile      |    1 +
 drivers/net/iavf/iavf.h        |   10 +
 drivers/net/iavf/iavf_hash.c   | 1134 ++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c  |   33 +-
 drivers/net/iavf/meson.build   |    1 +
 lib/librte_ethdev/rte_ethdev.h |   12 +-
 9 files changed, 1225 insertions(+), 9 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_hash.c

-- 
2.20.1



* [dpdk-dev] [dpdk-dev 1/4] ethdev: add new RSS offload types
  2020-03-18 17:03 [dpdk-dev] [dpdk-dev 0/4] add RSS configuration for iavf Jeff Guo
@ 2020-03-18 17:03 ` Jeff Guo
  2020-03-18 17:03 ` [dpdk-dev] [dpdk-dev 2/4] net/iavf: add RSS configuration for VFs Jeff Guo
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-18 17:03 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

Define new RSS offload types for ETH/SVLAN/CVLAN/GTPU/L2TPV3/
ESP/AH/PFCP.
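
As a minimal sketch (assuming the flag names defined by this patch and a
hypothetical example_set_rss() helper), an application could request the
new hash fields through the usual RSS configuration API:

  #include <rte_ethdev.h>

  /* Illustrative only: hash on ESP SPI and S-VLAN ID in addition to
   * plain IPv4, using the offload bits defined above.
   */
  static int
  example_set_rss(uint16_t port_id)
  {
  	struct rte_eth_rss_conf rss_conf = {
  		.rss_key = NULL,	/* keep the currently programmed key */
  		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_ESP_SPI | ETH_RSS_S_VLAN,
  	};

  	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
  }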

Change-Id: I2459dd40c9867632e084d235d8e25b7471cdb5af
Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
 lib/librte_ethdev/rte_ethdev.h | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index d1a593ad1..074bae238 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -524,7 +524,17 @@ struct rte_eth_rss_conf {
 #define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
 #define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
 #define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
-#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
+#define ETH_RSS_L4_DST_ONLY	   (1ULL << 60)
+#define ETH_RSS_ETH_SRC_ONLY	   (1ULL << 59)
+#define ETH_RSS_ETH_DST_ONLY	   (1ULL << 58)
+#define ETH_RSS_S_VLAN		   (1ULL << 57)
+#define ETH_RSS_C_VLAN		   (1ULL << 56)
+#define ETH_RSS_ESP_SPI		   (1ULL << 55)
+#define ETH_RSS_AH_SPI		   (1ULL << 54)
+#define ETH_RSS_L2TPV3_SESS_ID     (1ULL << 53)
+#define ETH_RSS_GTPU_UP		   (1ULL << 52)
+#define ETH_RSS_GTPU_DWN	   (1ULL << 51)
+#define ETH_RSS_PFCP_SEID	   (1ULL << 50)
 
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
-- 
2.20.1



* [dpdk-dev] [dpdk-dev 2/4] net/iavf: add RSS configuration for VFs
  2020-03-18 17:03 [dpdk-dev] [dpdk-dev 0/4] add RSS configuration for iavf Jeff Guo
  2020-03-18 17:03 ` [dpdk-dev] [dpdk-dev 1/4] ethdev: add new RSS offload types Jeff Guo
@ 2020-03-18 17:03 ` Jeff Guo
  2020-03-18 17:04 ` [dpdk-dev] [dpdk-dev 3/4] app/testpmd: support GTP PDU type Jeff Guo
  2020-03-18 17:04 ` [dpdk-dev] [dpdk-dev 4/4] app/testpmd: add new types to RSS hash commands Jeff Guo
  3 siblings, 0 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-18 17:03 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

Add RSS configuration support for VFs. The VF negotiates the advanced
RSS offload capability and then adds or deletes RSS rules through
virtchnl messages.
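
A minimal sketch of how an application might install such a rule through
rte_flow (the port id and 40-byte key below are assumed placeholders, not
taken from this patch):

  #include <rte_ethdev.h>
  #include <rte_flow.h>

  /* Illustrative only: hash IPv4-UDP flows on the source address. */
  static struct rte_flow *
  example_create_rss_rule(uint16_t port_id, const uint8_t key[40])
  {
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
  		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action_rss rss = {
  		.types = ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
  		.key_len = 40,	/* the parser rejects a zero key_len */
  		.key = key,
  	};
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};
  	struct rte_flow_error err;

  	return rte_flow_create(port_id, &attr, pattern, actions, &err);
  }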

Change-Id: I2e5692ef80d4bcd8852488d82f70386156827007
Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
 drivers/net/iavf/Makefile     |    1 +
 drivers/net/iavf/iavf.h       |   10 +
 drivers/net/iavf/iavf_hash.c  | 1134 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c |   33 +-
 drivers/net/iavf/meson.build  |    1 +
 5 files changed, 1174 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_hash.c

diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
index 1bf0f26b5..7b0093a3e 100644
--- a/drivers/net/iavf/Makefile
+++ b/drivers/net/iavf/Makefile
@@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
 ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
 endif
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 6cefa1bf2..ce264a6ad 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -74,6 +74,13 @@
 #define IAVF_COMMS_PROTO_L2TPv3         0x0000000000000008
 #define IAVF_COMMS_PROTO_ESP            0x0000000000000010
 
+/* DDP package type */
+enum ice_pkg_type {
+	IAVF_PKG_TYPE_UNKNOWN,
+	IAVF_PKG_TYPE_OS_DEFAULT,
+	IAVF_PKG_TYPE_COMMS,
+};
+
 struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
@@ -151,6 +158,7 @@ struct iavf_adapter {
 	bool tx_vec_allowed;
 	const uint32_t *ptype_tbl;
 	bool stopped;
+	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 };
 
 /* IAVF_DEV_PRIVATE_TO */
@@ -256,4 +264,6 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
 int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
 			 struct rte_ether_addr *addr, bool add);
 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
+int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
+			 struct virtchnl_rss_cfg *rss_cfg, bool add);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
new file mode 100644
index 000000000..bda56f3f8
--- /dev/null
+++ b/drivers/net/iavf/iavf_hash.c
@@ -0,0 +1,1134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "iavf_log.h"
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+
+#define IAVF_ACTION_RSS_MAX_QUEUE_NUM 32
+
+struct rss_type_match_hdr {
+	uint32_t hdr_mask;
+	uint64_t eth_rss_hint;
+};
+
+struct iavf_hash_match_type {
+	uint64_t hash_type;
+	struct virtchnl_proto_stack *proto_stack;
+};
+
+struct iavf_rss_meta {
+	struct virtchnl_proto_stack *proto_stack;
+	uint32_t hash_function;
+};
+
+struct iavf_hash_flow_cfg {
+	bool simple_xor;
+	struct virtchnl_rss_cfg *rss_cfg;
+};
+
+static int
+iavf_hash_init(struct iavf_adapter *ad);
+static int
+iavf_hash_create(struct iavf_adapter *ad, struct rte_flow *flow, void *meta,
+		 struct rte_flow_error *error);
+static int
+iavf_hash_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error);
+static void
+iavf_hash_uninit(struct iavf_adapter *ad);
+static void
+iavf_hash_free(struct rte_flow *flow);
+static int
+iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
+			       struct iavf_pattern_match_item *array,
+			       uint32_t array_len,
+			       const struct rte_flow_item pattern[],
+			       const struct rte_flow_action actions[],
+			       void **meta,
+			       struct rte_flow_error *error);
+
+/* Generate flow hash field from flow field type(s) */
+#define IAVF_FLOW_HASH_ETH	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_ETH_DST) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_ETH_SRC))
+#define IAVF_FLOW_HASH_IPV4	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_IPV4_SRC) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_IPV4_DST))
+#define IAVF_FLOW_HASH_IPV6	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_IPV6_DST))
+#define IAVF_FLOW_HASH_TCP_PORT	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_TCP_DST_PORT))
+#define IAVF_FLOW_HASH_UDP_PORT	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_UDP_DST_PORT))
+#define IAVF_FLOW_HASH_SCTP_PORT	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT))
+
+#define IAVF_HASH_INVALID	0
+#define IAVF_HASH_TCP_IPV4	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_TCP_PORT)
+#define IAVF_HASH_TCP_IPV6	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_TCP_PORT)
+#define IAVF_HASH_UDP_IPV4	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_UDP_PORT)
+#define IAVF_HASH_UDP_IPV6	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_UDP_PORT)
+#define IAVF_HASH_SCTP_IPV4	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_SCTP_PORT)
+#define IAVF_HASH_SCTP_IPV6	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_SCTP_PORT)
+
+#define IAVF_FLOW_HASH_GTP_U_TEID \
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID))
+
+#define IAVF_FLOW_HASH_GTP_U_IPV4_TEID \
+	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_GTP_U_TEID)
+#define IAVF_FLOW_HASH_GTP_U_IPV6_TEID \
+	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_GTP_U_TEID)
+
+#define IAVF_FLOW_HASH_GTP_U_EH_TEID \
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID))
+
+#define IAVF_FLOW_HASH_GTP_U_EH_QFI \
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_GTPU_EH_QFI))
+
+#define IAVF_FLOW_HASH_GTP_U_IPV4_EH \
+	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_GTP_U_EH_TEID | \
+	 IAVF_FLOW_HASH_GTP_U_EH_QFI)
+#define IAVF_FLOW_HASH_GTP_U_IPV6_EH \
+	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_GTP_U_EH_TEID | \
+	 IAVF_FLOW_HASH_GTP_U_EH_QFI)
+
+/* The first member is protocol header, the second member is ETH_RSS_*. */
+struct rss_type_match_hdr iavf_hint_empty = {
+	VIRTCHNL_PROTO_HDR_NONE,	0};
+struct rss_type_match_hdr iavf_hint_eth_ipv4 = {
+	VIRTCHNL_PROTO_HDR_IPV4,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_udp = {
+	VIRTCHNL_PROTO_HDR_IPV4 | VIRTCHNL_PROTO_HDR_UDP,
+	ETH_RSS_NONFRAG_IPV4_UDP};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_tcp = {
+	VIRTCHNL_PROTO_HDR_IPV4 | VIRTCHNL_PROTO_HDR_TCP,
+	ETH_RSS_NONFRAG_IPV4_TCP};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_sctp = {
+	VIRTCHNL_PROTO_HDR_IPV4 | VIRTCHNL_PROTO_HDR_SCTP,
+	ETH_RSS_NONFRAG_IPV4_SCTP};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_gtpu_eh = {
+	VIRTCHNL_PROTO_HDR_GTPU_EH,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_esp = {
+	VIRTCHNL_PROTO_HDR_ESP,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_udp_esp = {
+	VIRTCHNL_PROTO_HDR_ESP,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_ah = {
+	VIRTCHNL_PROTO_HDR_AH,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_l2tpv3 = {
+	VIRTCHNL_PROTO_HDR_L2TPV3,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_pfcp = {
+	VIRTCHNL_PROTO_HDR_PFCP,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv6 = {
+	VIRTCHNL_PROTO_HDR_IPV6,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_udp = {
+	VIRTCHNL_PROTO_HDR_IPV6 | VIRTCHNL_PROTO_HDR_UDP,
+	ETH_RSS_NONFRAG_IPV6_UDP};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_tcp = {
+	VIRTCHNL_PROTO_HDR_IPV6 | VIRTCHNL_PROTO_HDR_TCP,
+	ETH_RSS_NONFRAG_IPV6_TCP};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_sctp = {
+	VIRTCHNL_PROTO_HDR_IPV6 | VIRTCHNL_PROTO_HDR_SCTP,
+	ETH_RSS_NONFRAG_IPV6_SCTP};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_esp = {
+	VIRTCHNL_PROTO_HDR_ESP,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_udp_esp = {
+	VIRTCHNL_PROTO_HDR_ESP,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_ah = {
+	VIRTCHNL_PROTO_HDR_AH,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_l2tpv3 = {
+	VIRTCHNL_PROTO_HDR_L2TPV3,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_pfcp = {
+	VIRTCHNL_PROTO_HDR_PFCP,	ETH_RSS_IPV6};
+
+/* Supported pattern for hash. */
+static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
+	{iavf_pattern_eth_ipv4, IAVF_INSET_NONE, &iavf_hint_eth_ipv4},
+	{iavf_pattern_eth_ipv4_udp, IAVF_INSET_NONE, &iavf_hint_eth_ipv4_udp},
+	{iavf_pattern_eth_ipv4_tcp, IAVF_INSET_NONE, &iavf_hint_eth_ipv4_tcp},
+	{iavf_pattern_eth_ipv4_sctp, IAVF_INSET_NONE, &iavf_hint_eth_ipv4_sctp},
+	{iavf_pattern_eth_ipv6, IAVF_INSET_NONE, &iavf_hint_eth_ipv6},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_gtpu_eh},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_gtpu_eh},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_gtpu_eh},
+	{iavf_pattern_eth_ipv4_esp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_esp},
+	{iavf_pattern_eth_ipv4_udp_esp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_udp_esp},
+	{iavf_pattern_eth_ipv4_ah,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_ah},
+	{iavf_pattern_eth_ipv4_l2tpv3,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_l2tpv3},
+	{iavf_pattern_eth_ipv4_pfcp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_pfcp},
+	{iavf_pattern_eth_ipv6_udp, IAVF_INSET_NONE, &iavf_hint_eth_ipv6_udp},
+	{iavf_pattern_eth_ipv6_tcp, IAVF_INSET_NONE, &iavf_hint_eth_ipv6_tcp},
+	{iavf_pattern_eth_ipv6_sctp, IAVF_INSET_NONE, &iavf_hint_eth_ipv6_sctp},
+	{iavf_pattern_eth_ipv6_esp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_esp},
+	{iavf_pattern_eth_ipv6_udp_esp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_udp_esp},
+	{iavf_pattern_eth_ipv6_ah,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_ah},
+	{iavf_pattern_eth_ipv6_l2tpv3,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_l2tpv3},
+	{iavf_pattern_eth_ipv6_pfcp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_pfcp},
+	{iavf_pattern_empty, IAVF_INSET_NONE, &iavf_hint_empty},
+};
+
+#define	GTP_EH_PDU_LINK_UP		1
+#define	GTP_EH_PDU_LINK_DWN		0
+
+#define TUNNEL_LEVEL_OUTER		0
+#define TUNNEL_LEVEL_INNER		1
+
+#define PROTO_COUNT_ONE			1
+#define PROTO_COUNT_TWO			2
+#define PROTO_COUNT_THREE		3
+
+#define BUFF_NOUSED			0
+#define FIELD_FOR_PROTO_ONLY		0
+
+#define proto_hint_eth_src { \
+	VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_ETH_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_eth_dst { \
+	VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_ETH_DST, {BUFF_NOUSED } }
+
+#define proto_hint_svlan { \
+	VIRTCHNL_PROTO_HDR_S_VLAN, VIRTCHNL_PROTO_HDR_S_VLAN_ID, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_cvlan { \
+	VIRTCHNL_PROTO_HDR_C_VLAN, VIRTCHNL_PROTO_HDR_C_VLAN_ID, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_ipv4_src { \
+	VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_IPV4_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4_dst { \
+	VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_IPV4_DST, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4_only { \
+	VIRTCHNL_PROTO_HDR_IPV4, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4 { \
+	VIRTCHNL_PROTO_HDR_IPV4, \
+	VIRTCHNL_PROTO_HDR_IPV4_SRC | VIRTCHNL_PROTO_HDR_IPV4_DST, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_udp_src_port { \
+	VIRTCHNL_PROTO_HDR_UDP, VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_udp_dst_port { \
+	VIRTCHNL_PROTO_HDR_UDP, VIRTCHNL_PROTO_HDR_UDP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_udp_only { \
+	VIRTCHNL_PROTO_HDR_UDP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_udp { \
+	VIRTCHNL_PROTO_HDR_UDP, \
+	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT | VIRTCHNL_PROTO_HDR_UDP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_tcp_src_port { \
+	VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_tcp_dst_port { \
+	VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_TCP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_tcp_only { \
+	VIRTCHNL_PROTO_HDR_TCP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_tcp { \
+	VIRTCHNL_PROTO_HDR_TCP, \
+	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT | VIRTCHNL_PROTO_HDR_TCP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_sctp_src_port { \
+	VIRTCHNL_PROTO_HDR_SCTP, VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_sctp_dst_port { \
+	VIRTCHNL_PROTO_HDR_SCTP, VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_sctp_only { \
+	VIRTCHNL_PROTO_HDR_SCTP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_sctp { \
+	VIRTCHNL_PROTO_HDR_SCTP, \
+	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT | VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_ipv6_src { \
+	VIRTCHNL_PROTO_HDR_IPV6, VIRTCHNL_PROTO_HDR_IPV6_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6_dst { \
+	VIRTCHNL_PROTO_HDR_IPV6, VIRTCHNL_PROTO_HDR_IPV6_DST, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6_only { \
+	VIRTCHNL_PROTO_HDR_IPV6, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6 { \
+	VIRTCHNL_PROTO_HDR_IPV6, \
+	VIRTCHNL_PROTO_HDR_IPV6_SRC | VIRTCHNL_PROTO_HDR_IPV6_DST, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_gtpu_up_only { \
+	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, \
+	FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_gtpu_dwn_only { \
+	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, \
+	FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_esp { \
+	VIRTCHNL_PROTO_HDR_ESP, \
+	VIRTCHNL_PROTO_HDR_ESP_SPI, {BUFF_NOUSED } }
+
+#define proto_hint_ah { \
+	VIRTCHNL_PROTO_HDR_AH, \
+	VIRTCHNL_PROTO_HDR_AH_SPI, {BUFF_NOUSED } }
+
+#define proto_hint_l2tpv3 { \
+	VIRTCHNL_PROTO_HDR_L2TPV3, \
+	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, {BUFF_NOUSED } }
+
+#define proto_hint_pfcp { \
+	VIRTCHNL_PROTO_HDR_PFCP, VIRTCHNL_PROTO_HDR_PFCP_SEID, {BUFF_NOUSED } }
+
+struct virtchnl_proto_stack stack_hint_eth_src = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth_src }
+};
+
+struct virtchnl_proto_stack stack_hint_eth_dst = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth_dst }
+};
+
+struct virtchnl_proto_stack stack_hint_svlan = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_svlan }
+};
+
+struct virtchnl_proto_stack stack_hint_cvlan = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_cvlan }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4_src }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4_dst }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_gtpu_up = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_gtpu_up_only,
+	proto_hint_ipv4_src }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_gtpu_dwn = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_gtpu_dwn_only,
+	proto_hint_ipv4_dst }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_udp_gtpu_up = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_up_only,
+	proto_hint_ipv4_src, proto_hint_udp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_udp_gtpu_dwn = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_dwn_only,
+	proto_hint_ipv4_dst, proto_hint_udp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_tcp_gtpu_up = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_up_only,
+	proto_hint_ipv4_src, proto_hint_tcp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_tcp_gtpu_dwn = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_dwn_only,
+	proto_hint_ipv4_dst, proto_hint_tcp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_sctp_gtpu_up = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_up_only,
+	proto_hint_ipv4_src, proto_hint_sctp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_sctp_gtpu_dwn = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_dwn_only,
+	proto_hint_ipv4_dst, proto_hint_sctp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_esp = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_esp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_ah = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_ah }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_l2tpv3 = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_l2tpv3 }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_pfcp = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_pfcp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4 = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4 }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_udp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+	proto_hint_udp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_tcp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+	proto_hint_tcp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_sctp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+	proto_hint_sctp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6_src }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6_dst }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_esp = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_esp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_ah = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_ah }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_l2tpv3 = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_l2tpv3 }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_pfcp = {
+	TUNNEL_LEVEL_INNER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_pfcp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6 = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6 }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_udp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+	proto_hint_udp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_tcp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+	proto_hint_tcp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_sctp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+	proto_hint_sctp }
+};
+
+/**
+ * The first member is hash type,
+ * the second member is virtchnl protocol stack.
+ */
+struct iavf_hash_match_type iavf_hash_type_list[] = {
+	/* IPV4 */
+	{ETH_RSS_IPV4 | ETH_RSS_ETH_SRC_ONLY,	&stack_hint_eth_src},
+	{ETH_RSS_IPV4 | ETH_RSS_ETH_DST_ONLY,	&stack_hint_eth_dst},
+	{ETH_RSS_IPV4 | ETH_RSS_S_VLAN,		&stack_hint_svlan},
+	{ETH_RSS_IPV4 | ETH_RSS_C_VLAN,		&stack_hint_cvlan},
+	{ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,	&stack_hint_ipv4_src},
+	{ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY,	&stack_hint_ipv4_dst},
+	{ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU_UP,
+						&stack_hint_ipv4_src_gtpu_up},
+	{ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU_DWN,
+						&stack_hint_ipv4_dst_gtpu_dwn},
+	{ETH_RSS_IPV4 | ETH_RSS_ESP_SPI,	&stack_hint_ipv4_esp},
+	{ETH_RSS_IPV4 | ETH_RSS_AH_SPI,		&stack_hint_ipv4_ah},
+	{ETH_RSS_IPV4 | ETH_RSS_L2TPV3_SESS_ID,	&stack_hint_ipv4_l2tpv3},
+	{ETH_RSS_IPV4 | ETH_RSS_PFCP_SEID,	&stack_hint_ipv4_pfcp},
+	{ETH_RSS_IPV4,				&stack_hint_ipv4},
+	/* IPV4 UDP */
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_src_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_src_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU_UP,
+					&stack_hint_ipv4_src_udp_gtpu_up},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv4_src},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_dst_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_dst_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU_DWN,
+					&stack_hint_ipv4_dst_udp_gtpu_dwn},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv4_dst},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP,	&stack_hint_ipv4_udp},
+	/* IPV4 TCP */
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_src_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_src_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU_UP,
+					&stack_hint_ipv4_src_tcp_gtpu_up},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv4_src},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_dst_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_dst_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU_DWN,
+					&stack_hint_ipv4_dst_tcp_gtpu_dwn},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv4_dst},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP,	&stack_hint_ipv4_tcp},
+	/* IPV4 SCTP */
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_src_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_src_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU_UP,
+					&stack_hint_ipv4_src_sctp_gtpu_up},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv4_src},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_dst_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_dst_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU_DWN,
+					&stack_hint_ipv4_dst_sctp_gtpu_dwn},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv4_dst},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP,	&stack_hint_ipv4_sctp},
+	/* IPV6 */
+	{ETH_RSS_IPV6 | ETH_RSS_ETH_SRC_ONLY,	&stack_hint_eth_src},
+	{ETH_RSS_IPV6 | ETH_RSS_ETH_DST_ONLY,	&stack_hint_eth_dst},
+	{ETH_RSS_IPV6 | ETH_RSS_S_VLAN,		&stack_hint_svlan},
+	{ETH_RSS_IPV6 | ETH_RSS_C_VLAN,		&stack_hint_cvlan},
+	{ETH_RSS_IPV6 | ETH_RSS_L3_SRC_ONLY,	&stack_hint_ipv6_src},
+	{ETH_RSS_IPV6 | ETH_RSS_L3_DST_ONLY,	&stack_hint_ipv6_dst},
+	{ETH_RSS_IPV6 | ETH_RSS_ESP_SPI,	&stack_hint_ipv6_esp},
+	{ETH_RSS_IPV6 | ETH_RSS_AH_SPI,		&stack_hint_ipv6_ah},
+	{ETH_RSS_IPV6 | ETH_RSS_L2TPV3_SESS_ID,	&stack_hint_ipv6_l2tpv3},
+	{ETH_RSS_IPV6 | ETH_RSS_PFCP_SEID,	&stack_hint_ipv6_pfcp},
+	{ETH_RSS_IPV6,				&stack_hint_ipv6},
+	/* IPV6 UDP */
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_src_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_src_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv6_src},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_dst_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_dst_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv6_dst},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP,	&stack_hint_ipv6_udp},
+	/* IPV6 TCP */
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_src_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_src_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv6_src},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_dst_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_dst_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv6_dst},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP,	&stack_hint_ipv6_tcp},
+	/* IPV6 SCTP */
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_src_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_src_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv6_src},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_dst_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_dst_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv6_dst},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP,	&stack_hint_ipv6_sctp},
+};
+
+static struct iavf_flow_engine iavf_hash_engine = {
+	.init = iavf_hash_init,
+	.create = iavf_hash_create,
+	.destroy = iavf_hash_destroy,
+	.uninit = iavf_hash_uninit,
+	.free = iavf_hash_free,
+	.type = IAVF_FLOW_ENGINE_HASH,
+};
+
+/* Register parser for comms package. */
+static struct iavf_flow_parser iavf_hash_parser = {
+	.engine = &iavf_hash_engine,
+	.array = iavf_hash_pattern_list,
+	.array_len = RTE_DIM(iavf_hash_pattern_list),
+	.parse_pattern_action = iavf_hash_parse_pattern_action,
+	.stage = IAVF_FLOW_STAGE_RSS,
+};
+
+RTE_INIT(iavf_hash_engine_init)
+{
+	struct iavf_flow_engine *engine = &iavf_hash_engine;
+
+	iavf_register_flow_engine(engine);
+}
+
+static int
+iavf_hash_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF))
+		return -ENOTSUP;
+
+	parser = &iavf_hash_parser;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static int
+iavf_hash_check_inset(const struct rte_flow_item pattern[],
+		      struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Not support range");
+			return -rte_errno;
+		}
+	}
+
+	return 0;
+}
+
+static uint64_t
+iavf_rss_hf_refine(uint64_t rss_hf, const struct rte_flow_item pattern[],
+		   const struct rte_flow_action *action,
+		   struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
+			const struct rte_flow_action_rss *rss = action->conf;
+			const struct rte_flow_item_gtp_psc *psc = item->spec;
+
+			if (psc && ((psc->pdu_type == GTP_EH_PDU_LINK_UP &&
+			     (rss->types & ETH_RSS_L3_SRC_ONLY)) ||
+			    (!psc->pdu_type &&
+			     (rss->types & ETH_RSS_L3_DST_ONLY)))) {
+				rss_hf |= psc->pdu_type ? ETH_RSS_GTPU_UP :
+						ETH_RSS_GTPU_DWN;
+			} else {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+						   pattern,
+						   "Invalid input set");
+				return -rte_errno;
+			}
+		}
+	}
+
+	return rss_hf;
+}
+
+static int
+iavf_hash_parse_action(struct iavf_pattern_match_item *pattern_match_item,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta, struct rte_flow_error *error)
+{
+	struct iavf_rss_meta *rss_meta = (struct iavf_rss_meta *)*meta;
+	struct rss_type_match_hdr *m = (struct rss_type_match_hdr *)
+				(pattern_match_item->meta);
+	uint32_t type_list_len = RTE_DIM(iavf_hash_type_list);
+	struct iavf_hash_match_type *type_match_item;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_rss *rss;
+	uint64_t rss_hf;
+	uint16_t i;
+
+	/* Supported action is RSS. */
+	for (action = actions; action->type !=
+		RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			rss_hf = rss->types;
+
+			/**
+			 * Check simultaneous use of SRC_ONLY and DST_ONLY
+			 * of the same level.
+			 */
+			rss_hf = rte_eth_rss_hf_refine(rss_hf);
+
+			/**
+			 * Check the item spec with the rss action and
+			 * refine rss hash field.
+			 */
+			rss_hf = iavf_rss_hf_refine(rss_hf, pattern, action,
+						    error);
+
+			/* Check if pattern is empty. */
+			if (pattern_match_item->pattern_list !=
+				iavf_pattern_empty && rss->func ==
+				RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"Not supported flow");
+
+			/* Check if rss types match pattern. */
+			if (rss->func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+				if (((rss_hf & ETH_RSS_IPV4) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_IPV6) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) !=
+					m->eth_rss_hint))
+					return rte_flow_error_set(error,
+					ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+					action, "Not supported RSS types");
+			}
+
+			if (rss->level)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"a nonzero RSS encapsulation level is not supported");
+
+			if (rss->key_len == 0)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"RSS hash key_len mustn't be 0");
+
+			if (rss->queue_num > IAVF_ACTION_RSS_MAX_QUEUE_NUM)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"too many queues for RSS context");
+
+			/* Check hash function and save it to rss_meta. */
+			if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+				rss_meta->hash_function =
+				RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+
+			if (rss->func ==
+			    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+				rss_meta->hash_function =
+				RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
+
+			type_match_item =
+			rte_zmalloc("iavf_type_match_item",
+				    sizeof(struct iavf_hash_match_type), 0);
+			if (!type_match_item) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_HANDLE,
+						   NULL,
+						   "No memory for type_match_item");
+				return -ENOMEM;
+			}
+
+			/* Find matched proto stack according to hash type. */
+			for (i = 0; i < type_list_len; i++) {
+				struct iavf_hash_match_type *ht_map =
+					&iavf_hash_type_list[i];
+				if (rss_hf == ht_map->hash_type) {
+					type_match_item->hash_type =
+						ht_map->hash_type;
+					type_match_item->proto_stack =
+						ht_map->proto_stack;
+				}
+			}
+
+			/* Save proto stack to rss_meta. */
+			rss_meta->proto_stack = type_match_item->proto_stack;
+
+			rte_free(type_match_item);
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, action,
+					   "Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	return 0;
+}
+
+static int
+iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
+			       struct iavf_pattern_match_item *array,
+			       uint32_t array_len,
+			       const struct rte_flow_item pattern[],
+			       const struct rte_flow_action actions[],
+			       void **meta,
+			       struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct iavf_pattern_match_item *pattern_match_item;
+	struct iavf_rss_meta *rss_meta_ptr;
+
+	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
+	if (!rss_meta_ptr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for rss_meta_ptr");
+		return -ENOMEM;
+	}
+
+	*meta = rss_meta_ptr;
+
+	/* Check rss supported pattern and find matched pattern. */
+	pattern_match_item =
+		iavf_search_pattern_match_item(pattern, array, array_len,
+					       error);
+	if (!pattern_match_item)
+		return -rte_errno;
+
+	ret = iavf_hash_check_inset(pattern, error);
+	if (ret)
+		return -rte_errno;
+
+	/* Check rss action. */
+	ret = iavf_hash_parse_action(pattern_match_item, pattern, actions,
+				     meta, error);
+	if (ret)
+		return -rte_errno;
+
+	rte_free(pattern_match_item);
+
+	return 0;
+}
+
+static int
+iavf_hash_create(__rte_unused struct iavf_adapter *ad,
+		 __rte_unused struct rte_flow *flow, void *meta,
+		 __rte_unused struct rte_flow_error *error)
+{
+	struct iavf_rss_meta *rss_meta = (struct iavf_rss_meta *)meta;
+	struct virtchnl_rss_cfg *rss_cfg;
+	int ret = 0;
+
+	rss_cfg = rte_zmalloc("iavf rss rule",
+			      sizeof(struct virtchnl_rss_cfg), 0);
+	if (!rss_cfg) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for rss rule");
+		return -ENOMEM;
+	}
+
+	rss_cfg->proto_stack = *rss_meta->proto_stack;
+	rss_cfg->hash_function = rss_meta->hash_function;
+
+	ret = iavf_add_del_rss_cfg(ad, rss_cfg, true);
+	if (!ret) {
+		flow->rule = rss_cfg;
+	} else {
+		PMD_DRV_LOG(ERR, "fail to add RSS configure");
+		rte_free(rss_cfg);
+	}
+
+	return ret;
+}
+
+static int
+iavf_hash_destroy(__rte_unused struct iavf_adapter *ad,
+		  struct rte_flow *flow,
+		  __rte_unused struct rte_flow_error *error)
+{
+	struct virtchnl_rss_cfg *rss_cfg;
+	int ret = 0;
+
+	rss_cfg = (struct virtchnl_rss_cfg *)flow->rule;
+
+	ret = iavf_add_del_rss_cfg(ad, rss_cfg, false);
+	if (ret)
+		PMD_DRV_LOG(ERR, "fail to del RSS configure");
+
+	return ret;
+}
+
+static void
+iavf_hash_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_hash_parser, ad);
+}
+
+static void
+iavf_hash_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 11c70f5fc..3b04ef114 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -336,13 +336,10 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	/* TODO: basic offload capabilities, need to
-	 * add advanced/optional offload capabilities
-	 */
-
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
-		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -867,3 +864,29 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 
 	return err;
 }
+
+int
+iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
+		     struct virtchnl_rss_cfg *rss_cfg, bool add)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = add ? VIRTCHNL_OP_ADD_RSS_CFG :
+		VIRTCHNL_OP_DEL_RSS_CFG;
+	args.in_args = (u8 *)rss_cfg;
+	args.in_args_size = sizeof(*rss_cfg);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of %s",
+			    add ? "OP_ADD_RSS_CFG" :
+			    "OP_DEL_RSS_INPUT_CFG");
+
+	return err;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 32eabca4b..5a5cdd562 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -13,6 +13,7 @@ sources = files(
 	'iavf_rxtx.c',
 	'iavf_vchnl.c',
 	'iavf_generic_flow.c',
+	'iavf_hash.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.20.1



* [dpdk-dev] [dpdk-dev 3/4] app/testpmd: support GTP PDU type
  2020-03-18 17:03 [dpdk-dev] [dpdk-dev 0/4] add RSS configuration for iavf Jeff Guo
  2020-03-18 17:03 ` [dpdk-dev] [dpdk-dev 1/4] ethdev: add new RSS offload types Jeff Guo
  2020-03-18 17:03 ` [dpdk-dev] [dpdk-dev 2/4] net/iavf: add RSS configuration for VFs Jeff Guo
@ 2020-03-18 17:04 ` Jeff Guo
  2020-03-18 17:04 ` [dpdk-dev] [dpdk-dev 4/4] app/testpmd: add new types to RSS hash commands Jeff Guo
  3 siblings, 0 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-18 17:04 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

Add GTP PDU type configuration to the testpmd flow cmdline.
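
For example (illustrative only, hypothetical port 0), a downlink GTPU PDU
session container can then be matched and combined with an RSS action:

  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / gtpu / gtp_psc pdu_t is 0 / ipv4 / end actions rss types ipv4 l3-dst-only end queues end / end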

Change-Id: Ibc6fe056fc6c8dcc447fa69ebfdcd97599489dc7
Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
 app/test-pmd/cmdline_flow.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 159ecc48d..64b36f807 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -49,6 +49,7 @@ enum index {
 	PORT_ID,
 	GROUP_ID,
 	PRIORITY_LEVEL,
+	GTP_PSC_PDU_T,
 
 	/* Top-level command. */
 	SET,
@@ -1626,6 +1627,13 @@ static const struct token token_list[] = {
 		.call = parse_int,
 		.comp = comp_none,
 	},
+	[GTP_PSC_PDU_T] = {
+		.name = "{GTPU pdu type}",
+		.type = "INTEGER",
+		.help = "gtpu pdu uplink/downlink identifier",
+		.call = parse_int,
+		.comp = comp_none,
+	},
 	/* Top-level command. */
 	[FLOW] = {
 		.name = "flow",
@@ -2615,7 +2623,8 @@ static const struct token token_list[] = {
 	[ITEM_GTP_PSC_PDU_T] = {
 		.name = "pdu_t",
 		.help = "PDU type",
-		.next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
+		.next = NEXT(item_gtp_psc, NEXT_ENTRY(GTP_PSC_PDU_T),
+			     item_param),
 		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
 					pdu_type)),
 	},
-- 
2.20.1



* [dpdk-dev] [dpdk-dev 4/4] app/testpmd: add new types to RSS hash commands
  2020-03-18 17:03 [dpdk-dev] [dpdk-dev 0/4] add RSS configuration for iavf Jeff Guo
                   ` (2 preceding siblings ...)
  2020-03-18 17:04 ` [dpdk-dev] [dpdk-dev 3/4] app/testpmd: support GTP PDU type Jeff Guo
@ 2020-03-18 17:04 ` Jeff Guo
  2020-03-26 16:40   ` [dpdk-dev] [dpdk-dev v2 0/4] add RSS configuration for iavf Jeff Guo
  3 siblings, 1 reply; 14+ messages in thread
From: Jeff Guo @ 2020-03-18 17:04 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

Add new types (eth-src-only/eth-dst-only/s-vlan/c-vlan/l2tpv3/esp/ah/
pfcp) to the RSS hash commands so that these RSS input sets can be
configured from the cmdline.
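
For instance (illustrative only), the new tokens can then be used with the
existing RSS commands:

  testpmd> port config all rss esp
  testpmd> port config all rss l2tpv3
  testpmd> port config 0 rss-hash-key pfcp <string of hex digits>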

Change-Id: Idb85afd5f146a74c915f6d3716b77cb2e7c9f73c
Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
 app/test-pmd/cmdline.c | 24 ++++++++++++++++++++++--
 app/test-pmd/config.c  |  8 ++++++++
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index a037a55c6..084ee9ef7 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -2299,6 +2299,22 @@ cmd_config_rss_parsed(void *parsed_result,
 		rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
 	else if (!strcmp(res->value, "l4-dst-only"))
 		rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+	else if (!strcmp(res->value, "eth-src-only"))
+		rss_conf.rss_hf = ETH_RSS_ETH_SRC_ONLY;
+	else if (!strcmp(res->value, "eth-dst-only"))
+		rss_conf.rss_hf = ETH_RSS_ETH_DST_ONLY;
+	else if (!strcmp(res->value, "s-vlan"))
+		rss_conf.rss_hf = ETH_RSS_S_VLAN;
+	else if (!strcmp(res->value, "c-vlan"))
+		rss_conf.rss_hf = ETH_RSS_C_VLAN;
+	else if (!strcmp(res->value, "l2tpv3"))
+		rss_conf.rss_hf = ETH_RSS_L2TPV3_SESS_ID;
+	else if (!strcmp(res->value, "esp"))
+		rss_conf.rss_hf = ETH_RSS_ESP_SPI;
+	else if (!strcmp(res->value, "ah"))
+		rss_conf.rss_hf = ETH_RSS_AH_SPI;
+	else if (!strcmp(res->value, "pfcp"))
+		rss_conf.rss_hf = ETH_RSS_PFCP_SEID;
 	else if (!strcmp(res->value, "none"))
 		rss_conf.rss_hf = 0;
 	else if (!strcmp(res->value, "default"))
@@ -2467,7 +2483,9 @@ cmdline_parse_token_string_t cmd_config_rss_hash_key_rss_type =
 				 "ipv4-other#ipv6#ipv6-frag#ipv6-tcp#ipv6-udp#"
 				 "ipv6-sctp#ipv6-other#l2-payload#ipv6-ex#"
 				 "ipv6-tcp-ex#ipv6-udp-ex#"
-				 "l3-src-only#l3-dst-only#l4-src-only#l4-dst-only");
+				 "l3-src-only#l3-dst-only#l4-src-only#l4-dst-only#"
+				 "eth-src-only#eth-dst-only#s-vlan#c-vlan#"
+				 "l2tpv3#esp#ah#pfcp");
 cmdline_parse_token_string_t cmd_config_rss_hash_key_value =
 	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, key, NULL);
 
@@ -2478,7 +2496,9 @@ cmdline_parse_inst_t cmd_config_rss_hash_key = {
 		"ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
 		"ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
 		"l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex|"
-		"l3-src-only|l3-dst-only|l4-src-only|l4-dst-only "
+		"l3-src-only|l3-dst-only|l4-src-only|l4-dst-only|"
+		"eth-src-only|eth-dst-only|s-vlan|c-vlan|"
+		"l2tpv3|esp|ah|pfcp "
 		"<string of hex digits (variable length, NIC dependent)>",
 	.tokens = {
 		(void *)&cmd_config_rss_hash_key_port,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 8cf84ccd3..be8f65eef 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -77,6 +77,10 @@ const struct rss_type_info rss_type_table[] = {
 			ETH_RSS_UDP | ETH_RSS_SCTP |
 			ETH_RSS_L2_PAYLOAD },
 	{ "none", 0 },
+	{ "eth-src-only", ETH_RSS_ETH_SRC_ONLY },
+	{ "eth-dst-only", ETH_RSS_ETH_DST_ONLY },
+	{ "s-vlan", ETH_RSS_S_VLAN },
+	{ "c-vlan", ETH_RSS_C_VLAN },
 	{ "ipv4", ETH_RSS_IPV4 },
 	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
 	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
@@ -106,6 +110,10 @@ const struct rss_type_info rss_type_table[] = {
 	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
 	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
 	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
+	{ "l2tpv3", ETH_RSS_L2TPV3_SESS_ID },
+	{ "esp", ETH_RSS_ESP_SPI },
+	{ "ah", ETH_RSS_AH_SPI },
+	{ "pfcp", ETH_RSS_PFCP_SEID },
 	{ NULL, 0 },
 };
 
-- 
2.20.1



* [dpdk-dev] [dpdk-dev v2 0/4] add RSS configuration for iavf
  2020-03-18 17:04 ` [dpdk-dev] [dpdk-dev 4/4] app/testpmd: add new types to RSS hash commands Jeff Guo
@ 2020-03-26 16:40   ` Jeff Guo
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 1/4] ethdev: add new RSS offload types Jeff Guo
                       ` (3 more replies)
  0 siblings, 4 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-26 16:40 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

The VF must be capable of configuring RSS, so add RSS configuration
support for iavf. The supported protocol RSS input sets are listed below.

eth-src-only/
eth-dst-only/
svlan/
cvlan/
ipv4/
ipv6/
l3_src_only/
l3_dst_only/
l2tpv3/
esp/
ah/
pfcp/
gtpu down/
gtpu up/
udp/
tcp/
sctp/

This patchset depends on the following patch sets:
(1)http://patches.dpdk.org/project/dpdk/list/?series=8961
        net/iavf: support FDIR capability
(2)http://patches.dpdk.org/project/dpdk/list/?series=8950
        add generic filter support for iavf

v2:
1. Refine some rte_eth RSS offload types.
2. Add NAT-T ESP and refine GTPU uplink/downlink (an illustrative command
   follows this list).
3. Reference some fix patches from the PF side.
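
An illustrative NAT-T ESP (UDP-encapsulated ESP) rule on a hypothetical
port 0, assuming the esp type token added in patch 4, could look like:

  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / esp / end actions rss types esp end queues end / end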

Jeff Guo (4):
  ethdev: add new RSS offload types
  net/iavf: add RSS configuration for VFs
  app/testpmd: support GTP PDU type
  app/testpmd: add new types to RSS hash commands

 app/test-pmd/cmdline.c         |   24 +-
 app/test-pmd/cmdline_flow.c    |   11 +-
 app/test-pmd/config.c          |    8 +
 drivers/net/iavf/Makefile      |    1 +
 drivers/net/iavf/iavf.h        |   10 +
 drivers/net/iavf/iavf_hash.c   | 1157 ++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c  |   33 +-
 drivers/net/iavf/meson.build   |    1 +
 lib/librte_ethdev/rte_ethdev.h |   11 +-
 9 files changed, 1247 insertions(+), 9 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_hash.c

-- 
2.20.1



* [dpdk-dev] [dpdk-dev v2 1/4] ethdev: add new RSS offload types
  2020-03-26 16:40   ` [dpdk-dev] [dpdk-dev v2 0/4] add RSS configuration for iavf Jeff Guo
@ 2020-03-26 16:40     ` Jeff Guo
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 2/4] net/iavf: add RSS configuration for VFs Jeff Guo
                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-26 16:40 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

Define new RSS offload types for ETH/SVLAN/CVLAN/GTPU/L2TPV3/
ESP/AH/PFCP.

Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
v2:
1. Refine some rte_eth RSS offload types.
---
 lib/librte_ethdev/rte_ethdev.h | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index d1a593ad1..29cdf07cd 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -511,6 +511,13 @@ struct rte_eth_rss_conf {
 #define ETH_RSS_GENEVE             (1ULL << 20)
 #define ETH_RSS_NVGRE              (1ULL << 21)
 #define ETH_RSS_GTPU               (1ULL << 23)
+#define ETH_RSS_ETH		   (1ULL << 24)
+#define ETH_RSS_S_VLAN		   (1ULL << 25)
+#define ETH_RSS_C_VLAN		   (1ULL << 26)
+#define ETH_RSS_IPSEC_ESP	   (1ULL << 27)
+#define ETH_RSS_IPSEC_AH	   (1ULL << 28)
+#define ETH_RSS_L2TPV3		   (1ULL << 29)
+#define ETH_RSS_PFCP		   (1ULL << 30)
 
 /*
  * We use the following macros to combine with above ETH_RSS_* for
@@ -524,7 +531,9 @@ struct rte_eth_rss_conf {
 #define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
 #define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
 #define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
-#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
+#define ETH_RSS_L4_DST_ONLY	   (1ULL << 60)
+#define ETH_RSS_ETH_SRC_ONLY	   (1ULL << 59)
+#define ETH_RSS_ETH_DST_ONLY	   (1ULL << 58)
 
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
-- 
2.20.1



* [dpdk-dev] [dpdk-dev v2 2/4] net/iavf: add RSS configuration for VFs
  2020-03-26 16:40   ` [dpdk-dev] [dpdk-dev v2 0/4] add RSS configuration for iavf Jeff Guo
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 1/4] ethdev: add new RSS offload types Jeff Guo
@ 2020-03-26 16:40     ` Jeff Guo
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type Jeff Guo
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 4/4] app/testpmd: add new types to RSS hash commands Jeff Guo
  3 siblings, 0 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-26 16:40 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

Add RSS configuration for VFs. The VF must be capable of configuring
RSS.
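
For context, a hedged sketch (the function name is illustrative) of how an
application might exercise this path through the generic rte_flow API; the
parser below requires level, key_len and queue_num to be zero:

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Sketch only: hash on the IPv4 source address of IPv4-UDP packets.
 * Such a rule is translated by this patch into a virtchnl
 * VIRTCHNL_OP_ADD_RSS_CFG message sent to the PF.
 */
static struct rte_flow *
example_rss_ipv4_udp_src(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_rss rss = {
		.types = ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_attr attr = { .ingress = 1 };

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}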

Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
v1:
1. Add NAT-T ESP and refine GTPU downlink/uplink.
2. Reference some fix patches from the PF side.
---
 drivers/net/iavf/Makefile     |    1 +
 drivers/net/iavf/iavf.h       |   10 +
 drivers/net/iavf/iavf_hash.c  | 1157 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c |   33 +-
 drivers/net/iavf/meson.build  |    1 +
 5 files changed, 1197 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_hash.c

diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
index 1bf0f26b5..7b0093a3e 100644
--- a/drivers/net/iavf/Makefile
+++ b/drivers/net/iavf/Makefile
@@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
 ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
 endif
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index c8a45644c..afec8b276 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -77,6 +77,13 @@
 #define IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK  0x03
 #define IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
 
+/* DDP package type */
+enum ice_pkg_type {
+	IAVF_PKG_TYPE_UNKNOWN,
+	IAVF_PKG_TYPE_OS_DEFAULT,
+	IAVF_PKG_TYPE_COMMS,
+};
+
 struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
@@ -155,6 +162,7 @@ struct iavf_adapter {
 	const uint32_t *ptype_tbl;
 	bool stopped;
 	uint8_t fdir_enabled;
+	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 };
 
 /* IAVF_DEV_PRIVATE_TO */
@@ -260,4 +268,6 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
 int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
 			 struct rte_ether_addr *addr, bool add);
 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
+int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
+			 struct virtchnl_rss_cfg *rss_cfg, bool add);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
new file mode 100644
index 000000000..e5d59ec2d
--- /dev/null
+++ b/drivers/net/iavf/iavf_hash.c
@@ -0,0 +1,1157 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "iavf_log.h"
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+
+struct rss_type_match_hdr {
+	uint32_t hdr_mask;
+	uint64_t eth_rss_hint;
+};
+
+struct iavf_hash_match_type {
+	uint64_t hash_type;
+	struct virtchnl_proto_stack *proto_stack;
+};
+
+struct iavf_rss_meta {
+	struct virtchnl_proto_stack *proto_stack;
+	uint32_t hash_function;
+};
+
+struct iavf_hash_flow_cfg {
+	bool simple_xor;
+	struct virtchnl_rss_cfg *rss_cfg;
+};
+
+static int
+iavf_hash_init(struct iavf_adapter *ad);
+static int
+iavf_hash_create(struct iavf_adapter *ad, struct rte_flow *flow, void *meta,
+		 struct rte_flow_error *error);
+static int
+iavf_hash_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error);
+static void
+iavf_hash_uninit(struct iavf_adapter *ad);
+static void
+iavf_hash_free(struct rte_flow *flow);
+static int
+iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
+			       struct iavf_pattern_match_item *array,
+			       uint32_t array_len,
+			       const struct rte_flow_item pattern[],
+			       const struct rte_flow_action actions[],
+			       void **meta,
+			       struct rte_flow_error *error);
+
+/* Generate flow hash field from flow field type(s) */
+#define IAVF_FLOW_HASH_ETH	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_ETH_DST) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_ETH_SRC))
+#define IAVF_FLOW_HASH_IPV4	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_IPV4_SRC) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_IPV4_DST))
+#define IAVF_FLOW_HASH_IPV6	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_IPV6_DST))
+#define IAVF_FLOW_HASH_TCP_PORT	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_TCP_DST_PORT))
+#define IAVF_FLOW_HASH_UDP_PORT	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_UDP_DST_PORT))
+#define IAVF_FLOW_HASH_SCTP_PORT	\
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | \
+	 BIT_ULL(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT))
+
+#define IAVF_HASH_INVALID	0
+#define IAVF_HASH_TCP_IPV4	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_TCP_PORT)
+#define IAVF_HASH_TCP_IPV6	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_TCP_PORT)
+#define IAVF_HASH_UDP_IPV4	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_UDP_PORT)
+#define IAVF_HASH_UDP_IPV6	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_UDP_PORT)
+#define IAVF_HASH_SCTP_IPV4	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_SCTP_PORT)
+#define IAVF_HASH_SCTP_IPV6	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_SCTP_PORT)
+
+#define IAVF_FLOW_HASH_GTP_U_TEID \
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID))
+
+#define IAVF_FLOW_HASH_GTP_U_IPV4_TEID \
+	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_GTP_U_TEID)
+#define IAVF_FLOW_HASH_GTP_U_IPV6_TEID \
+	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_GTP_U_TEID)
+
+#define IAVF_FLOW_HASH_GTP_U_EH_TEID \
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID))
+
+#define IAVF_FLOW_HASH_GTP_U_EH_QFI \
+	(BIT_ULL(VIRTCHNL_PROTO_HDR_GTPU_EH_QFI))
+
+#define IAVF_FLOW_HASH_GTP_U_IPV4_EH \
+	(IAVF_FLOW_HASH_IPV4 | IAVF_FLOW_HASH_GTP_U_EH_TEID | \
+	 IAVF_FLOW_HASH_GTP_U_EH_QFI)
+#define IAVF_FLOW_HASH_GTP_U_IPV6_EH \
+	(IAVF_FLOW_HASH_IPV6 | IAVF_FLOW_HASH_GTP_U_EH_TEID | \
+	 IAVF_FLOW_HASH_GTP_U_EH_QFI)
+
+/* The first member is protocol header, the second member is ETH_RSS_*. */
+struct rss_type_match_hdr iavf_hint_empty = {
+	VIRTCHNL_PROTO_HDR_NONE,	0};
+struct rss_type_match_hdr iavf_hint_eth_ipv4 = {
+	VIRTCHNL_PROTO_HDR_IPV4,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_udp = {
+	VIRTCHNL_PROTO_HDR_IPV4 | VIRTCHNL_PROTO_HDR_UDP,
+	ETH_RSS_NONFRAG_IPV4_UDP};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_tcp = {
+	VIRTCHNL_PROTO_HDR_IPV4 | VIRTCHNL_PROTO_HDR_TCP,
+	ETH_RSS_NONFRAG_IPV4_TCP};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_sctp = {
+	VIRTCHNL_PROTO_HDR_IPV4 | VIRTCHNL_PROTO_HDR_SCTP,
+	ETH_RSS_NONFRAG_IPV4_SCTP};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_gtpu_eh = {
+	VIRTCHNL_PROTO_HDR_GTPU_EH,	ETH_RSS_NONFRAG_IPV4_UDP};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_esp = {
+	VIRTCHNL_PROTO_HDR_ESP,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_udp_esp = {
+	VIRTCHNL_PROTO_HDR_NAT_T_ESP,	ETH_RSS_NONFRAG_IPV4_UDP};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_ah = {
+	VIRTCHNL_PROTO_HDR_AH,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_l2tpv3 = {
+	VIRTCHNL_PROTO_HDR_L2TPV3,	ETH_RSS_IPV4};
+struct rss_type_match_hdr iavf_hint_eth_ipv4_pfcp = {
+	VIRTCHNL_PROTO_HDR_PFCP,	ETH_RSS_NONFRAG_IPV4_UDP};
+struct rss_type_match_hdr iavf_hint_eth_ipv6 = {
+	VIRTCHNL_PROTO_HDR_IPV6,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_udp = {
+	VIRTCHNL_PROTO_HDR_IPV6 | VIRTCHNL_PROTO_HDR_UDP,
+	ETH_RSS_NONFRAG_IPV6_UDP};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_tcp = {
+	VIRTCHNL_PROTO_HDR_IPV6 | VIRTCHNL_PROTO_HDR_TCP,
+	ETH_RSS_NONFRAG_IPV6_TCP};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_sctp = {
+	VIRTCHNL_PROTO_HDR_IPV6 | VIRTCHNL_PROTO_HDR_SCTP,
+	ETH_RSS_NONFRAG_IPV6_SCTP};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_esp = {
+	VIRTCHNL_PROTO_HDR_ESP,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_udp_esp = {
+	VIRTCHNL_PROTO_HDR_NAT_T_ESP,	ETH_RSS_NONFRAG_IPV6_UDP};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_ah = {
+	VIRTCHNL_PROTO_HDR_AH,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_l2tpv3 = {
+	VIRTCHNL_PROTO_HDR_L2TPV3,	ETH_RSS_IPV6};
+struct rss_type_match_hdr iavf_hint_eth_ipv6_pfcp = {
+	VIRTCHNL_PROTO_HDR_PFCP,	ETH_RSS_NONFRAG_IPV6_UDP};
+
+/* Supported pattern for hash. */
+static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
+	{iavf_pattern_eth_ipv4, IAVF_INSET_NONE, &iavf_hint_eth_ipv4},
+	{iavf_pattern_eth_ipv4_udp, IAVF_INSET_NONE, &iavf_hint_eth_ipv4_udp},
+	{iavf_pattern_eth_ipv4_tcp, IAVF_INSET_NONE, &iavf_hint_eth_ipv4_tcp},
+	{iavf_pattern_eth_ipv4_sctp, IAVF_INSET_NONE, &iavf_hint_eth_ipv4_sctp},
+	{iavf_pattern_eth_ipv6, IAVF_INSET_NONE, &iavf_hint_eth_ipv6},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_gtpu_eh},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_gtpu_eh},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_gtpu_eh},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_gtpu_eh},
+	{iavf_pattern_eth_ipv4_esp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_esp},
+	{iavf_pattern_eth_ipv4_udp_esp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_udp_esp},
+	{iavf_pattern_eth_ipv4_ah,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_ah},
+	{iavf_pattern_eth_ipv4_l2tpv3,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_l2tpv3},
+	{iavf_pattern_eth_ipv4_pfcp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv4_pfcp},
+	{iavf_pattern_eth_ipv6_udp, IAVF_INSET_NONE, &iavf_hint_eth_ipv6_udp},
+	{iavf_pattern_eth_ipv6_tcp, IAVF_INSET_NONE, &iavf_hint_eth_ipv6_tcp},
+	{iavf_pattern_eth_ipv6_sctp, IAVF_INSET_NONE, &iavf_hint_eth_ipv6_sctp},
+	{iavf_pattern_eth_ipv6_esp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_esp},
+	{iavf_pattern_eth_ipv6_udp_esp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_udp_esp},
+	{iavf_pattern_eth_ipv6_ah,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_ah},
+	{iavf_pattern_eth_ipv6_l2tpv3,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_l2tpv3},
+	{iavf_pattern_eth_ipv6_pfcp,
+				IAVF_INSET_NONE, &iavf_hint_eth_ipv6_pfcp},
+	{iavf_pattern_empty, IAVF_INSET_NONE, &iavf_hint_empty},
+};
+
+#define	GTP_EH_PDU_LINK_UP		1
+#define	GTP_EH_PDU_LINK_DWN		0
+
+#define TUNNEL_LEVEL_OUTER		0
+#define TUNNEL_LEVEL_FIRST_INNER	1
+
+#define PROTO_COUNT_ONE			1
+#define PROTO_COUNT_TWO			2
+#define PROTO_COUNT_THREE		3
+
+#define BUFF_NOUSED			0
+#define FIELD_FOR_PROTO_ONLY		0
+
+#define proto_hint_eth_src { \
+	VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_ETH_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_eth_dst { \
+	VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_ETH_DST, {BUFF_NOUSED } }
+
+#define proto_hint_svlan { \
+	VIRTCHNL_PROTO_HDR_S_VLAN, VIRTCHNL_PROTO_HDR_S_VLAN_ID, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_cvlan { \
+	VIRTCHNL_PROTO_HDR_C_VLAN, VIRTCHNL_PROTO_HDR_C_VLAN_ID, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_ipv4_src { \
+	VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_IPV4_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4_dst { \
+	VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_IPV4_DST, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4_only { \
+	VIRTCHNL_PROTO_HDR_IPV4, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4 { \
+	VIRTCHNL_PROTO_HDR_IPV4, \
+	VIRTCHNL_PROTO_HDR_IPV4_SRC | VIRTCHNL_PROTO_HDR_IPV4_DST, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_udp_src_port { \
+	VIRTCHNL_PROTO_HDR_UDP, VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_udp_dst_port { \
+	VIRTCHNL_PROTO_HDR_UDP, VIRTCHNL_PROTO_HDR_UDP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_udp_only { \
+	VIRTCHNL_PROTO_HDR_UDP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_udp { \
+	VIRTCHNL_PROTO_HDR_UDP, \
+	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT | VIRTCHNL_PROTO_HDR_UDP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_tcp_src_port { \
+	VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_tcp_dst_port { \
+	VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_TCP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_tcp_only { \
+	VIRTCHNL_PROTO_HDR_TCP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_tcp { \
+	VIRTCHNL_PROTO_HDR_TCP, \
+	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT | VIRTCHNL_PROTO_HDR_TCP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_sctp_src_port { \
+	VIRTCHNL_PROTO_HDR_SCTP, VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_sctp_dst_port { \
+	VIRTCHNL_PROTO_HDR_SCTP, VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_sctp_only { \
+	VIRTCHNL_PROTO_HDR_SCTP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_sctp { \
+	VIRTCHNL_PROTO_HDR_SCTP, \
+	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT | VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_ipv6_src { \
+	VIRTCHNL_PROTO_HDR_IPV6, VIRTCHNL_PROTO_HDR_IPV6_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6_dst { \
+	VIRTCHNL_PROTO_HDR_IPV6, VIRTCHNL_PROTO_HDR_IPV6_DST, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6_only { \
+	VIRTCHNL_PROTO_HDR_IPV6, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6 { \
+	VIRTCHNL_PROTO_HDR_IPV6, \
+	VIRTCHNL_PROTO_HDR_IPV6_SRC | VIRTCHNL_PROTO_HDR_IPV6_DST, \
+	{BUFF_NOUSED } }
+
+#define proto_hint_gtpu_up_only { \
+	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, \
+	FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_gtpu_dwn_only { \
+	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, \
+	FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_esp { \
+	VIRTCHNL_PROTO_HDR_ESP, \
+	VIRTCHNL_PROTO_HDR_ESP_SPI, {BUFF_NOUSED } }
+
+#define proto_hint_ah { \
+	VIRTCHNL_PROTO_HDR_AH, \
+	VIRTCHNL_PROTO_HDR_AH_SPI, {BUFF_NOUSED } }
+
+#define proto_hint_l2tpv3 { \
+	VIRTCHNL_PROTO_HDR_L2TPV3, \
+	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, {BUFF_NOUSED } }
+
+#define proto_hint_pfcp { \
+	VIRTCHNL_PROTO_HDR_PFCP, VIRTCHNL_PROTO_HDR_PFCP_SEID, {BUFF_NOUSED } }
+
+struct virtchnl_proto_stack stack_hint_eth_src = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth_src }
+};
+
+struct virtchnl_proto_stack stack_hint_eth_dst = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth_dst }
+};
+
+struct virtchnl_proto_stack stack_hint_svlan = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_svlan }
+};
+
+struct virtchnl_proto_stack stack_hint_cvlan = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_cvlan }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4_src }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4_dst }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_gtpu_up = {
+	TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_TWO, {proto_hint_gtpu_up_only,
+	proto_hint_ipv4_src }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_gtpu_dwn = {
+	TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_TWO, {proto_hint_gtpu_dwn_only,
+	proto_hint_ipv4_dst }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_udp_gtpu_up = {
+	TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_up_only,
+	proto_hint_ipv4_src, proto_hint_udp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_udp_gtpu_dwn = {
+	TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_dwn_only,
+	proto_hint_ipv4_dst, proto_hint_udp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_tcp_gtpu_up = {
+	TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_up_only,
+	proto_hint_ipv4_src, proto_hint_tcp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_tcp_gtpu_dwn = {
+	TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_THREE, {proto_hint_gtpu_dwn_only,
+	proto_hint_ipv4_dst, proto_hint_tcp_only }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_esp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_esp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_ah = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_ah }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_l2tpv3 = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_l2tpv3 }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_pfcp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_pfcp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4 = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4 }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_udp_esp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_THREE, {proto_hint_ipv4_only,
+	proto_hint_udp_only, proto_hint_esp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_udp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+	proto_hint_udp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_tcp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+	proto_hint_tcp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_src_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_dst_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv4_sctp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+	proto_hint_sctp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6_src }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6_dst }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_esp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_esp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_ah = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_ah }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_l2tpv3 = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_l2tpv3 }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_pfcp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_pfcp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6 = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6 }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_udp_esp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_THREE, {proto_hint_ipv6_only,
+	proto_hint_udp_only, proto_hint_esp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_udp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+	proto_hint_udp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_tcp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+	proto_hint_tcp }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_src_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_dst_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_stack stack_hint_ipv6_sctp = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+	proto_hint_sctp }
+};
+
+/**
+ * The first member is hash type,
+ * the second member is virtchnl protocol stack.
+ */
+struct iavf_hash_match_type iavf_hash_type_list[] = {
+	/* IPV4 */
+	{ETH_RSS_IPV4 | ETH_RSS_ETH_SRC_ONLY,	&stack_hint_eth_src},
+	{ETH_RSS_IPV4 | ETH_RSS_ETH_DST_ONLY,	&stack_hint_eth_dst},
+	{ETH_RSS_IPV4 | ETH_RSS_S_VLAN,		&stack_hint_svlan},
+	{ETH_RSS_IPV4 | ETH_RSS_C_VLAN,		&stack_hint_cvlan},
+	{ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,	&stack_hint_ipv4_src},
+	{ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY,	&stack_hint_ipv4_dst},
+	{ETH_RSS_IPV4 | ETH_RSS_IPSEC_ESP,	&stack_hint_ipv4_esp},
+	{ETH_RSS_IPV4 | ETH_RSS_IPSEC_AH,	&stack_hint_ipv4_ah},
+	{ETH_RSS_IPV4 | ETH_RSS_L2TPV3,		&stack_hint_ipv4_l2tpv3},
+	{ETH_RSS_IPV4,				&stack_hint_ipv4},
+	/* IPV4 UDP */
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_src_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_src_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU,
+					&stack_hint_ipv4_src_udp_gtpu_up},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv4_src},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_dst_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_dst_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU,
+					&stack_hint_ipv4_dst_udp_gtpu_dwn},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv4_dst},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU,
+						&stack_hint_ipv4_src_gtpu_up},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU,
+						&stack_hint_ipv4_dst_gtpu_dwn},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_PFCP,
+					&stack_hint_ipv4_pfcp},
+	{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_IPSEC_ESP,
+					&stack_hint_ipv4_udp_esp},
+	{ETH_RSS_NONFRAG_IPV4_UDP,	&stack_hint_ipv4_udp},
+	/* IPV4 TCP */
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_src_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_src_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU,
+					&stack_hint_ipv4_src_tcp_gtpu_up},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv4_src},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_dst_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_dst_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU,
+					&stack_hint_ipv4_dst_tcp_gtpu_dwn},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv4_dst},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_TCP,	&stack_hint_ipv4_tcp},
+	/* IPV4 SCTP */
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_src_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_src_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv4_src},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_dst_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_dst_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv4_dst},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv4_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv4_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV4_SCTP,	&stack_hint_ipv4_sctp},
+	/* IPV6 */
+	{ETH_RSS_IPV6 | ETH_RSS_ETH_SRC_ONLY,	&stack_hint_eth_src},
+	{ETH_RSS_IPV6 | ETH_RSS_ETH_DST_ONLY,	&stack_hint_eth_dst},
+	{ETH_RSS_IPV6 | ETH_RSS_S_VLAN,		&stack_hint_svlan},
+	{ETH_RSS_IPV6 | ETH_RSS_C_VLAN,		&stack_hint_cvlan},
+	{ETH_RSS_IPV6 | ETH_RSS_L3_SRC_ONLY,	&stack_hint_ipv6_src},
+	{ETH_RSS_IPV6 | ETH_RSS_L3_DST_ONLY,	&stack_hint_ipv6_dst},
+	{ETH_RSS_IPV6 | ETH_RSS_IPSEC_ESP,	&stack_hint_ipv6_esp},
+	{ETH_RSS_IPV6 | ETH_RSS_IPSEC_AH,	&stack_hint_ipv6_ah},
+	{ETH_RSS_IPV6 | ETH_RSS_L2TPV3,		&stack_hint_ipv6_l2tpv3},
+	{ETH_RSS_IPV6,				&stack_hint_ipv6},
+	/* IPV6 UDP */
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_src_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_src_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv6_src},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_dst_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_dst_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv6_dst},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_udp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_udp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_PFCP,
+					&stack_hint_ipv6_pfcp},
+	{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPSEC_ESP,
+					&stack_hint_ipv6_udp_esp},
+	{ETH_RSS_NONFRAG_IPV6_UDP,	&stack_hint_ipv6_udp},
+	/* IPV6 TCP */
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_src_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_src_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv6_src},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_dst_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_dst_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv6_dst},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_tcp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_tcp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_TCP,	&stack_hint_ipv6_tcp},
+	/* IPV6 SCTP */
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_src_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_src_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+					&stack_hint_ipv6_src},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_dst_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_dst_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+					&stack_hint_ipv6_dst},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+					&stack_hint_ipv6_sctp_src_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+					&stack_hint_ipv6_sctp_dst_port},
+	{ETH_RSS_NONFRAG_IPV6_SCTP,	&stack_hint_ipv6_sctp},
+};
+
+static struct iavf_flow_engine iavf_hash_engine = {
+	.init = iavf_hash_init,
+	.create = iavf_hash_create,
+	.destroy = iavf_hash_destroy,
+	.uninit = iavf_hash_uninit,
+	.free = iavf_hash_free,
+	.type = IAVF_FLOW_ENGINE_HASH,
+};
+
+/* Register parser for comms package. */
+static struct iavf_flow_parser iavf_hash_parser = {
+	.engine = &iavf_hash_engine,
+	.array = iavf_hash_pattern_list,
+	.array_len = RTE_DIM(iavf_hash_pattern_list),
+	.parse_pattern_action = iavf_hash_parse_pattern_action,
+	.stage = IAVF_FLOW_STAGE_RSS,
+};
+
+RTE_INIT(iavf_hash_engine_init)
+{
+	struct iavf_flow_engine *engine = &iavf_hash_engine;
+
+	iavf_register_flow_engine(engine);
+}
+
+static int
+iavf_hash_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF))
+		return -ENOTSUP;
+
+	parser = &iavf_hash_parser;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static int
+iavf_hash_check_inset(const struct rte_flow_item pattern[],
+		      struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Not support range");
+			return -rte_errno;
+		}
+	}
+
+	return 0;
+}
+
+static uint64_t
+iavf_rss_hf_refine(uint64_t rss_hf, const struct rte_flow_item pattern[],
+		   const struct rte_flow_action *action,
+		   struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
+			const struct rte_flow_action_rss *rss = action->conf;
+			const struct rte_flow_item_gtp_psc *psc = item->spec;
+
+			if (psc && ((psc->pdu_type == GTP_EH_PDU_LINK_UP &&
+				     (rss->types & ETH_RSS_L3_SRC_ONLY)) ||
+				    (!psc->pdu_type &&
+				     (rss->types & ETH_RSS_L3_DST_ONLY)))) {
+				rss_hf |= ETH_RSS_GTPU;
+			} else {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+						   pattern,
+						   "Invalid input set");
+				return -rte_errno;
+			}
+		}
+	}
+
+	return rss_hf;
+}
+
+static int
+iavf_hash_parse_action(struct iavf_pattern_match_item *pattern_match_item,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta, struct rte_flow_error *error)
+{
+	struct iavf_rss_meta *rss_meta = (struct iavf_rss_meta *)*meta;
+	struct rss_type_match_hdr *m = (struct rss_type_match_hdr *)
+				(pattern_match_item->meta);
+	uint32_t type_list_len = RTE_DIM(iavf_hash_type_list);
+	struct iavf_hash_match_type *type_match_item;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_rss *rss;
+	const struct rte_flow_action *action;
+	uint64_t rss_hf;
+	uint16_t i;
+	bool item_found = false;
+
+	/* Supported action is RSS. */
+	for (action = actions; action->type !=
+		RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			rss_hf = rss->types;
+
+			/**
+			 * Check simultaneous use of SRC_ONLY and DST_ONLY
+			 * of the same level.
+			 */
+			rss_hf = rte_eth_rss_hf_refine(rss_hf);
+
+			/**
+			 * Check the item spec with the rss action and
+			 * refine rss hash field.
+			 */
+			rss_hf = iavf_rss_hf_refine(rss_hf, pattern, action,
+						    error);
+
+			/* Check if pattern is empty. */
+			if (pattern_match_item->pattern_list !=
+				iavf_pattern_empty && rss->func ==
+				RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"Not supported flow");
+
+			/* Check if rss types match pattern. */
+			if (rss->func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+				if (((rss_hf & ETH_RSS_IPV4) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_IPV6) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) !=
+					m->eth_rss_hint) &&
+				    ((rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) !=
+					m->eth_rss_hint))
+					return rte_flow_error_set(error,
+					ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+					action, "Not supported RSS types");
+			}
+
+			if (rss->level)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"a nonzero RSS encapsulation level is not supported");
+
+			if (rss->key_len)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"a nonzero RSS key_len is not supported");
+
+			if (rss->queue_num)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"a non-NULL RSS queue is not supported");
+
+			/* Check hash function and save it to rss_meta. */
+			if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+				rss_meta->hash_function =
+				RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+
+			if (rss->func ==
+			    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+				rss_meta->hash_function =
+				RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
+
+			type_match_item =
+			rte_zmalloc("iavf_type_match_item",
+				    sizeof(struct iavf_hash_match_type), 0);
+			if (!type_match_item) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_HANDLE,
+						   NULL,
+						   "No memory for type_match_item");
+				return -ENOMEM;
+			}
+
+			/* Find matched proto stack according to hash type. */
+			for (i = 0; i < type_list_len; i++) {
+				struct iavf_hash_match_type *ht_map =
+					&iavf_hash_type_list[i];
+				if (rss_hf == ht_map->hash_type) {
+					type_match_item->hash_type =
+						ht_map->hash_type;
+					type_match_item->proto_stack =
+						ht_map->proto_stack;
+					rss_meta->proto_stack =
+						type_match_item->proto_stack;
+					item_found = true;
+				}
+			}
+
+			rte_free(type_match_item);
+
+			if (!item_found)
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"Not supported flow");
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, action,
+					   "Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	return 0;
+}
+
+static int
+iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
+			       struct iavf_pattern_match_item *array,
+			       uint32_t array_len,
+			       const struct rte_flow_item pattern[],
+			       const struct rte_flow_action actions[],
+			       void **meta,
+			       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *pattern_match_item;
+	struct iavf_rss_meta *rss_meta_ptr;
+	int ret = 0;
+
+	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
+	if (!rss_meta_ptr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for rss_meta_ptr");
+		return -ENOMEM;
+	}
+
+	/* Check rss supported pattern and find matched pattern. */
+	pattern_match_item =
+		iavf_search_pattern_match_item(pattern, array, array_len,
+					       error);
+	if (!pattern_match_item) {
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = iavf_hash_check_inset(pattern, error);
+	if (ret)
+		goto error;
+
+	/* Check rss action. */
+	ret = iavf_hash_parse_action(pattern_match_item, pattern, actions,
+				     (void **)&rss_meta_ptr, error);
+
+error:
+	if (!ret && meta)
+		*meta = rss_meta_ptr;
+	else
+		rte_free(rss_meta_ptr);
+
+	rte_free(pattern_match_item);
+
+	return ret;
+}
+
+static int
+iavf_hash_create(__rte_unused struct iavf_adapter *ad,
+		 __rte_unused struct rte_flow *flow, void *meta,
+		 __rte_unused struct rte_flow_error *error)
+{
+	struct iavf_rss_meta *rss_meta = (struct iavf_rss_meta *)meta;
+	struct virtchnl_rss_cfg *rss_cfg;
+	int ret = 0;
+
+	rss_cfg = rte_zmalloc("iavf rss rule",
+			      sizeof(struct virtchnl_rss_cfg), 0);
+	if (!rss_cfg) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for rss rule");
+		return -ENOMEM;
+	}
+
+	rss_cfg->proto_stack = *rss_meta->proto_stack;
+	rss_cfg->hash_function = rss_meta->hash_function;
+
+	ret = iavf_add_del_rss_cfg(ad, rss_cfg, true);
+	if (!ret) {
+		flow->rule = rss_cfg;
+	} else {
+		PMD_DRV_LOG(ERR, "fail to add RSS configure");
+		rte_free(rss_cfg);
+	}
+
+	return ret;
+}
+
+static int
+iavf_hash_destroy(__rte_unused struct iavf_adapter *ad,
+		  struct rte_flow *flow,
+		  __rte_unused struct rte_flow_error *error)
+{
+	struct virtchnl_rss_cfg *rss_cfg;
+	int ret = 0;
+
+	rss_cfg = (struct virtchnl_rss_cfg *)flow->rule;
+
+	ret = iavf_add_del_rss_cfg(ad, rss_cfg, false);
+	if (ret)
+		PMD_DRV_LOG(ERR, "fail to del RSS configure");
+
+	return ret;
+}
+
+static void
+iavf_hash_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_hash_parser, ad);
+}
+
+static void
+iavf_hash_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index caaf66690..23079692a 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -336,13 +336,10 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	/* TODO: basic offload capabilities, need to
-	 * add advanced/optional offload capabilities
-	 */
-
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
-		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -883,3 +880,29 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 
 	return err;
 }
+
+int
+iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
+		     struct virtchnl_rss_cfg *rss_cfg, bool add)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = add ? VIRTCHNL_OP_ADD_RSS_CFG :
+		VIRTCHNL_OP_DEL_RSS_CFG;
+	args.in_args = (u8 *)rss_cfg;
+	args.in_args_size = sizeof(*rss_cfg);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of %s",
+			    add ? "OP_ADD_RSS_CFG" :
+			    "OP_DEL_RSS_INPUT_CFG");
+
+	return err;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 32eabca4b..5a5cdd562 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -13,6 +13,7 @@ sources = files(
 	'iavf_rxtx.c',
 	'iavf_vchnl.c',
 	'iavf_generic_flow.c',
+	'iavf_hash.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.20.1


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
  2020-03-26 16:40   ` [dpdk-dev] [dpdk-dev v2 0/4] add RSS configuration for iavf Jeff Guo
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 1/4] ethdev: add new RSS offload types Jeff Guo
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 2/4] net/iavf: add RSS configuration for VFs Jeff Guo
@ 2020-03-26 16:40     ` Jeff Guo
  2020-03-29  8:44       ` Ori Kam
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 4/4] app/testpmd: add new types to RSS hash commands Jeff Guo
  3 siblings, 1 reply; 14+ messages in thread
From: Jeff Guo @ 2020-03-26 16:40 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

Add GTP PDU type configuration to the cmdline.
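
A hedged example of the intended usage (assuming the iavf RSS parser from
patch 2/4; the exact pattern string depends on the patterns it registers,
and pdu_t 1 selects uplink while 0 selects downlink):

flow create 0 ingress pattern eth / ipv4 / udp / gtpu / gtp_psc pdu_t is 0 / ipv4 / udp / end actions rss types ipv4-udp l3-dst-only end queues end / end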

Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
v1:
no change
---
 app/test-pmd/cmdline_flow.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a78154502..c1bd02919 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -49,6 +49,7 @@ enum index {
 	PORT_ID,
 	GROUP_ID,
 	PRIORITY_LEVEL,
+	GTP_PSC_PDU_T,
 
 	/* Top-level command. */
 	SET,
@@ -1626,6 +1627,13 @@ static const struct token token_list[] = {
 		.call = parse_int,
 		.comp = comp_none,
 	},
+	[GTP_PSC_PDU_T] = {
+		.name = "{GTPU pdu type}",
+		.type = "INTEGER",
+		.help = "gtpu pdu uplink/downlink identifier",
+		.call = parse_int,
+		.comp = comp_none,
+	},
 	/* Top-level command. */
 	[FLOW] = {
 		.name = "flow",
@@ -2615,7 +2623,8 @@ static const struct token token_list[] = {
 	[ITEM_GTP_PSC_PDU_T] = {
 		.name = "pdu_t",
 		.help = "PDU type",
-		.next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
+		.next = NEXT(item_gtp_psc, NEXT_ENTRY(GTP_PSC_PDU_T),
+			     item_param),
 		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
 					pdu_type)),
 	},
-- 
2.20.1


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [dpdk-dev] [dpdk-dev v2 4/4] app/testpmd: add new types to RSS hash commands
  2020-03-26 16:40   ` [dpdk-dev] [dpdk-dev v2 0/4] add RSS configuration for iavf Jeff Guo
                       ` (2 preceding siblings ...)
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type Jeff Guo
@ 2020-03-26 16:40     ` Jeff Guo
  3 siblings, 0 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-26 16:40 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su, jia.guo

Add some new types, such as eth-src-only/eth-dst-only/svlan/cvlan/
l2tpv3/esp/ah/pfcp, to the RSS hash commands so that these RSS input
sets can be configured from the cmdline.
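
After this patch the new input sets can be selected with the existing
commands, for example (whether a given PMD accepts a type depends on its
capabilities):

testpmd> port config all rss pfcp
testpmd> port config all rss l2tpv3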

Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
v1:
1. Refine some rte_eth RSS offload types.
---
 app/test-pmd/cmdline.c | 24 ++++++++++++++++++++++--
 app/test-pmd/config.c  |  8 ++++++++
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index a037a55c6..d55d23f81 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -2299,6 +2299,22 @@ cmd_config_rss_parsed(void *parsed_result,
 		rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
 	else if (!strcmp(res->value, "l4-dst-only"))
 		rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+	else if (!strcmp(res->value, "eth-src-only"))
+		rss_conf.rss_hf = ETH_RSS_ETH_SRC_ONLY;
+	else if (!strcmp(res->value, "eth-dst-only"))
+		rss_conf.rss_hf = ETH_RSS_ETH_DST_ONLY;
+	else if (!strcmp(res->value, "s-vlan"))
+		rss_conf.rss_hf = ETH_RSS_S_VLAN;
+	else if (!strcmp(res->value, "c-vlan"))
+		rss_conf.rss_hf = ETH_RSS_C_VLAN;
+	else if (!strcmp(res->value, "l2tpv3"))
+		rss_conf.rss_hf = ETH_RSS_L2TPV3;
+	else if (!strcmp(res->value, "esp"))
+		rss_conf.rss_hf = ETH_RSS_IPSEC_ESP;
+	else if (!strcmp(res->value, "ah"))
+		rss_conf.rss_hf = ETH_RSS_IPSEC_AH;
+	else if (!strcmp(res->value, "pfcp"))
+		rss_conf.rss_hf = ETH_RSS_PFCP;
 	else if (!strcmp(res->value, "none"))
 		rss_conf.rss_hf = 0;
 	else if (!strcmp(res->value, "default"))
@@ -2467,7 +2483,9 @@ cmdline_parse_token_string_t cmd_config_rss_hash_key_rss_type =
 				 "ipv4-other#ipv6#ipv6-frag#ipv6-tcp#ipv6-udp#"
 				 "ipv6-sctp#ipv6-other#l2-payload#ipv6-ex#"
 				 "ipv6-tcp-ex#ipv6-udp-ex#"
-				 "l3-src-only#l3-dst-only#l4-src-only#l4-dst-only");
+				 "l3-src-only#l3-dst-only#l4-src-only#l4-dst-only#"
+				 "eth-src-only#eth-dst-only#s-vlan#c-vlan#"
+				 "l2tpv3#esp#ah#pfcp");
 cmdline_parse_token_string_t cmd_config_rss_hash_key_value =
 	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, key, NULL);
 
@@ -2478,7 +2496,9 @@ cmdline_parse_inst_t cmd_config_rss_hash_key = {
 		"ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
 		"ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
 		"l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex|"
-		"l3-src-only|l3-dst-only|l4-src-only|l4-dst-only "
+		"l3-src-only|l3-dst-only|l4-src-only|l4-dst-only|"
+		"eth-src-only|eth-dst-only|s-vlan|c-vlan|"
+		"l2tpv3|esp|ah|pfcp "
 		"<string of hex digits (variable length, NIC dependent)>",
 	.tokens = {
 		(void *)&cmd_config_rss_hash_key_port,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 71aeb5413..0b83400ea 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -79,6 +79,10 @@ const struct rss_type_info rss_type_table[] = {
 			ETH_RSS_UDP | ETH_RSS_SCTP |
 			ETH_RSS_L2_PAYLOAD },
 	{ "none", 0 },
+	{ "eth-src-only", ETH_RSS_ETH_SRC_ONLY },
+	{ "eth-dst-only", ETH_RSS_ETH_DST_ONLY },
+	{ "s-vlan", ETH_RSS_S_VLAN },
+	{ "c-vlan", ETH_RSS_C_VLAN },
 	{ "ipv4", ETH_RSS_IPV4 },
 	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
 	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
@@ -108,6 +112,10 @@ const struct rss_type_info rss_type_table[] = {
 	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
 	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
 	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
+	{ "l2tpv3", ETH_RSS_L2TPV3 },
+	{ "esp", ETH_RSS_IPSEC_ESP },
+	{ "ah", ETH_RSS_IPSEC_AH },
+	{ "pfcp", ETH_RSS_PFCP },
 	{ NULL, 0 },
 };
 
-- 
2.20.1


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
  2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type Jeff Guo
@ 2020-03-29  8:44       ` Ori Kam
  2020-03-30  8:29         ` Jeff Guo
  0 siblings, 1 reply; 14+ messages in thread
From: Ori Kam @ 2020-03-29  8:44 UTC (permalink / raw)
  To: Jeff Guo, xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su

Hi Jeff,


> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Jeff Guo
> Sent: Thursday, March 26, 2020 6:41 PM
> To: xiaolong.ye@intel.com; qi.z.zhang@intel.com
> Cc: dev@dpdk.org; jingjing.wu@intel.com; yahui.cao@intel.com;
> simei.su@intel.com; jia.guo@intel.com
> Subject: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
> 
> Add gtp pdu type configure in the cmdline.

Why not use ITEM_GTP_PSC_PDU?

> 
> Signed-off-by: Jeff Guo <jia.guo@intel.com>
> ---
> v1:
> no change
> ---
>  app/test-pmd/cmdline_flow.c | 11 ++++++++++-
>  1 file changed, 10 insertions(+), 1 deletion(-)
> 
> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> index a78154502..c1bd02919 100644
> --- a/app/test-pmd/cmdline_flow.c
> +++ b/app/test-pmd/cmdline_flow.c
> @@ -49,6 +49,7 @@ enum index {
>  	PORT_ID,
>  	GROUP_ID,
>  	PRIORITY_LEVEL,
> +	GTP_PSC_PDU_T,
> 
>  	/* Top-level command. */
>  	SET,
> @@ -1626,6 +1627,13 @@ static const struct token token_list[] = {
>  		.call = parse_int,
>  		.comp = comp_none,
>  	},
> +	[GTP_PSC_PDU_T] = {
> +		.name = "{GTPU pdu type}",
> +		.type = "INTEGER",
> +		.help = "gtpu pdu uplink/downlink identifier",
> +		.call = parse_int,
> +		.comp = comp_none,
> +	},

Why is this created at this level?
This looks like it should be written totally differently.

>  	/* Top-level command. */
>  	[FLOW] = {
>  		.name = "flow",
> @@ -2615,7 +2623,8 @@ static const struct token token_list[] = {
>  	[ITEM_GTP_PSC_PDU_T] = {
>  		.name = "pdu_t",
>  		.help = "PDU type",
> -		.next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED),
> item_param),
> +		.next = NEXT(item_gtp_psc, NEXT_ENTRY(GTP_PSC_PDU_T),
> +			     item_param),
>  		.args = ARGS(ARGS_ENTRY_HTON(struct
> rte_flow_item_gtp_psc,
>  					pdu_type)),
>  	},
> --
> 2.20.1


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
  2020-03-29  8:44       ` Ori Kam
@ 2020-03-30  8:29         ` Jeff Guo
  2020-03-30 10:18           ` Ori Kam
  0 siblings, 1 reply; 14+ messages in thread
From: Jeff Guo @ 2020-03-30  8:29 UTC (permalink / raw)
  To: Ori Kam, xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su

hi, orika


On 3/29/2020 4:44 PM, Ori Kam wrote:
> Hi Jeff,
>
>
>> -----Original Message-----
>> From: dev <dev-bounces@dpdk.org> On Behalf Of Jeff Guo
>> Sent: Thursday, March 26, 2020 6:41 PM
>> To: xiaolong.ye@intel.com; qi.z.zhang@intel.com
>> Cc: dev@dpdk.org; jingjing.wu@intel.com; yahui.cao@intel.com;
>> simei.su@intel.com; jia.guo@intel.com
>> Subject: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
>>
>> Add gtp pdu type configure in the cmdline.
> Why not use ITEM_GTP_PSC_PDU?


I guess you mean ITEM_GTP_PSC_PDU_T, right? We know we have
ITEM_GTP_PSC_QFI/ITEM_GTP_PSC_PDU_T but did not define the
spec for them, so what I did is add the spec to ITEM_GTP_PSC_PDU_T
to let the PDU type be configured.


>> Signed-off-by: Jeff Guo <jia.guo@intel.com>
>> ---
>> v1:
>> no change
>> ---
>>   app/test-pmd/cmdline_flow.c | 11 ++++++++++-
>>   1 file changed, 10 insertions(+), 1 deletion(-)
>>
>> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
>> index a78154502..c1bd02919 100644
>> --- a/app/test-pmd/cmdline_flow.c
>> +++ b/app/test-pmd/cmdline_flow.c
>> @@ -49,6 +49,7 @@ enum index {
>>   	PORT_ID,
>>   	GROUP_ID,
>>   	PRIORITY_LEVEL,
>> +	GTP_PSC_PDU_T,
>>
>>   	/* Top-level command. */
>>   	SET,
>> @@ -1626,6 +1627,13 @@ static const struct token token_list[] = {
>>   		.call = parse_int,
>>   		.comp = comp_none,
>>   	},
>> +	[GTP_PSC_PDU_T] = {
>> +		.name = "{GTPU pdu type}",
>> +		.type = "INTEGER",
>> +		.help = "gtpu pdu uplink/downlink identifier",
>> +		.call = parse_int,
>> +		.comp = comp_none,
>> +	},
> Why is this created at this level?
> This looks like is should be written totally differently.


As I said above, the item exists but the spec (that is, the next token)
still needs to be added. Do you mean it should not be in the group of
common tokens? If so, let me think about that, and please make your
proposal explicit if you already have one.


>>   	/* Top-level command. */
>>   	[FLOW] = {
>>   		.name = "flow",
>> @@ -2615,7 +2623,8 @@ static const struct token token_list[] = {
>>   	[ITEM_GTP_PSC_PDU_T] = {
>>   		.name = "pdu_t",
>>   		.help = "PDU type",
>> -		.next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED),
>> item_param),
>> +		.next = NEXT(item_gtp_psc, NEXT_ENTRY(GTP_PSC_PDU_T),
>> +			     item_param),
>>   		.args = ARGS(ARGS_ENTRY_HTON(struct
>> rte_flow_item_gtp_psc,
>>   					pdu_type)),
>>   	},
>> --
>> 2.20.1

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
  2020-03-30  8:29         ` Jeff Guo
@ 2020-03-30 10:18           ` Ori Kam
  2020-03-31  8:50             ` Jeff Guo
  0 siblings, 1 reply; 14+ messages in thread
From: Ori Kam @ 2020-03-30 10:18 UTC (permalink / raw)
  To: Jeff Guo, xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su

Hi Jeff,

My name is Ori 😊

I'm not an expert in GTP, so this is just my thinking and maybe I'm
missing something, which is why a good explanation helps 😊

> -----Original Message-----
> From: Jeff Guo <jia.guo@intel.com>
> Sent: Monday, March 30, 2020 11:30 AM
> To: Ori Kam <orika@mellanox.com>; xiaolong.ye@intel.com;
> qi.z.zhang@intel.com
> Cc: dev@dpdk.org; jingjing.wu@intel.com; yahui.cao@intel.com;
> simei.su@intel.com
> Subject: Re: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
> 
> hi, orika
> 
> 
> On 3/29/2020 4:44 PM, Ori Kam wrote:
> > Hi Jeff,
> >
> >
> >> -----Original Message-----
> >> From: dev <dev-bounces@dpdk.org> On Behalf Of Jeff Guo
> >> Sent: Thursday, March 26, 2020 6:41 PM
> >> To: xiaolong.ye@intel.com; qi.z.zhang@intel.com
> >> Cc: dev@dpdk.org; jingjing.wu@intel.com; yahui.cao@intel.com;
> >> simei.su@intel.com; jia.guo@intel.com
> >> Subject: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
> >>
> >> Add gtp pdu type configure in the cmdline.
> > Why not use ITEM_GTP_PSC_PDU?
> 
> 
> I guess you mean ITEM_GTP_PSC_PDU_T, rihgt? We know  we have got
> ITEM_GTP_PSC_QFI/ITEM_GTP_PSC_PDU_T but not define the
> 
> spec for them, so what i use is add the spec into the ITEM_GTP_PSC_PDU_T
> to let the pdu type to be configured.
> 
Yes, you are correct: in rte_flow we have the RTE_FLOW_ITEM_TYPE_GTP_PSC
item, which includes pdu_type. This is the field you need, right?

In testpmd we have the ITEM_GTP_PSC_PDU_T token, which should support
setting the PDU type.
Basically you just need to type the following command line:
flow create 0 ingress pattern gtp_psc pdu_t is xxx
If this command is not working, we need to understand why.
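
For completeness, a rough sketch of the equivalent rte_flow call an
application would make to match on that field is below; the outer item
stack (eth/ipv4/udp/gtpu) and the queue action are only illustrative
assumptions, not something this patch depends on:

#include <rte_flow.h>

/* Match GTP PSC packets with a given PDU type and steer them to queue 0. */
static struct rte_flow *
create_gtp_psc_pdu_type_rule(uint16_t port_id, uint8_t pdu_type,
			     struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp_psc spec = { .pdu_type = pdu_type };
	struct rte_flow_item_gtp_psc mask = { .pdu_type = 0xff };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU },
		{ .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
		  .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Returns NULL and fills *error on failure. */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}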



> 
> >> Signed-off-by: Jeff Guo <jia.guo@intel.com>
> >> ---
> >> v1:
> >> no change
> >> ---
> >>   app/test-pmd/cmdline_flow.c | 11 ++++++++++-
> >>   1 file changed, 10 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> >> index a78154502..c1bd02919 100644
> >> --- a/app/test-pmd/cmdline_flow.c
> >> +++ b/app/test-pmd/cmdline_flow.c
> >> @@ -49,6 +49,7 @@ enum index {
> >>   	PORT_ID,
> >>   	GROUP_ID,
> >>   	PRIORITY_LEVEL,
> >> +	GTP_PSC_PDU_T,
> >>
> >>   	/* Top-level command. */
> >>   	SET,
> >> @@ -1626,6 +1627,13 @@ static const struct token token_list[] = {
> >>   		.call = parse_int,
> >>   		.comp = comp_none,
> >>   	},
> >> +	[GTP_PSC_PDU_T] = {
> >> +		.name = "{GTPU pdu type}",
> >> +		.type = "INTEGER",
> >> +		.help = "gtpu pdu uplink/downlink identifier",
> >> +		.call = parse_int,
> >> +		.comp = comp_none,
> >> +	},
> > Why is this created at this level?
> > This looks like it should be written totally differently.
> 
> 
> As I said above, we already have the item, but its spec (that is, the
> next token) still needs to be added. Do you mean it should not be in the
> group of common tokens? If so, let me think about that, and please spell
> out your proposal if you already have one.
> 

Please see above response.

> 
> >>   	/* Top-level command. */
> >>   	[FLOW] = {
> >>   		.name = "flow",
> >> @@ -2615,7 +2623,8 @@ static const struct token token_list[] = {
> >>   	[ITEM_GTP_PSC_PDU_T] = {
> >>   		.name = "pdu_t",
> >>   		.help = "PDU type",
> >> -		.next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED),
> >> item_param),
> >> +		.next = NEXT(item_gtp_psc, NEXT_ENTRY(GTP_PSC_PDU_T),
> >> +			     item_param),
> >>   		.args = ARGS(ARGS_ENTRY_HTON(struct
> >> rte_flow_item_gtp_psc,
> >>   					pdu_type)),
> >>   	},
> >> --
> >> 2.20.1

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
  2020-03-30 10:18           ` Ori Kam
@ 2020-03-31  8:50             ` Jeff Guo
  0 siblings, 0 replies; 14+ messages in thread
From: Jeff Guo @ 2020-03-31  8:50 UTC (permalink / raw)
  To: Ori Kam, xiaolong.ye, qi.z.zhang; +Cc: dev, jingjing.wu, yahui.cao, simei.su

Yes, Ori, please check the comment below.


On 3/30/2020 6:18 PM, Ori Kam wrote:
> Hi Jeff,
>
> My name is Ori 😊
>
> I'm not an expert in GTP, so this is just my thinking and maybe I'm
> missing something; this is why a good explanation helps 😊
>
>> -----Original Message-----
>> From: Jeff Guo <jia.guo@intel.com>
>> Sent: Monday, March 30, 2020 11:30 AM
>> To: Ori Kam <orika@mellanox.com>; xiaolong.ye@intel.com;
>> qi.z.zhang@intel.com
>> Cc: dev@dpdk.org; jingjing.wu@intel.com; yahui.cao@intel.com;
>> simei.su@intel.com
>> Subject: Re: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
>>
>> hi, orika
>>
>>
>> On 3/29/2020 4:44 PM, Ori Kam wrote:
>>> Hi Jeff,
>>>
>>>
>>>> -----Original Message-----
>>>> From: dev <dev-bounces@dpdk.org> On Behalf Of Jeff Guo
>>>> Sent: Thursday, March 26, 2020 6:41 PM
>>>> To: xiaolong.ye@intel.com; qi.z.zhang@intel.com
>>>> Cc: dev@dpdk.org; jingjing.wu@intel.com; yahui.cao@intel.com;
>>>> simei.su@intel.com; jia.guo@intel.com
>>>> Subject: [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type
>>>>
>>>> Add GTP PDU type configuration in the cmdline.
>>> Why not use ITEM_GTP_PSC_PDU?
>>
>> I guess you mean ITEM_GTP_PSC_PDU_T, right? We already have
>> ITEM_GTP_PSC_QFI and ITEM_GTP_PSC_PDU_T, but the spec for them is not
>> defined, so what this patch does is add the spec for ITEM_GTP_PSC_PDU_T
>> so that the PDU type can be configured.
>>
> Yes, you are correct: in rte_flow we have the RTE_FLOW_ITEM_TYPE_GTP_PSC
> item, which includes pdu_type. This is the field you need, right?
>
> In testpmd we have the ITEM_GTP_PSC_PDU_T token, which should support
> setting the PDU type.
> Basically you just need to type the following command line:
> flow create 0 ingress pattern gtp_psc pdu_t is xxx
> If this command is not working, we need to understand why.
>
>

Please check how this part looked before the patch:

        [ITEM_GTP_PSC_PDU_T] = {
                .name = "pdu_t",
                .help = "PDU type",
                .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED),
                             item_param),

Sure, we already had ITEM_GTP_PSC_PDU_T, but its NEXT_ENTRY was UNSIGNED,
which means the spec that lets the PDU type be configured was still not
implemented; fixing that is what this patch does.
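
With the new GTP_PSC_PDU_T token in place, a complete testpmd command
exercising the field would look roughly like the line below (the
surrounding item stack and the queue action are only an illustrative
example, not part of this patch):

testpmd> flow create 0 ingress pattern eth / ipv4 / udp / gtpu / gtp_psc pdu_t is 1 / end actions queue index 0 / end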


>>>> Signed-off-by: Jeff Guo <jia.guo@intel.com>
>>>> ---
>>>> v1:
>>>> no change
>>>> ---
>>>>    app/test-pmd/cmdline_flow.c | 11 ++++++++++-
>>>>    1 file changed, 10 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
>>>> index a78154502..c1bd02919 100644
>>>> --- a/app/test-pmd/cmdline_flow.c
>>>> +++ b/app/test-pmd/cmdline_flow.c
>>>> @@ -49,6 +49,7 @@ enum index {
>>>>    	PORT_ID,
>>>>    	GROUP_ID,
>>>>    	PRIORITY_LEVEL,
>>>> +	GTP_PSC_PDU_T,
>>>>
>>>>    	/* Top-level command. */
>>>>    	SET,
>>>> @@ -1626,6 +1627,13 @@ static const struct token token_list[] = {
>>>>    		.call = parse_int,
>>>>    		.comp = comp_none,
>>>>    	},
>>>> +	[GTP_PSC_PDU_T] = {
>>>> +		.name = "{GTPU pdu type}",
>>>> +		.type = "INTEGER",
>>>> +		.help = "gtpu pdu uplink/downlink identifier",
>>>> +		.call = parse_int,
>>>> +		.comp = comp_none,
>>>> +	},
>>> Why is this created at this level?
>>> This looks like it should be written totally differently.
>>
>> As I said above, we already have the item, but its spec (that is, the
>> next token) still needs to be added. Do you mean it should not be in the
>> group of common tokens? If so, let me think about that, and please spell
>> out your proposal if you already have one.
>>
> Please see above response.
>
>>>>    	/* Top-level command. */
>>>>    	[FLOW] = {
>>>>    		.name = "flow",
>>>> @@ -2615,7 +2623,8 @@ static const struct token token_list[] = {
>>>>    	[ITEM_GTP_PSC_PDU_T] = {
>>>>    		.name = "pdu_t",
>>>>    		.help = "PDU type",
>>>> -		.next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED),
>>>> item_param),
>>>> +		.next = NEXT(item_gtp_psc, NEXT_ENTRY(GTP_PSC_PDU_T),
>>>> +			     item_param),
>>>>    		.args = ARGS(ARGS_ENTRY_HTON(struct
>>>> rte_flow_item_gtp_psc,
>>>>    					pdu_type)),
>>>>    	},
>>>> --
>>>> 2.20.1

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, back to index

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-18 17:03 [dpdk-dev] [dpdk-dev 0/4] add RSS configuration for iavf Jeff Guo
2020-03-18 17:03 ` [dpdk-dev] [dpdk-dev 1/4] ethdev: add new RSS offload types Jeff Guo
2020-03-18 17:03 ` [dpdk-dev] [dpdk-dev 2/4] net/iavf: add RSS configuration for VFs Jeff Guo
2020-03-18 17:04 ` [dpdk-dev] [dpdk-dev 3/4] app/testpmd: support GTP PDU type Jeff Guo
2020-03-18 17:04 ` [dpdk-dev] [dpdk-dev 4/4] app/testpmd: add new types to RSS hash commands Jeff Guo
2020-03-26 16:40   ` [dpdk-dev] [dpdk-dev v2 0/4] add RSS configuration for iavf Jeff Guo
2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 1/4] ethdev: add new RSS offload types Jeff Guo
2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 2/4] net/iavf: add RSS configuration for VFs Jeff Guo
2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 3/4] app/testpmd: support GTP PDU type Jeff Guo
2020-03-29  8:44       ` Ori Kam
2020-03-30  8:29         ` Jeff Guo
2020-03-30 10:18           ` Ori Kam
2020-03-31  8:50             ` Jeff Guo
2020-03-26 16:40     ` [dpdk-dev] [dpdk-dev v2 4/4] app/testpmd: add new types to RSS hash commands Jeff Guo
