DPDK patches and discussions
* [dpdk-dev] [RFC] net/ice: couple of new DCF features
@ 2021-05-14  4:21 Steven Zou
From: Steven Zou @ 2021-05-14  4:21 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, Zou, Steven, Zou

From: "Zou, Steven" <steven.zou@intel.com>

This patchset adds a few new DCF features:
1. Enable multiple DCF instances.
2. Enhance GRE support in DCF.
3. Enhance VXLAN support.
A brief usage sketch for the new devargs is included after the sign-off.

Signed-off-by: Zou, Steven <steven.zou@intel.com>
---
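A rough usage sketch for the new devargs (not part of the patch itself); the
PCI addresses are placeholders and the exact command line may differ on other
setups:

  # existing single-instance DCF, unchanged behaviour
  dpdk-testpmd -a 18:01.0,cap=dcf -- -i

  # extra DCF instance selecting the new multi-instance mode plus the
  # built-in recipe path added by this series
  dpdk-testpmd -a 18:09.0,cap=mdcf,br=1 -- -i

  # the reworked testpmd command keeps its documented syntax
  testpmd> rx_vxlan_port add 4789 0
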
 app/test-pmd/cmdline.c                   |  13 +-
 drivers/common/iavf/virtchnl.h           |   6 +-
 drivers/net/iavf/iavf_ethdev.c           |   2 +-
 drivers/net/ice/base/ice_common.c        |  25 ++
 drivers/net/ice/base/ice_flex_pipe.c     |  30 ++-
 drivers/net/ice/base/ice_flex_pipe.h     |   2 +-
 drivers/net/ice/base/ice_flex_type.h     |   1 +
 drivers/net/ice/base/ice_protocol_type.h |  16 ++
 drivers/net/ice/base/ice_switch.c        | 398 ++++++++++++++++++++++++++++++-
 drivers/net/ice/base/ice_type.h          |   2 +
 drivers/net/ice/ice_acl_filter.c         |  11 +-
 drivers/net/ice/ice_dcf.c                |  23 +-
 drivers/net/ice/ice_dcf.h                |   3 +
 drivers/net/ice/ice_dcf_ethdev.c         | 228 +++++++++++++-----
 drivers/net/ice/ice_dcf_parent.c         |   8 +
 drivers/net/ice/ice_ethdev.c             |   4 +
 drivers/net/ice/ice_generic_flow.c       |  37 +++
 drivers/net/ice/ice_generic_flow.h       |   9 +
 drivers/net/ice/ice_switch_filter.c      | 134 ++++++++++-
 lib/librte_ethdev/rte_ethdev.h           |   1 +
 20 files changed, 871 insertions(+), 82 deletions(-)
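
For reference, a minimal C sketch (not part of the patch) of how an
application reaches the new DCF .udp_tunnel_port_add callback wired up in
ice_dcf_ethdev.c below; the port id and UDP port number are placeholders:

  #include <rte_ethdev.h>

  /* Register a VXLAN UDP port on a DCF-backed ethdev port. */
  static int add_vxlan_port(uint16_t port_id)
  {
          struct rte_eth_udp_tunnel tunnel = {
                  .udp_port = 4789,                   /* placeholder port */
                  .prot_type = RTE_TUNNEL_TYPE_VXLAN,
          };

          return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
  }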

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 2d18b6c..4b52091 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -9084,7 +9084,7 @@ struct cmd_tunnel_filter_result {
 
 /* *** CONFIGURE TUNNEL UDP PORT *** */
 struct cmd_tunnel_udp_config {
-	cmdline_fixed_string_t cmd;
+        cmdline_fixed_string_t rx_vxlan_port;
 	cmdline_fixed_string_t what;
 	uint16_t udp_port;
 	portid_t port_id;
@@ -9101,8 +9101,7 @@ struct cmd_tunnel_udp_config {
 
 	tunnel_udp.udp_port = res->udp_port;
 
-	if (!strcmp(res->cmd, "rx_vxlan_port"))
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+        tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
 
 	if (!strcmp(res->what, "add"))
 		ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@@ -9115,9 +9114,9 @@ struct cmd_tunnel_udp_config {
 		printf("udp tunneling add error: (%s)\n", strerror(-ret));
 }
 
-cmdline_parse_token_string_t cmd_tunnel_udp_config_cmd =
-	TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config,
-				cmd, "rx_vxlan_port");
+cmdline_parse_token_string_t cmd_tunnel_udp_config_rx_vxlan_port =
+        TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config,
+                                rx_vxlan_port, "rx_vxlan_port");
 cmdline_parse_token_string_t cmd_tunnel_udp_config_what =
 	TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config,
 				what, "add#rm");
@@ -9134,7 +9133,7 @@ struct cmd_tunnel_udp_config {
 	.help_str = "rx_vxlan_port add|rm <udp_port> <port_id>: "
 		"Add/Remove a tunneling UDP port filter",
 	.tokens = {
-		(void *)&cmd_tunnel_udp_config_cmd,
+		(void *)&cmd_tunnel_udp_config_rx_vxlan_port,
 		(void *)&cmd_tunnel_udp_config_what,
 		(void *)&cmd_tunnel_udp_config_udp_port,
 		(void *)&cmd_tunnel_udp_config_port_id,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 302cc24..e7f7a02 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -128,7 +128,9 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, 37 and 38 are reserved */
+	/* opcodes 34, 35, 36 and 37 are reserved */
+        VIRTCHNL_OP_DCF_RULE_FLUSH = 6000,
+        VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
 	VIRTCHNL_OP_DCF_CMD_BUFF = 40,
 	VIRTCHNL_OP_DCF_DISABLE = 41,
@@ -771,6 +773,7 @@ enum virtchnl_event_codes {
 	VIRTCHNL_EVENT_RESET_IMPENDING,
 	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
 	VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE,
+        VIRTCHNL_EVENT_DCF_VSI_INFO = 1000,
 };
 
 #define PF_EVENT_SEVERITY_INFO		0
@@ -1468,6 +1471,7 @@ enum virtchnl_vector_limits {
 		 */
 		valid_len = msglen;
 		break;
+        case VIRTCHNL_OP_DCF_RULE_FLUSH:
 	case VIRTCHNL_OP_DCF_DISABLE:
 	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
 	case VIRTCHNL_OP_DCF_GET_PKG_INFO:
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 93c68e6..eca54a9 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2115,7 +2115,7 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 iavf_dcf_cap_check_handler(__rte_unused const char *key,
 			   const char *value, __rte_unused void *opaque)
 {
-	if (strcmp(value, "dcf"))
+        if (strcmp(value, "dcf") && strcmp(value, "mdcf"))
 		return -1;
 
 	return 0;
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 304e55e..4039e34 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -538,6 +538,28 @@ enum ice_status
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
 
+static int ice_buildin_recipe_init(struct ice_hw *hw)
+{
+        struct ice_switch_info *sw = hw->switch_info;
+        struct ice_sw_recipe *recipe;
+
+        sw->buildin_recipes = ice_malloc(hw,
+                        sizeof(sw->buildin_recipes[0]) * ICE_MAX_NUM_RECIPES);
+
+        if (!sw->buildin_recipes)
+                return ICE_ERR_NO_MEMORY;
+
+        recipe = &sw->buildin_recipes[10];
+        recipe->is_root = 1;
+
+        recipe->lkup_exts.n_val_words = 1;
+        recipe->lkup_exts.field_mask[0] = 0x00ff;
+        recipe->lkup_exts.fv_words[0].off = 8;
+        recipe->lkup_exts.fv_words[0].prot_id = 32;
+
+        return ICE_SUCCESS;
+}
+
 /**
  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
  * @hw: pointer to the HW struct
@@ -558,6 +580,8 @@ enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
 	sw->prof_res_bm_init = 0;
 
+        ice_buildin_recipe_init(hw);
+
 	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
 	if (status) {
 		ice_free(hw, hw->switch_info);
@@ -628,6 +652,7 @@ enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 			ice_free(hw, recps[i].root_buf);
 	}
 	ice_rm_sw_replay_rule_info(hw, sw);
+        ice_free(hw, sw->buildin_recipes);
 	ice_free(hw, sw->recp_list);
 	ice_free(hw, sw);
 }
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 7594df1..6906259 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -13,6 +13,7 @@
 static const struct ice_tunnel_type_scan tnls[] = {
 	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
 	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
+        { TNL_ECPRI,           "TNL_UDP_ECPRI_PF" },
 	{ TNL_LAST,		"" }
 };
 
@@ -1280,6 +1281,9 @@ static void ice_init_pkg_regs(struct ice_hw *hw)
  */
 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
 {
+        if (pkg_ver->major == 0xFF)
+            return ICE_SUCCESS;
+
 	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
 	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
 		return ICE_ERR_NOT_SUPPORTED;
@@ -1664,6 +1668,7 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
  * @ids_cnt: lookup/protocol count
  * @bm: bitmap of field vectors to consider
  * @fv_list: Head of a list
+ * @lkup_exts: lookup elements
  *
  * Finds all the field vector entries from switch block that contain
  * a given protocol ID and returns a list of structures of type
@@ -1674,7 +1679,8 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
  */
 enum ice_status
 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
-		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+                   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list,
+                   struct ice_prot_lkup_ext *lkup_exts)
 {
 	struct ice_sw_fv_list_entry *fvl;
 	struct ice_sw_fv_list_entry *tmp;
@@ -1705,6 +1711,7 @@ enum ice_status
 		if (!ice_is_bit_set(bm, (u16)offset))
 			continue;
 
+#if 0
 		for (i = 0; i < ids_cnt; i++) {
 			int j;
 
@@ -1729,6 +1736,27 @@ enum ice_status
 				break;
 			}
 		}
+#else
+                int found = 1;
+                for (i = 0; i < lkup_exts->n_val_words; i++) {
+                        int j;
+
+                        for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+                                if (fv->ew[j].prot_id == lkup_exts->fv_words[i].prot_id && fv->ew[j].off == lkup_exts->fv_words[i].off)
+                                        break;
+                        if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+                                found = 0;
+                }
+                if (found) {
+                        fvl = (struct ice_sw_fv_list_entry *)
+                                ice_malloc(hw, sizeof(*fvl));
+                        if (!fvl)
+                                goto err;
+                        fvl->fv_ptr = fv;
+                        fvl->profile_id = offset;
+                        LIST_ADD(&fvl->list_entry, fv_list);
+                }
+#endif
 	} while (fv);
 	if (LIST_EMPTY(fv_list))
 		return ICE_ERR_CFG;
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index 214c7a2..4ea0650 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -37,7 +37,7 @@ enum ice_status
 ice_init_prof_result_bm(struct ice_hw *hw);
 enum ice_status
 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
-		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
+                   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list, struct ice_prot_lkup_ext *lkup_exts);
 bool
 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
 			 u16 *port);
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 1dd57ba..bbe7a8a 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -516,6 +516,7 @@ struct ice_pkg_enum {
 enum ice_tunnel_type {
 	TNL_VXLAN = 0,
 	TNL_GENEVE,
+        TNL_ECPRI,
 	TNL_LAST = 0xFF,
 	TNL_ALL = 0xFF,
 };
diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h
index e8caefd..f9fc208 100644
--- a/drivers/net/ice/base/ice_protocol_type.h
+++ b/drivers/net/ice/base/ice_protocol_type.h
@@ -44,6 +44,7 @@ enum ice_protocol_type {
 	ICE_GENEVE,
 	ICE_VXLAN_GPE,
 	ICE_NVGRE,
+        ICE_GRE,
 	ICE_GTP,
 	ICE_PPPOE,
 	ICE_PFCP,
@@ -65,6 +66,7 @@ enum ice_sw_tunnel_type {
 	ICE_SW_TUN_VXLAN,	/* VXLAN matches only non-VLAN pkts */
 	ICE_SW_TUN_VXLAN_VLAN,  /* VXLAN matches both VLAN and non-VLAN pkts */
 	ICE_SW_TUN_NVGRE,
+        ICE_SW_TUN_GRE,
 	ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
 			 * and GENEVE
 			 */
@@ -196,6 +198,10 @@ enum ice_prot_id {
 #define ICE_TUN_FLAG_VLAN_MASK 0x01
 #define ICE_TUN_FLAG_FV_IND 2
 
+#define ICE_GRE_FLAG_MDID 22
+#define ICE_GRE_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_GRE_FLAG_MDID)
+#define ICE_GRE_FLAG_MASK 0x01C0
+
 #define ICE_PROTOCOL_MAX_ENTRIES 16
 
 /* Mapping of software defined protocol ID to hardware defined protocol ID */
@@ -336,6 +342,15 @@ struct ice_nvgre {
 	__be32 tni_flow;
 };
 
+struct ice_gre {
+        __be16 flags;
+        __be16 protocol;
+        __be16 chksum;
+        __be16 offset;
+        __be32 key;
+        __be32 seqnum;
+};
+
 union ice_prot_hdr {
 	struct ice_ether_hdr eth_hdr;
 	struct ice_ethtype_hdr ethertype;
@@ -346,6 +361,7 @@ struct ice_nvgre {
 	struct ice_sctp_hdr sctp_hdr;
 	struct ice_udp_tnl_hdr tnl_hdr;
 	struct ice_nvgre nvgre_hdr;
+        struct ice_gre gre_hdr;
 	struct ice_udp_gtp_hdr gtp_hdr;
 	struct ice_pppoe_hdr pppoe_hdr;
 	struct ice_pfcp_hdr pfcp_hdr;
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index dc55d7e..b1a6ee3 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -11,6 +11,8 @@
 #define ICE_ETH_VLAN_TCI_OFFSET		14
 #define ICE_MAX_VLAN_ID			0xFFF
 #define ICE_IPV4_NVGRE_PROTO_ID		0x002F
+#define ICE_IPV4_GRE_PROTO_ID           0x002F
+#define ICE_IPV6_GRE_PROTO_ID           0x002F
 #define ICE_PPP_IPV6_PROTO_ID		0x0057
 #define ICE_IPV6_ETHER_ID		0x86DD
 #define ICE_TCP_PROTO_ID		0x06
@@ -126,6 +128,209 @@ struct ice_dummy_pkt_offsets {
 	0x00, 0x08, 0x00, 0x00,
 };
 
+static const struct ice_dummy_pkt_offsets dummy_ipv6_gre_udp_packet_offsets[] = {
+        { ICE_MAC_OFOS,         0 },
+        { ICE_ETYPE_OL,         12 },
+        { ICE_IPV6_OFOS,        14 },
+        { ICE_GRE,              54 },
+        { ICE_IPV6_IL,          58 },
+        { ICE_UDP_ILOS,         98 },
+        { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv6_gre_udp_packet[] = {
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+        0x00, 0x00, 0x00, 0x36, 0x2f, 0x40, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+        0x86, 0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a,
+        0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+        0xff, 0xd8, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_rfc1701_c1k1_tcp_packet_offsets[] = {
+        { ICE_MAC_OFOS,         0 },
+        { ICE_ETYPE_OL,         12 },
+        { ICE_IPV4_OFOS,        14 },
+        { ICE_GRE,              34 },
+        { ICE_IPV4_IL,          50 },
+        { ICE_TCP_IL,           70 },
+        { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_gre_rfc1701_c1k1_tcp_packet[] = {
+        0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+
+        0x08, 0x00,             /* ICE_ETYPE_OL 12 */
+
+        0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
+        0x00, 0x01, 0x00, 0x00,
+        0x40, 0x2f, 0x7c, 0x7e,
+        0x7f, 0x00, 0x00, 0x01,
+        0x7f, 0x00, 0x00, 0x01,
+
+        0xb0, 0x00, 0x08, 0x00, /* ICE_GRE 34 */
+        0x46, 0x1e, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+
+        0x45, 0x00, 0x00, 0x2a, /* ICE_IPV4_IL 50 */
+        0x00, 0x01, 0x00, 0x00,
+        0x40, 0x06, 0x7c, 0xcb,
+        0x7f, 0x00, 0x00, 0x01,
+        0x7f, 0x00, 0x00, 0x01,
+
+        0x00, 0x14, 0x00, 0x50, /* ICE_TCP_IL 70 */
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+        0x50, 0x02, 0x20, 0x00,
+        0x91, 0x7a, 0x00, 0x00,
+
+        0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_rfc1701_c1k1_udp_packet_offsets[] = {
+        { ICE_MAC_OFOS,         0 },
+        { ICE_ETYPE_OL,         12 },
+        { ICE_IPV4_OFOS,        14 },
+        { ICE_GRE,              34 },
+        { ICE_IPV4_IL,          50 },
+        { ICE_UDP_ILOS,         70 },
+        { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_gre_rfc1701_c1k1_udp_packet[] = {
+        0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+
+        0x08, 0x00,             /* ICE_ETYPE_OL 12 */
+
+        0x45, 0x00, 0x00, 0x42, /* ICE_IPV4_OFOS 14 */
+        0x00, 0x01, 0x00, 0x00,
+        0x40, 0x2f, 0x7c, 0x8a,
+        0x7f, 0x00, 0x00, 0x01,
+        0x7f, 0x00, 0x00, 0x01,
+
+        0xb0, 0x00, 0x08, 0x00, /* ICE_GRE 34 */
+        0x46, 0x1d, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+
+        0x45, 0x00, 0x00, 0x1e, /* ICE_IPV4_IL 50 */
+        0x00, 0x01, 0x00, 0x00,
+        0x40, 0x11, 0x7c, 0xcc,
+        0x7f, 0x00, 0x00, 0x01,
+        0x7f, 0x00, 0x00, 0x01,
+
+        0x00, 0x35, 0x00, 0x35, /* ICE_UDP_ILOS 70 */
+        0x00, 0x0a, 0x01, 0x6e,
+
+        0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_rfc1701_c0k1_tcp_packet_offsets[] = {
+        { ICE_MAC_OFOS,         0 },
+        { ICE_ETYPE_OL,         12 },
+        { ICE_IPV4_OFOS,        14 },
+        { ICE_GRE,              34 },
+        { ICE_IPV4_IL,          46 },
+        { ICE_TCP_IL,           66 },
+        { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_gre_rfc1701_c0k1_tcp_packet[] = {
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+        0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f,
+        0x7c, 0x82, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+        0x00, 0x01, 0x30, 0x00, 0x08, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+        0x00, 0x2a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06,
+        0x7c, 0xcb, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+        0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02,
+        0x20, 0x00, 0x91, 0x7a, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_rfc1701_c0k1_udp_packet_offsets[] = {
+        { ICE_MAC_OFOS,         0 },
+        { ICE_ETYPE_OL,         12 },
+        { ICE_IPV4_OFOS,        14 },
+        { ICE_GRE,              34 },
+        { ICE_IPV4_IL,          46 },
+        { ICE_UDP_ILOS,         66 },
+        { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_gre_rfc1701_c0k1_udp_packet[] = {
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+        0x00, 0x3e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f,
+        0x7c, 0x8e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+        0x00, 0x01, 0x30, 0x00, 0x08, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+        0x00, 0x1e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+        0x7c, 0xcc, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+        0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a,
+        0x01, 0x6e, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_rfc1701_c0k0_tcp_packet_offsets[] = {
+        { ICE_MAC_OFOS,         0 },
+        { ICE_ETYPE_OL,         12 },
+        { ICE_IPV4_OFOS,        14 },
+        { ICE_GRE,              34 },
+        { ICE_IPV4_IL,          42 },
+        { ICE_TCP_IL,           62 },
+        { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_gre_rfc1701_c0k0_tcp_packet[] = {
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+        0x00, 0x46, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f,
+        0x7c, 0x86, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+        0x00, 0x01, 0x10, 0x00, 0x08, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01,
+        0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00,
+        0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x14,
+        0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0x7a,
+        0x00, 0x00, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_rfc1701_c0k0_udp_packet_offsets[] = {
+        { ICE_MAC_OFOS,         0 },
+        { ICE_ETYPE_OL,         12 },
+        { ICE_IPV4_OFOS,        14 },
+        { ICE_GRE,              34 },
+        { ICE_IPV4_IL,          42 },
+        { ICE_UDP_ILOS,         62 },
+        { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_gre_rfc1701_c0k0_udp_packet[] = {
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+        0x00, 0x3a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f,
+        0x7c, 0x92, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+        0x00, 0x01, 0x10, 0x00, 0x08, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01,
+        0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00,
+        0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x35,
+        0x00, 0x35, 0x00, 0x0a, 0x01, 0x6e, 0x00, 0x00,
+};
+
 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
 	{ ICE_MAC_OFOS,		0 },
 	{ ICE_ETYPE_OL,		12 },
@@ -6043,6 +6248,7 @@ enum ice_status
 	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
 	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
 	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
+        { ICE_GRE,              { 0, 2, 4, 6, 8, 10, 12, 14 } },
 	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
 	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
 	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
@@ -6077,6 +6283,7 @@ enum ice_status
 	{ ICE_GENEVE,		ICE_UDP_OF_HW },
 	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
 	{ ICE_NVGRE,		ICE_GRE_OF_HW },
+        { ICE_GRE,              ICE_GRE_OF_HW },
 	{ ICE_GTP,		ICE_UDP_OF_HW },
 	{ ICE_PPPOE,		ICE_PPPOE_HW },
 	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
@@ -6088,6 +6295,47 @@ enum ice_status
 	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
 };
 
+static u16 buildin_recipe_get(struct ice_switch_info *sw,
+                              struct ice_prot_lkup_ext *lkup_exts)
+{
+        int i;
+
+        if (!sw->buildin_recipes)
+                return ICE_MAX_NUM_RECIPES;
+
+        for (i = 10; i < ICE_MAX_NUM_RECIPES; i++) {
+                struct ice_sw_recipe *recp = &sw->buildin_recipes[i];
+                struct ice_fv_word *a = lkup_exts->fv_words;
+                struct ice_fv_word *b = recp->lkup_exts.fv_words;
+                u16 *c = recp->lkup_exts.field_mask;
+                u16 *d = lkup_exts->field_mask;
+                bool found = true;
+                u8 p, q;
+
+                if (!recp->is_root)
+                        continue;
+
+                if (recp->lkup_exts.n_val_words != lkup_exts->n_val_words)
+                        continue;
+
+                for (p = 0; p < lkup_exts->n_val_words; p++) {
+                        for (q = 0; q < recp->lkup_exts.n_val_words; q++) {
+                                if (a[p].off == b[q].off &&
+                                    a[p].prot_id == b[q].prot_id &&
+                                    d[p] == c[q])
+                                        break;
+                        }
+                        if (q >= recp->lkup_exts.n_val_words) {
+                                found = false;
+                                break;
+                        }
+                }
+                if (found)
+                        return i;
+        }
+        return ICE_MAX_NUM_RECIPES;
+}
+
 /**
  * ice_find_recp - find a recipe
  * @hw: pointer to the hardware structure
@@ -6100,8 +6348,15 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
 {
 	bool refresh_required = true;
 	struct ice_sw_recipe *recp;
+        u16 buildin_rid;
 	u8 i;
 
+        if (hw->use_buildin_recipe) {
+                buildin_rid = buildin_recipe_get(hw->switch_info, lkup_exts);
+                if (buildin_rid < ICE_MAX_NUM_RECIPES)
+                        return buildin_rid;
+        }
+
 	/* Walk through existing recipes to find a match */
 	recp = hw->switch_info->recp_list;
 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
@@ -6196,6 +6451,7 @@ static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
 		     struct ice_prot_lkup_ext *lkup_exts)
 {
 	u8 j, word, prot_id, ret_val;
+        u8 extra_byte = 0;
 
 	if (!ice_prot_type_to_id(rule->type, &prot_id))
 		return 0;
@@ -6208,8 +6464,16 @@ static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
 			/* No more space to accommodate */
 			if (word >= ICE_MAX_CHAIN_WORDS)
 				return 0;
+                        if (rule->type == ICE_GRE) {
+                                if (ice_prot_ext[rule->type].offs[j] == 0) {
+                                        if (((u16 *)&rule->h_u)[j] == 0x20) {
+                                                extra_byte = 4;
+                                        }
+                                        continue;
+                                }
+                        }
 			lkup_exts->fv_words[word].off =
-				ice_prot_ext[rule->type].offs[j];
+				ice_prot_ext[rule->type].offs[j] - extra_byte;
 			lkup_exts->fv_words[word].prot_id =
 				ice_prot_id_tbl[rule->type].protocol_id;
 			lkup_exts->field_mask[word] =
@@ -6753,10 +7017,11 @@ static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
  * @lkups_cnt: number of protocols
  * @bm: bitmap of field vectors to consider
  * @fv_list: pointer to a list that holds the returned field vectors
+ * @lkup_exts: lookup elements
  */
 static enum ice_status
 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
-	   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list, struct ice_prot_lkup_ext *lkup_exts)
 {
 	enum ice_status status;
 	u8 *prot_ids;
@@ -6776,7 +7041,7 @@ static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
 		}
 
 	/* Find field vectors that include all specified protocol types */
-	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
+        status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list, lkup_exts);
 
 free_mem:
 	ice_free(hw, prot_ids);
@@ -6806,6 +7071,10 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
 		*mask = ICE_TUN_FLAG_MASK;
 		return true;
 
+        case ICE_SW_TUN_GRE:
+                *mask = ICE_GRE_FLAG_MASK;
+                return true;
+
 	case ICE_SW_TUN_GENEVE_VLAN:
 	case ICE_SW_TUN_VXLAN_VLAN:
 		*mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
@@ -6826,7 +7095,15 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
 ice_add_special_words(struct ice_adv_rule_info *rinfo,
 		      struct ice_prot_lkup_ext *lkup_exts)
 {
+        u8 has_gre_key = 0;
 	u16 mask;
+        u8 i;
+
+        for (i = 0; i < lkup_exts->n_val_words; i++) {
+                if (lkup_exts->fv_words[i].prot_id == 0x40) {
+                        has_gre_key = 1;
+                }
+        }
 
 	/* If this is a tunneled packet, then add recipe index to match the
 	 * tunnel bit in the packet metadata flags.
@@ -6838,6 +7115,12 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
 			lkup_exts->field_mask[word] = mask;
+
+                        if (rinfo->tun_type == ICE_SW_TUN_GRE)
+                                lkup_exts->fv_words[word].off = ICE_GRE_FLAG_MDID_OFF;
+
+                        if (!has_gre_key)
+                                lkup_exts->field_mask[word] = 0x0140;
 		} else {
 			return ICE_ERR_MAX_LIMIT;
 		}
@@ -6879,6 +7162,9 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
 	case ICE_SW_TUN_NVGRE:
 		prof_type = ICE_PROF_TUN_GRE;
 		break;
+        case ICE_SW_TUN_GRE:
+                prof_type = ICE_PROF_TUN_GRE;
+                break;
 	case ICE_SW_TUN_PPPOE:
 	case ICE_SW_TUN_PPPOE_QINQ:
 		prof_type = ICE_PROF_TUN_PPPOE;
@@ -7099,7 +7385,7 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 	 */
 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
 
-	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list, lkup_exts);
 	if (status)
 		goto err_unroll;
 
@@ -7248,6 +7534,8 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 		      const struct ice_dummy_pkt_offsets **offsets)
 {
 	bool tcp = false, udp = false, ipv6 = false, vlan = false;
+        bool gre_c_bit = false;
+        bool gre_k_bit = false;
 	bool gre = false;
 	u16 i;
 
@@ -7256,10 +7544,26 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 			udp = true;
 		else if (lkups[i].type == ICE_TCP_IL)
 			tcp = true;
-		else if (lkups[i].type == ICE_IPV6_OFOS)
+                else if (lkups[i].type == ICE_IPV6_OFOS) {
 			ipv6 = true;
+                        if (lkups[i].h_u.ipv6_hdr.next_hdr == ICE_IPV6_GRE_PROTO_ID &&
+                            lkups[i].m_u.ipv6_hdr.next_hdr == 0xFF)
+                                gre = true;
+                }
 		else if (lkups[i].type == ICE_VLAN_OFOS)
 			vlan = true;
+                else if (lkups[i].type == ICE_GRE) {
+                        if (lkups[i].h_u.gre_hdr.flags & 0x20)
+                                gre_k_bit = true;
+                        if (lkups[i].h_u.gre_hdr.flags & 0x80)
+                                gre_c_bit = true;
+                }
+                else if (lkups[i].type == ICE_IPV4_OFOS &&
+                         lkups[i].h_u.ipv4_hdr.protocol ==
+                                ICE_IPV4_GRE_PROTO_ID &&
+                         lkups[i].m_u.ipv4_hdr.protocol ==
+                                0xFF)
+                        gre = true;
 		else if (lkups[i].type == ICE_IPV4_OFOS &&
 			 lkups[i].h_u.ipv4_hdr.protocol ==
 				ICE_IPV4_NVGRE_PROTO_ID &&
@@ -7469,6 +7773,12 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 	}
 
 	if (tun_type == ICE_SW_IPV4_TCP) {
+                if (vlan && tcp) {
+                        *pkt = dummy_vlan_tcp_packet;
+                        *pkt_len = sizeof(dummy_vlan_tcp_packet);
+                        *offsets = dummy_vlan_tcp_packet_offsets;
+                        return;
+                }
 		*pkt = dummy_tcp_packet;
 		*pkt_len = sizeof(dummy_tcp_packet);
 		*offsets = dummy_tcp_packet_offsets;
@@ -7476,6 +7786,12 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 	}
 
 	if (tun_type == ICE_SW_IPV4_UDP) {
+                if (vlan && udp) {
+                        *pkt = dummy_vlan_udp_packet;
+                        *pkt_len = sizeof(dummy_vlan_udp_packet);
+                        *offsets = dummy_vlan_udp_packet_offsets;
+                        return;
+                }
 		*pkt = dummy_udp_packet;
 		*pkt_len = sizeof(dummy_udp_packet);
 		*offsets = dummy_udp_packet_offsets;
@@ -7503,6 +7819,13 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 		return;
 	}
 
+       if (ipv6 && gre) {
+               *pkt = dummy_ipv6_gre_udp_packet;
+               *pkt_len = sizeof(dummy_ipv6_gre_udp_packet);
+               *offsets = dummy_ipv6_gre_udp_packet_offsets;
+               return;
+        }
+
 	if (tun_type == ICE_SW_TUN_NVGRE || gre) {
 		if (tcp) {
 			*pkt = dummy_gre_tcp_packet;
@@ -7517,6 +7840,46 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 		return;
 	}
 
+        if (tun_type == ICE_SW_TUN_GRE || gre) {
+                if (tcp) {
+                        if (gre_c_bit && gre_k_bit) {
+                                *pkt = dummy_gre_rfc1701_c1k1_tcp_packet;
+                                *pkt_len = sizeof(dummy_gre_rfc1701_c1k1_tcp_packet);
+                                *offsets = dummy_gre_rfc1701_c1k1_tcp_packet_offsets;
+                                return;
+                        }
+                        if (!gre_c_bit && gre_k_bit) {
+                                *pkt = dummy_gre_rfc1701_c0k1_tcp_packet;
+                                *pkt_len = sizeof(dummy_gre_rfc1701_c0k1_tcp_packet);
+                                *offsets = dummy_gre_rfc1701_c0k1_tcp_packet_offsets;
+                                return;
+                        }
+
+                        *pkt = dummy_gre_rfc1701_c0k0_tcp_packet;
+                        *pkt_len = sizeof(dummy_gre_rfc1701_c0k0_tcp_packet);
+                        *offsets = dummy_gre_rfc1701_c0k0_tcp_packet_offsets;
+                        return;
+                }
+
+                if (gre_c_bit && gre_k_bit) {
+                        *pkt = dummy_gre_rfc1701_c1k1_udp_packet;
+                        *pkt_len = sizeof(dummy_gre_rfc1701_c1k1_udp_packet);
+                        *offsets = dummy_gre_rfc1701_c1k1_udp_packet_offsets;
+                        return;
+                }
+                if (!gre_c_bit && gre_k_bit) {
+                        *pkt = dummy_gre_rfc1701_c0k1_udp_packet;
+                        *pkt_len = sizeof(dummy_gre_rfc1701_c0k1_udp_packet);
+                        *offsets = dummy_gre_rfc1701_c0k1_udp_packet_offsets;
+                        return;
+                }
+
+                *pkt = dummy_gre_rfc1701_c0k0_udp_packet;
+                *pkt_len = sizeof(dummy_gre_rfc1701_c0k0_udp_packet);
+                *offsets = dummy_gre_rfc1701_c0k0_udp_packet_offsets;
+                return;
+        }
+
 	if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
 	    tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
 	    tun_type == ICE_SW_TUN_GENEVE_VLAN ||
@@ -7658,6 +8021,9 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 		case ICE_NVGRE:
 			len = sizeof(struct ice_nvgre);
 			break;
+                case ICE_GRE:
+                        len = sizeof(struct ice_gre);
+                        break;
 		case ICE_VXLAN:
 		case ICE_GENEVE:
 		case ICE_VXLAN_GPE:
@@ -7691,6 +8057,20 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 		if (len % ICE_BYTES_PER_WORD)
 			return ICE_ERR_CFG;
 
+                if (lkups[i].type == ICE_GRE) {
+                        if (lkups[i].h_u.gre_hdr.flags == 0x20) {
+                                offset -= 4;
+                        }
+                        for (j = 1; j < len / sizeof(u16); j++)
+                                if (((u16 *)&lkups[i].m_u)[j])
+                                        ((u16 *)(pkt + offset))[j] =
+                                                (((u16 *)(pkt + offset))[j] &
+                                                 ~((u16 *)&lkups[i].m_u)[j]) |
+                                                (((u16 *)&lkups[i].h_u)[j] &
+                                                 ((u16 *)&lkups[i].m_u)[j]);
+                        continue;
+                }
+
 		/* We have the offset to the header start, the length, the
 		 * caller's header values and mask. Use this information to
 		 * copy the data into the dummy packet appropriately based on
@@ -8267,8 +8647,11 @@ enum ice_status
 			return ICE_ERR_CFG;
 
 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
-		if (!count)
-			return ICE_ERR_CFG;
+                if (!count) {
+                        if (lkups[i].type == ICE_GRE)
+                                continue;
+                        return ICE_ERR_CFG;
+                }
 	}
 
 	/* Create any special protocol/offset pairs, such as looking at tunnel
@@ -8357,6 +8740,7 @@ enum ice_status
 	struct ice_switch_info *sw;
 
 	sw = hw->switch_info;
+        if (!sw->buildin_recipes[remove_entry->rid].is_root)
 	if (!sw->recp_list[remove_entry->rid].recp_created)
 		return ICE_ERR_PARAM;
 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 6b8d44f..d364066 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -838,6 +838,7 @@ struct ice_switch_info {
 	u16 max_used_prof_index;
 
 	ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
+        struct ice_sw_recipe *buildin_recipes;
 };
 
 /* Port hardware description */
@@ -985,6 +986,7 @@ struct ice_hw {
 	ice_declare_bitmap(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX);
 	struct ice_lock rss_locks;	/* protect RSS configuration */
 	struct LIST_HEAD_TYPE rss_list_head;
+        u8 use_buildin_recipe;
 };
 
 /* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
index f7dbe53..2658515 100644
--- a/drivers/net/ice/ice_acl_filter.c
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -25,6 +25,7 @@
 #include "ice_ethdev.h"
 #include "ice_generic_flow.h"
 #include "base/ice_flow.h"
+#include "ice_dcf_ethdev.h"
 
 #define MAX_ACL_SLOTS_ID 2048
 
@@ -978,8 +979,11 @@ static void ice_deinit_acl(struct ice_pf *pf)
 	struct ice_pf *pf = &ad->pf;
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
 	struct ice_flow_parser *parser = &ice_acl_parser;
+        struct rte_eth_dev *eth_dev = ad->eth_dev;
+        struct ice_dcf_adapter *dcf_adapter = eth_dev->data->dev_private;
+        struct ice_dcf_hw *dcf_hw = &dcf_adapter->real_hw;
 
-	if (!ad->hw.dcf_enabled)
+        if (!ad->hw.dcf_enabled || dcf_hw->multi_inst)
 		return 0;
 
 	ret = ice_acl_prof_alloc(hw);
@@ -1025,8 +1029,11 @@ static void ice_deinit_acl(struct ice_pf *pf)
 	struct ice_pf *pf = &ad->pf;
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
 	struct ice_flow_parser *parser = &ice_acl_parser;
+        struct rte_eth_dev *eth_dev = ad->eth_dev;
+        struct ice_dcf_adapter *dcf_adapter = eth_dev->data->dev_private;
+        struct ice_dcf_hw *dcf_hw = &dcf_adapter->real_hw;
 
-	if (ad->hw.dcf_enabled) {
+        if (ad->hw.dcf_enabled && !dcf_hw->multi_inst) {
 		ice_unregister_parser(parser, ad);
 		ice_deinit_acl(pf);
 		ice_acl_prof_free(hw);
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 44dbd3b..d4b1bb9 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -642,7 +642,8 @@
 
 	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
 		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
-		ice_dcf_mode_disable(hw);
+                if (!hw->multi_inst)
+		        ice_dcf_mode_disable(hw);
 		goto err_alloc;
 	}
 
@@ -700,8 +701,8 @@
 	rte_intr_disable(intr_handle);
 	rte_intr_callback_unregister(intr_handle,
 				     ice_dcf_dev_interrupt_handler, hw);
-
-	ice_dcf_mode_disable(hw);
+        if (!hw->multi_inst)
+	        ice_dcf_mode_disable(hw);
 	iavf_shutdown_adminq(&hw->avf);
 
 	rte_free(hw->arq_buf);
@@ -1079,3 +1080,19 @@
 	rte_free(list);
 	return err;
 }
+
+int
+ice_dcf_flush_rules(struct ice_dcf_hw *hw)
+{
+        struct dcf_virtchnl_cmd args;
+        int err = 0;
+
+        memset(&args, 0, sizeof(args));
+        args.v_op = VIRTCHNL_OP_DCF_RULE_FLUSH;
+
+        err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+        if (err)
+                PMD_DRV_LOG(WARNING, "fail to execute command OF_DCF_RULE_FLUSH, DCF role must be preempted.");
+
+        return 0;
+}
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index ff02996..bebbc88 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -51,6 +51,8 @@ struct ice_dcf_hw {
 	uint16_t vsi_id;
 
 	struct rte_eth_dev *eth_dev;
+        bool multi_inst;
+        bool dcf_replaced;
 	uint8_t *rss_lut;
 	uint8_t *rss_key;
 	uint64_t supported_rxdid;
@@ -77,5 +79,6 @@ int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
 int ice_dcf_query_stats(struct ice_dcf_hw *hw,
 			struct virtchnl_eth_stats *pstats);
 int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add);
+int ice_dcf_flush_rules(struct ice_dcf_hw *hw);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index b983d84..dd835fe 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -19,6 +19,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_ethdev.h>
 
 #include <iavf_devids.h>
 
@@ -26,6 +27,13 @@
 #include "ice_dcf_ethdev.h"
 #include "ice_rxtx.h"
 
+static int
+ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                               struct rte_eth_udp_tunnel *udp_tunnel);
+static int
+ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                               struct rte_eth_udp_tunnel *udp_tunnel);
+
 static uint16_t
 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
 		  __rte_unused struct rte_mbuf **bufs,
@@ -866,6 +874,64 @@
 	return 0;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                                struct rte_eth_udp_tunnel *udp_tunnel)
+{
+        struct ice_dcf_adapter *adapter = dev->data->dev_private;
+        struct ice_adapter *parent_adapter = &adapter->parent;
+        struct ice_hw *parent_hw = &parent_adapter->hw;
+        int ret = 0;
+
+        if (!udp_tunnel)
+                return -EINVAL;
+
+        switch (udp_tunnel->prot_type) {
+        case RTE_TUNNEL_TYPE_VXLAN:
+                ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
+                                        udp_tunnel->udp_port);
+                break;
+        case RTE_TUNNEL_TYPE_ECPRI:
+                ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
+                                        udp_tunnel->udp_port);
+                break;
+        default:
+                PMD_DRV_LOG(ERR, "Invalid tunnel type");
+                ret = -EINVAL;
+                break;
+        }
+
+        return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                                struct rte_eth_udp_tunnel *udp_tunnel)
+{
+        struct ice_dcf_adapter *adapter = dev->data->dev_private;
+        struct ice_adapter *parent_adapter = &adapter->parent;
+        struct ice_hw *parent_hw = &parent_adapter->hw;
+        int ret = 0;
+
+        if (!udp_tunnel)
+                return -EINVAL;
+
+        switch (udp_tunnel->prot_type) {
+        case RTE_TUNNEL_TYPE_VXLAN:
+        case RTE_TUNNEL_TYPE_ECPRI:
+                ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
+                break;
+        default:
+                PMD_DRV_LOG(ERR, "Invalid tunnel type");
+                ret = -EINVAL;
+                break;
+        }
+
+        return ret;
+}
+
 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.dev_start               = ice_dcf_dev_start,
 	.dev_stop                = ice_dcf_dev_stop,
@@ -888,91 +954,139 @@
 	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
 	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
 	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
+        .udp_tunnel_port_add     = ice_dcf_dev_udp_tunnel_port_add,
+        .udp_tunnel_port_del     = ice_dcf_dev_udp_tunnel_port_del,
 };
 
 static int
-ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
+ice_dcf_cap_check_handler(__rte_unused const char *key,
+                          const char *value, void *opaque)
 {
-	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
+        bool *mi = opaque;
+
+        if (!strcmp(value, "dcf")) {
+                *mi = 0;
+                return 0;
+        }
+        if (!strcmp(value, "mdcf")) {
+                *mi = 1;
+        return 0;
+        }
+
+        return -1;
+}
 
-	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
-	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
-	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
+static int
+parse_bool(const char *key, const char *value, void *args)
+{
+        int *i = (int *)args;
+        char *end;
+        int num;
 
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+        num = strtoul(value, &end, 10);
 
-	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
-	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
-		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
-		return -1;
-	}
+        if (num != 0 && num != 1) {
+                PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
+                            "value must be 0 or 1",
+                            value, key);
+                return -1;
+        }
 
-	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
-		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
-		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
-		return -1;
-	}
-
-	return 0;
+        *i = num;
+        return 0;
 }
 
 static int
-ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
+ice_dcf_cap_selected(struct ice_dcf_adapter *adapter,
+                      struct rte_devargs *devargs)
 {
-	ice_dcf_dev_close(eth_dev);
+        struct ice_adapter *ad = &adapter->parent;
+        struct rte_kvargs *kvlist;
+        const char *key_cap = "cap";
+        const char *key_br = "br";
+        int ret = 0;
 
-	return 0;
-}
+        if (devargs == NULL)
+                return 0;
 
-static int
-ice_dcf_cap_check_handler(__rte_unused const char *key,
-			  const char *value, __rte_unused void *opaque)
-{
-	if (strcmp(value, "dcf"))
-		return -1;
+        kvlist = rte_kvargs_parse(devargs->args, NULL);
+        if (kvlist == NULL)
+                return 0;
 
-	return 0;
+        if (!rte_kvargs_count(kvlist, key_cap))
+                goto exit;
+
+        /* dcf capability selected when there's a key-value pair: cap=dcf */
+        if (rte_kvargs_process(kvlist, key_cap,
+                               ice_dcf_cap_check_handler,
+                               &adapter->real_hw.multi_inst) < 0)
+                goto exit;
+
+        /* dcf capability selected when there's a key-value pair: cap=dcf */
+        if (rte_kvargs_process(kvlist, key_br,
+                               &parse_bool,
+                               &ad->hw.use_buildin_recipe) < 0)
+                goto exit;
+
+        ret = 1;
+
+exit:
+        rte_kvargs_free(kvlist);
+        return ret;
 }
 
+/**
+ * Queue xstats filled automatically by ethdev layer.
+ * PMDs filling the queue xstats themselves should not set this flag
+ */
+#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
 static int
-ice_dcf_cap_selected(struct rte_devargs *devargs)
+ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
 {
-	struct rte_kvargs *kvlist;
-	const char *key = "cap";
-	int ret = 0;
+        struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
 
-	if (devargs == NULL)
-		return 0;
+        if (!ice_dcf_cap_selected(adapter, pci_dev->device.devargs))
+                return 1;
 
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (kvlist == NULL)
-		return 0;
+        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
+        eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
+        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
 
-	if (!rte_kvargs_count(kvlist, key))
-		goto exit;
+        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+                return 0;
 
-	/* dcf capability selected when there's a key-value pair: cap=dcf */
-	if (rte_kvargs_process(kvlist, key,
-			       ice_dcf_cap_check_handler, NULL) < 0)
-		goto exit;
+        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
-	ret = 1;
+        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
+        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
+                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
+                return -1;
+        }
 
-exit:
-	rte_kvargs_free(kvlist);
-	return ret;
+        if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
+                PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
+                ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
+                return -1;
+        }
+
+        return 0;
+}
+
+static int
+ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	ice_dcf_dev_close(eth_dev);
+
+	return 0;
 }
 
 static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
 			     struct rte_pci_device *pci_dev)
 {
-	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
-		return 1;
-
-	return rte_eth_dev_pci_generic_probe(pci_dev,
-					     sizeof(struct ice_dcf_adapter),
-					     ice_dcf_dev_init);
+        return rte_eth_dev_pci_generic_probe(pci_dev,
+                                             sizeof(struct ice_dcf_adapter),
+                                             ice_dcf_dev_init);
 }
 
 static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
@@ -995,4 +1109,6 @@ static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
-RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
+RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf,
+                              "cap=dcf|mdcf "
+                              "br=<1|0>");
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 30ead4c..fdeef7d 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -112,6 +112,9 @@
 {
 	struct ice_dcf_hw *hw = param;
 
+        if (hw->multi_inst)
+                return NULL;
+
 	usleep(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
 
 	rte_spinlock_lock(&vsi_update_lock);
@@ -160,6 +163,10 @@
 		pthread_create(&thread, NULL,
 			       ice_dcf_vsi_update_service_handler, dcf_hw);
 		break;
+        case VIRTCHNL_EVENT_DCF_VSI_INFO:
+                if (dcf_hw->vsi_id != pf_msg->event_data.vf_vsi_map.vsi_id)
+                        dcf_hw->dcf_replaced = true;
+                break;
 	default:
 		PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
 		break;
@@ -381,6 +388,7 @@ static void ice_dcf_uninit_parent_hw(struct ice_hw *hw)
 	parent_hw->aq_send_cmd_fn = ice_dcf_send_aq_cmd;
 	parent_hw->aq_send_cmd_param = &adapter->real_hw;
 	parent_hw->dcf_enabled = true;
+        hw->dcf_replaced = false;
 
 	err = ice_dcf_init_parent_hw(parent_hw);
 	if (err) {
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index b01f2cd..58bffe1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -72,6 +72,7 @@ struct proto_xtr_ol_flag {
 
 #define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
 #define ICE_COMMS_PKG_NAME			"ICE COMMS Package"
+#define ICE_CUSTOM_OS_PKG_NAME             "Tencent ICE OS Package"
 #define ICE_MAX_RES_DESC_NUM        1024
 
 static int ice_dev_configure(struct rte_eth_dev *dev);
@@ -1810,6 +1811,9 @@ enum ice_pkg_type
 	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
 		ICE_PKG_NAME_SIZE))
 		package_type = ICE_PKG_TYPE_COMMS;
+        if (!strncmp((char *)hw->active_pkg_name, ICE_CUSTOM_OS_PKG_NAME,
+                ICE_PKG_NAME_SIZE))
+                package_type = ICE_PKG_TYPE_OS_DEFAULT;
 	else
 		package_type = ICE_PKG_TYPE_UNKNOWN;
 
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index cead476..f42df9b 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -17,6 +17,7 @@
 
 #include "ice_ethdev.h"
 #include "ice_generic_flow.h"
+#include "ice_dcf.h"
 
 /**
  * Non-pipeline mode, fdir and switch both used as distributor,
@@ -1003,6 +1004,33 @@ enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[] = {
 	RTE_FLOW_ITEM_TYPE_ICMP6,
 	RTE_FLOW_ITEM_TYPE_END,
 };
+/* IPv4 GRE RAW IPv4 */
+enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4[] = {
+        RTE_FLOW_ITEM_TYPE_ETH,
+        RTE_FLOW_ITEM_TYPE_IPV4,
+        RTE_FLOW_ITEM_TYPE_GRE,
+        RTE_FLOW_ITEM_TYPE_RAW,
+        RTE_FLOW_ITEM_TYPE_IPV4,
+        RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4_udp[] = {
+        RTE_FLOW_ITEM_TYPE_ETH,
+        RTE_FLOW_ITEM_TYPE_IPV4,
+        RTE_FLOW_ITEM_TYPE_GRE,
+        RTE_FLOW_ITEM_TYPE_RAW,
+        RTE_FLOW_ITEM_TYPE_IPV4,
+        RTE_FLOW_ITEM_TYPE_UDP,
+        RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4_tcp[] = {
+        RTE_FLOW_ITEM_TYPE_ETH,
+        RTE_FLOW_ITEM_TYPE_IPV4,
+        RTE_FLOW_ITEM_TYPE_GRE,
+        RTE_FLOW_ITEM_TYPE_RAW,
+        RTE_FLOW_ITEM_TYPE_IPV4,
+        RTE_FLOW_ITEM_TYPE_TCP,
+        RTE_FLOW_ITEM_TYPE_END,
+};
 
 /*IPv4 GTPU (EH) */
 enum rte_flow_item_type pattern_eth_ipv4_gtpu[] = {
@@ -2286,10 +2314,16 @@ struct ice_pattern_match_item *
 		struct rte_flow_error *error)
 {
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+        struct ice_adapter *ad =
+                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+        struct ice_dcf_hw *hw = ad->hw.aq_send_cmd_param;
 	struct rte_flow *p_flow;
 	void *temp;
 	int ret = 0;
 
+        if (hw->dcf_replaced)
+                return ret;
+
 	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
 		ret = ice_flow_destroy(dev, p_flow, error);
 		if (ret) {
@@ -2298,6 +2332,9 @@ struct ice_pattern_match_item *
 		}
 	}
 
+        if (ad->hw.dcf_enabled && hw->multi_inst)
+                return ice_dcf_flush_rules(ad->hw.aq_send_cmd_param);
+
 	return ret;
 }
 
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 434d2f4..f212736 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -35,6 +35,7 @@
 #define ICE_PROT_AH                (1ULL << 24)
 #define ICE_PROT_L2TPV3OIP         (1ULL << 25)
 #define ICE_PROT_PFCP              (1ULL << 26)
+#define ICE_PROT_GRE               (1ULL << 27)
 
 /* field */
 
@@ -61,6 +62,7 @@
 #define ICE_L2TPV3OIP_SESSION_ID   (1ULL << 43)
 #define ICE_PFCP_SEID              (1ULL << 42)
 #define ICE_PFCP_S_FIELD           (1ULL << 41)
+#define ICE_RAW_PATTERN            (1ULL << 40)
 
 /* input set */
 
@@ -189,6 +191,8 @@
 	(ICE_PROT_GTPU | ICE_GTPU_TEID)
 #define ICE_INSET_GTPU_QFI \
 	(ICE_PROT_GTPU | ICE_GTPU_QFI)
+#define ICE_INSET_RAW \
+        (ICE_PROT_GRE | ICE_RAW_PATTERN)
 #define ICE_INSET_PPPOE_SESSION \
 	(ICE_PROT_PPPOE_S | ICE_PPPOE_SESSION)
 #define ICE_INSET_PPPOE_PROTO \
@@ -361,6 +365,11 @@
 extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_sctp[];
 extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[];
 
+/* IPv4 GRE RAW IPv4 */
+extern enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4[];
+extern enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4_udp[];
+extern enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4_tcp[];
+
 /* IPv4 GTPU (EH) */
 extern enum rte_flow_item_type pattern_eth_ipv4_gtpu[];
 extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 8cba6eb..6746179 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -31,12 +31,15 @@
 #define ICE_PPP_IPV4_PROTO	0x0021
 #define ICE_PPP_IPV6_PROTO	0x0057
 #define ICE_IPV4_PROTO_NVGRE	0x002F
+#define ICE_IPV6_PROTO_NVGRE    0x002F
 
 #define ICE_SW_INSET_ETHER ( \
 	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
 #define ICE_SW_INSET_MAC_VLAN ( \
 		ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
 		ICE_INSET_VLAN_OUTER)
+#define ICE_SW_INSET_MAC_VLAN_IPV4 ( \
+        ICE_SW_INSET_MAC_VLAN | ICE_SW_INSET_MAC_IPV4)
 #define ICE_SW_INSET_MAC_IPV4 ( \
 	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
 	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
@@ -48,6 +51,10 @@
 	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
 	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
 	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
+#define ICE_SW_INSET_MAC_VLAN_IPV4_TCP ( \
+        ICE_SW_INSET_MAC_VLAN | ICE_SW_INSET_MAC_IPV4_TCP)
+#define ICE_SW_INSET_MAC_VLAN_IPV4_UDP ( \
+        ICE_SW_INSET_MAC_VLAN | ICE_SW_INSET_MAC_IPV4_UDP)
 #define ICE_SW_INSET_MAC_IPV6 ( \
 	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
 	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
@@ -82,6 +89,17 @@
 	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
 	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
 	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
+#define ICE_SW_INSET_DIST_GRE_RAW_IPV4 ( \
+        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+        ICE_INSET_IPV4_DST | ICE_INSET_RAW)
+#define ICE_SW_INSET_DIST_GRE_RAW_IPV4_TCP ( \
+        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
+        ICE_INSET_IPV4_DST | ICE_INSET_RAW)
+#define ICE_SW_INSET_DIST_GRE_RAW_IPV4_UDP ( \
+        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
+        ICE_INSET_IPV4_DST | ICE_INSET_RAW)
 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
 	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
 	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
@@ -131,6 +149,13 @@
 	ICE_SW_INSET_MAC_IPV6 | \
 	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
 
+#define CUSTOM_GRE_KEY_OFFSET 4
+#define CUSTOM_GRE_RAW_KEY_LEN 2*CUSTOM_GRE_KEY_OFFSET
+
+#define GRE_CFLAG (0x80)
+#define GRE_KFLAG (0x20)
+#define GRE_SFLAG (0x10)
+
 struct sw_meta {
 	struct ice_adv_lkup_elem *list;
 	uint16_t lkups_num;
@@ -156,6 +181,12 @@ struct sw_meta {
 			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_tcp,
 			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_vlan_ipv4,
+			ICE_SW_INSET_MAC_VLAN_IPV4, ICE_INSET_NONE},
+	{pattern_eth_vlan_ipv4_tcp,
+			ICE_SW_INSET_MAC_VLAN_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_vlan_ipv4_udp,
+			ICE_SW_INSET_MAC_VLAN_IPV4_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv6,
 			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
 	{pattern_eth_ipv6_udp,
@@ -174,6 +205,12 @@ struct sw_meta {
 			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
 			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gre_raw_ipv4,
+			ICE_SW_INSET_DIST_GRE_RAW_IPV4, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gre_raw_ipv4_tcp,
+			ICE_SW_INSET_DIST_GRE_RAW_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gre_raw_ipv4_udp,
+			ICE_SW_INSET_DIST_GRE_RAW_IPV4_UDP, ICE_INSET_NONE},
 };
 
 static struct
@@ -453,9 +490,10 @@ struct sw_meta {
 		struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
+	struct ice_dcf_hw *dcf_hw = ad->hw.aq_send_cmd_param;
+	struct ice_rule_query_data *filter_ptr;
 	struct ice_hw *hw = &ad->hw;
 	int ret;
-	struct ice_rule_query_data *filter_ptr;
 
 	filter_ptr = (struct ice_rule_query_data *)
 		flow->rule;
@@ -469,7 +507,7 @@ struct sw_meta {
 	}
 
 	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
-	if (ret) {
+	if (ret && !(hw->dcf_enabled && dcf_hw->multi_inst)) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 			"fail to destroy switch filter rule");
@@ -477,7 +515,7 @@ struct sw_meta {
 	}
 
 	rte_free(filter_ptr);
-	return ret;
+	return 0;
 }
 
 static void
@@ -502,6 +540,8 @@ struct sw_meta {
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
 	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_gre *gre_spec, *gre_mask;
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
 	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
 	const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
@@ -519,12 +559,16 @@ struct sw_meta {
 	bool tunnel_valid = 0;
 	bool profile_rule = 0;
 	bool nvgre_valid = 0;
+	bool gre_valid = 0;
 	bool vxlan_valid = 0;
 	bool ipv6_valid = 0;
 	bool ipv4_valid = 0;
 	bool udp_valid = 0;
 	bool tcp_valid = 0;
 	uint16_t j, t = 0;
+	int32_t off;
+	uint16_t len;
+	uint16_t c_rsvd0_ver = 0;
 
 	for (item = pattern; item->type !=
 			RTE_FLOW_ITEM_TYPE_END; item++) {
@@ -734,6 +778,10 @@ struct sw_meta {
 						break;
 					}
 				}
+				if ((ipv6_spec->hdr.proto &
+					ipv6_mask->hdr.proto) ==
+					ICE_IPV6_PROTO_NVGRE)
+					*tun_type = ICE_SW_TUN_AND_NON_TUN;
 				if (ipv6_mask->hdr.proto &&
 					tunnel_valid)
 					input_set |=
@@ -1047,6 +1095,84 @@ struct sw_meta {
 			}
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GRE:
+			gre_spec = item->spec;
+			gre_mask = item->mask;
+
+			gre_valid = 1;
+			tunnel_valid = 1;
+			if (gre_spec && gre_mask) {
+				list[t].type = ICE_GRE;
+				if (gre_mask->c_rsvd0_ver) {
+					/* GRE RFC1701 flags word */
+					list[t].h_u.gre_hdr.flags =
+						gre_spec->c_rsvd0_ver;
+					list[t].m_u.gre_hdr.flags =
+						gre_mask->c_rsvd0_ver;
+					c_rsvd0_ver = gre_spec->c_rsvd0_ver &
+						      gre_mask->c_rsvd0_ver;
+				}
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_RAW:
+			raw_spec = item->spec;
+			raw_mask = item->mask;
+
+			if (list[t].type != ICE_GRE) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"RAW must follow GRE.");
+				return -rte_errno;
+			}
+
+			if ((c_rsvd0_ver & GRE_KFLAG) == 0) {
+				if (raw_spec && raw_mask) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"invalid pattern: K bit is 0 but a raw key pattern is present.");
+					return -rte_errno;
+				}
+				break;
+			}
+
+			if (!raw_spec || !raw_mask) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"invalid pattern: K bit is 1 but no raw key pattern is present.");
+				return -rte_errno;
+			}
+
+			off = raw_spec->offset;
+			len = raw_spec->length;
+
+			if ((c_rsvd0_ver & GRE_CFLAG) == GRE_CFLAG &&
+			    off != CUSTOM_GRE_KEY_OFFSET) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"invalid pattern: C bit is 1 but the key offset is not 4.");
+				return -rte_errno;
+			}
+
+			if (len > CUSTOM_GRE_RAW_KEY_LEN) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"only a 4-byte (8 hex character) key match is supported.");
+				return -rte_errno;
+			}
+
+			if (raw_spec && raw_mask) {
+				/* Convert the hex-string key into the 4-byte
+				 * GRE key of the lookup element, high nibble
+				 * first, and match on the full key.
+				 */
+				const uint8_t *pat_src = raw_spec->pattern;
+				uint8_t *key_dst =
+					(uint8_t *)&list[t].h_u.gre_hdr.key;
+				uint16_t i;
+
+				for (i = 0; i < len; i++) {
+					int nibble;
+
+					if (pat_src[i] >= '0' && pat_src[i] <= '9')
+						nibble = pat_src[i] - '0';
+					else if (pat_src[i] >= 'a' && pat_src[i] <= 'f')
+						nibble = pat_src[i] - 'a' + 10;
+					else if (pat_src[i] >= 'A' && pat_src[i] <= 'F')
+						nibble = pat_src[i] - 'A' + 10;
+					else {
+						rte_flow_error_set(error, EINVAL,
+							RTE_FLOW_ERROR_TYPE_ITEM, item,
+							"raw pattern is not a hex string.");
+						return -rte_errno;
+					}
+
+					if (i % 2)
+						key_dst[i / 2] |= nibble;
+					else
+						key_dst[i / 2] = nibble << 4;
+				}
+				list[t].m_u.gre_hdr.key = 0xffffffff;
+				input_set |= ICE_INSET_RAW;
+				input_set_byte += 2;
+				t++;
+			}
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			vlan_spec = item->spec;
 			vlan_mask = item->mask;
@@ -1402,6 +1528,8 @@ struct sw_meta {
 			*tun_type = ICE_SW_TUN_VXLAN;
 		else if (nvgre_valid)
 			*tun_type = ICE_SW_TUN_NVGRE;
+		else if (gre_valid)
+			*tun_type = ICE_SW_TUN_GRE;
 		else if (ipv4_valid && tcp_valid)
 			*tun_type = ICE_SW_IPV4_TCP;
 		else if (ipv4_valid && udp_valid)
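(For clarity, the key-packing logic in the RTE_FLOW_ITEM_TYPE_RAW case above as a
standalone sketch; the helper name is illustrative and not part of the patch. It
assumes the raw pattern is a hex string of at most 8 characters, packed high nibble
first into the 4-byte GRE key:

static int
gre_raw_key_pack(const uint8_t *pat, uint16_t len, uint32_t *key)
{
	uint8_t *dst = (uint8_t *)key;
	uint16_t i;

	for (i = 0; i < len; i++) {
		int nibble;

		if (pat[i] >= '0' && pat[i] <= '9')
			nibble = pat[i] - '0';
		else if (pat[i] >= 'a' && pat[i] <= 'f')
			nibble = pat[i] - 'a' + 10;
		else if (pat[i] >= 'A' && pat[i] <= 'F')
			nibble = pat[i] - 'A' + 10;
		else
			return -1;	/* not a hex digit */

		if (i % 2)
			dst[i / 2] |= nibble;		/* low nibble of the byte */
		else
			dst[i / 2] = nibble << 4;	/* high nibble of the byte */
	}
	return 0;
}

For example, the pattern string "0000002a" produces the key bytes 00 00 00 2a,
i.e. GRE key 42 in network byte order.)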
diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index 0934e02..846fa66 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -1043,6 +1043,7 @@ enum rte_eth_tunnel_type {
 	RTE_TUNNEL_TYPE_IP_IN_GRE,
 	RTE_L2_TUNNEL_TYPE_E_TAG,
 	RTE_TUNNEL_TYPE_VXLAN_GPE,
+	RTE_TUNNEL_TYPE_ECPRI,
 	RTE_TUNNEL_TYPE_MAX,
 };
 
-- 
1.8.3.1
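
(Usage note for the new RTE_TUNNEL_TYPE_ECPRI value: an application can register a
UDP port for it through the existing rte_eth_dev_udp_tunnel_port_add() API. A minimal
sketch only; the function name is illustrative and the port number an arbitrary
example, since eCPRI has no fixed well-known UDP port:

#include <rte_ethdev.h>

static int
add_ecpri_udp_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = udp_port,	/* e.g. 5123, example value only */
		.prot_type = RTE_TUNNEL_TYPE_ECPRI,
	};

	/* returns 0 on success, negative errno on failure */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}
)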

