DPDK patches and discussions
* [dpdk-dev] [PATCH 0/7] net/ice: update share code
@ 2019-01-15 12:56 Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 1/7] net/ice/base: code clean Qi Zhang
                   ` (9 more replies)
  0 siblings, 10 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-15 12:56 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson, ferruh.yigit, Qi Zhang

For ice family NICs, the packet processing pipeline is configured via a
package file, which the driver downloads into the device during init.
This patch set adds the shared code APIs necessary to support package
download. Some code cleanup is also included.

Though package download will not be enabled in 19.02 as planned, we hope
the shared code part can be merged early.

Meanwhile, we will submit a separate RFC patch for package download for
early customer evaluation.
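
For context, a minimal sketch (illustrative only, not part of this
series) of how a PF driver could feed a package file to the shared code
at init time via ice_copy_and_init_pkg() from patch 5; the path, locals
and error handling here are hypothetical, and standard C file I/O is
assumed:

    enum ice_status status;
    FILE *fp;
    long len;
    u8 *buf;

    fp = fopen("/path/to/ice.pkg", "rb");   /* path is illustrative */
    if (!fp)
        return ICE_ERR_PARAM;
    fseek(fp, 0, SEEK_END);
    len = ftell(fp);
    rewind(fp);
    buf = malloc(len);
    if (buf && fread(buf, 1, len, fp) == (size_t)len)
        status = ice_copy_and_init_pkg(hw, buf, (u32)len);
    /* safe to free here: ice_copy_and_init_pkg() works on its own copy */
    free(buf);
    fclose(fp);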

Qi Zhang (7):
  net/ice/base: code clean
  net/ice/base: add API to support resource allocate
  net/ice/base: add package download related data structure
  net/ice/base: add some help macros
  net/ice/base: add flexible pipeline module
  net/ice/base: add flow module
  net/ice/base: free flow profile entries

 drivers/net/ice/Makefile              |    2 +
 drivers/net/ice/base/ice_adminq_cmd.h |   85 +
 drivers/net/ice/base/ice_common.c     |   85 +-
 drivers/net/ice/base/ice_common.h     |   32 +-
 drivers/net/ice/base/ice_controlq.c   |    2 +-
 drivers/net/ice/base/ice_flex_pipe.c  | 5056 +++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_flex_pipe.h  |  107 +
 drivers/net/ice/base/ice_flex_type.h  |  685 +++++
 drivers/net/ice/base/ice_flow.c       | 2080 ++++++++++++++
 drivers/net/ice/base/ice_flow.h       |  337 +++
 drivers/net/ice/base/ice_osdep.h      |    2 +
 drivers/net/ice/base/ice_type.h       |   37 +-
 drivers/net/ice/base/meson.build      |    2 +
 13 files changed, 8479 insertions(+), 33 deletions(-)
 create mode 100644 drivers/net/ice/base/ice_flex_pipe.c
 create mode 100644 drivers/net/ice/base/ice_flex_pipe.h
 create mode 100644 drivers/net/ice/base/ice_flow.c

-- 
2.13.6

* [dpdk-dev] [PATCH 1/7] net/ice/base: code clean
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
@ 2019-01-15 12:56 ` Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 2/7] net/ice/base: add API to support resource allocate Qi Zhang
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-15 12:56 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson, ferruh.yigit, Qi Zhang

Remove some unnecessary code.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_common.c   | 14 --------------
 drivers/net/ice/base/ice_common.h   | 17 -----------------
 drivers/net/ice/base/ice_controlq.c |  2 +-
 3 files changed, 1 insertion(+), 32 deletions(-)

diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index d49264d14..2ccf58527 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -60,16 +60,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 	return status;
 }
 
-#if defined(FPGA_SUPPORT) || defined(CVL_A0_SUPPORT)
-void ice_dev_onetime_setup(struct ice_hw *hw)
-{
-	/* configure Rx - set non pxe mode */
-	wr32(hw, GLLAN_RCTL_0, 0x1);
-
-
-
-}
-#endif /* FPGA_SUPPORT || CVL_A0_SUPPORT */
 
 /**
  * ice_clear_pf_cfg - Clear PF configuration
@@ -830,10 +820,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	if (status)
 		goto err_unroll_sched;
 
-#if defined(FPGA_SUPPORT) || defined(CVL_A0_SUPPORT)
-	/* some of the register write workarounds to get Rx working */
-	ice_dev_onetime_setup(hw);
-#endif /* FPGA_SUPPORT || CVL_A0_SUPPORT */
 
 	/* Get MAC information */
 	/* A single port can report up to two (LAN and WoL) addresses */
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 082ae66f9..0197fbfe3 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -9,18 +9,6 @@
 
 #include "ice_switch.h"
 
-/* prototype for functions used for SW locks */
-void ice_free_list(struct LIST_HEAD_TYPE *list);
-void ice_init_lock(struct ice_lock *lock);
-void ice_acquire_lock(struct ice_lock *lock);
-void ice_release_lock(struct ice_lock *lock);
-void ice_destroy_lock(struct ice_lock *lock);
-
-void *ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m, u64 size);
-void ice_free_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m);
-
-bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-
 enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
 
 void
@@ -61,11 +49,6 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
 
 
 
-#if defined(FPGA_SUPPORT) || defined(CVL_A0_SUPPORT)
-void ice_dev_onetime_setup(struct ice_hw *hw);
-#endif /* FPGA_SUPPORT || CVL_A0_SUPPORT */
-
-
 enum ice_status
 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 		  u32 rxq_index);
diff --git a/drivers/net/ice/base/ice_controlq.c b/drivers/net/ice/base/ice_controlq.c
index fb82c23ee..cbc4cb4c2 100644
--- a/drivers/net/ice/base/ice_controlq.c
+++ b/drivers/net/ice/base/ice_controlq.c
@@ -735,7 +735,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * Returns true if the firmware has processed all descriptors on the
  * admin send queue. Returns false if there are still requests pending.
  */
-bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
 	/* AQ designers suggest use of head for better
 	 * timing reliability than DD bit
-- 
2.13.6

* [dpdk-dev] [PATCH 2/7] net/ice/base: add API to support resource allocate
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 1/7] net/ice/base: code clean Qi Zhang
@ 2019-01-15 12:56 ` Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 3/7] net/ice/base: add package download related data structure Qi Zhang
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-15 12:56 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson, ferruh.yigit, Qi Zhang

Added the APIs ice_alloc_hw_res and ice_free_hw_res.
Added resource type macros.
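
As an illustration (not part of this patch, and assuming a valid hw
pointer is in scope), allocating and later freeing a single dedicated
VEB counter with the new APIs looks like:

    enum ice_status status;
    u16 counter_id;

    /* allocate one dedicated (non-shared) VEB counter */
    status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_VEB_COUNTER, 1, false,
                              &counter_id);
    if (!status) {
        /* ... use counter_id ... */
        ice_free_hw_res(hw, ICE_AQC_RES_TYPE_VEB_COUNTER, 1, &counter_id);
    }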

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_adminq_cmd.h | 27 +++++++++++++
 drivers/net/ice/base/ice_common.c     | 71 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_common.h     |  4 ++
 3 files changed, 102 insertions(+)

diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 9332f84aa..3003efd55 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -238,8 +238,35 @@ struct ice_aqc_get_sw_cfg_resp {
  * Free Resources command (indirect 0x0209)
  * Get Allocated Resource Descriptors Command (indirect 0x020A)
  */
+#define ICE_AQC_RES_TYPE_VEB_COUNTER			0x00
+#define ICE_AQC_RES_TYPE_VLAN_COUNTER			0x01
+#define ICE_AQC_RES_TYPE_MIRROR_RULE			0x02
 #define ICE_AQC_RES_TYPE_VSI_LIST_REP			0x03
 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE			0x04
+#define ICE_AQC_RES_TYPE_RECIPE				0x05
+#define ICE_AQC_RES_TYPE_PROFILE			0x06
+#define ICE_AQC_RES_TYPE_SWID				0x07
+#define ICE_AQC_RES_TYPE_VSI				0x08
+#define ICE_AQC_RES_TYPE_FLU				0x09
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_1			0x0A
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_2			0x0B
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_4			0x0C
+#define ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH		0x20
+#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK		0x21
+#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES	0x22
+#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES		0x23
+#define ICE_AQC_RES_TYPE_FLEX_DESC_PROG			0x30
+#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID	0x48
+#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM		0x49
+#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID		0x50
+#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM		0x51
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID		0x58
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM		0x59
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID		0x60
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM		0x61
+/* Resource types 0x62-67 are reserved for Hash profile builder */
+#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID		0x68
+#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM		0x69
 
 #define ICE_AQC_RES_TYPE_FLAG_SHARED			BIT(7)
 #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM		BIT(12)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 2ccf58527..145f66a90 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -1712,6 +1712,77 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 }
 
+/**
+ * ice_alloc_hw_res - allocate resource
+ * @hw: pointer to the hw struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @sh: shared if true, dedicated if false
+ * @res: pointer to array that will receive the resources
+ */
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool sh, u16 *res)
+{
+	struct ice_aqc_alloc_free_res_elem *buf;
+	enum ice_status status;
+	u16 buf_len;
+
+	buf_len = sizeof(*buf) + sizeof(buf->elem) * (num - 1);
+	buf = (struct ice_aqc_alloc_free_res_elem *)
+		ice_malloc(hw, buf_len);
+	if (!buf)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Prepare buffer to allocate resource. */
+	buf->num_elems = CPU_TO_LE16(num);
+	buf->res_type = CPU_TO_LE16(type | (sh ? ICE_AQC_RES_TYPE_FLAG_SHARED :
+		ICE_AQC_RES_TYPE_FLAG_DEDICATED));
+	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+				       ice_aqc_opc_alloc_res, NULL);
+	if (status)
+		goto ice_alloc_res_exit;
+
+	ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
+		   ICE_NONDMA_TO_NONDMA);
+
+ice_alloc_res_exit:
+	ice_free(hw, buf);
+	return status;
+}
+
+/**
+ * ice_free_hw_res - free allocated hw resource
+ * @hw: pointer to the hw struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: pointer to array that contains the resources to free
+ */
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+	struct ice_aqc_alloc_free_res_elem *buf;
+	enum ice_status status;
+	u16 buf_len;
+
+	buf_len = sizeof(*buf) + sizeof(buf->elem) * (num - 1);
+	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
+	if (!buf)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Prepare buffer to free resource. */
+	buf->num_elems = CPU_TO_LE16(num);
+	buf->res_type = CPU_TO_LE16(type);
+	ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
+		   ICE_NONDMA_TO_NONDMA);
+
+	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
+				       ice_aqc_opc_free_res, NULL);
+	if (status)
+		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
+
+	ice_free(hw, buf);
+	return status;
+}
 
 /**
  * ice_get_num_per_func - determine number of resources per PF
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 0197fbfe3..45d93eb64 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -32,6 +32,10 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
 		enum ice_aq_res_access_type access, u32 timeout);
 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
 enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool sh, u16 *res);
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
+enum ice_status
 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-- 
2.13.6

* [dpdk-dev] [PATCH 3/7] net/ice/base: add package download related data structure
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 1/7] net/ice/base: code clean Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 2/7] net/ice/base: add API to support resource allocate Qi Zhang
@ 2019-01-15 12:56 ` Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 4/7] net/ice/base: add some help macros Qi Zhang
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-15 12:56 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson, ferruh.yigit, Qi Zhang
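
Add data structures and admin queue opcodes for the package commands:
Download Package (0x0C40), Upload Section (0x0C41), Update Package
(0x0C42) and Get Package Info List (0x0C43).

The Get Package Info List response ends in a one-element array, so a
caller sizes its buffer for N entries as in this sketch (mirroring the
allocation done in patch 5):

    /* room for up to 4 entries; the first is part of the struct */
    u16 size = sizeof(struct ice_aqc_get_pkg_info_resp) +
               (4 - 1) * sizeof(struct ice_aqc_get_pkg_info);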

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_adminq_cmd.h | 58 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_type.h       |  1 -
 2 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 3003efd55..a1b9edd14 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1576,6 +1576,57 @@ struct ice_aqc_move_txqs_data {
 
 
 
+/* Download Package (indirect 0x0C40) */
+/* Also used for Update Package (indirect 0x0C42) */
+struct ice_aqc_download_pkg {
+	u8 flags;
+#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF	0x01
+	u8 reserved[3];
+	__le32 reserved1;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+struct ice_aqc_download_pkg_resp {
+	__le32 error_offset;
+	__le32 error_info;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+/* Get Package Info List (indirect 0x0C43) */
+struct ice_aqc_get_pkg_info_list {
+	__le32 reserved1;
+	__le32 reserved2;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+/* Version format for packages */
+struct ice_pkg_ver {
+	u8 major;
+	u8 minor;
+	u8 update;
+	u8 draft;
+};
+
+#define ICE_PKG_NAME_SIZE	32
+
+struct ice_aqc_get_pkg_info {
+	struct ice_pkg_ver ver;
+	char name[ICE_PKG_NAME_SIZE];
+	u8 is_in_nvm;
+	u8 is_active;
+	u8 is_active_at_boot;
+	u8 is_modified;
+};
+
+/* Get Package Info List response buffer format (0x0C43) */
+struct ice_aqc_get_pkg_info_resp {
+	__le32 count;
+	struct ice_aqc_get_pkg_info pkg_info[1];
+};
+
 
 
 
@@ -1726,6 +1777,8 @@ struct ice_aq_desc {
 		struct ice_aqc_txqs_cleanup txqs_cleanup;
 		struct ice_aqc_add_get_update_free_vsi vsi_cmd;
 		struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
+		struct ice_aqc_download_pkg download_pkg;
+		struct ice_aqc_get_pkg_info_list get_pkg_info_list;
 		struct ice_aqc_fw_logging fw_logging;
 		struct ice_aqc_get_clear_fw_log get_clear_fw_log;
 		struct ice_aqc_set_mac_lb set_mac_lb;
@@ -1903,6 +1956,11 @@ enum ice_adminq_opc {
 	ice_aqc_opc_txqs_cleanup			= 0x0C31,
 	ice_aqc_opc_move_recfg_txqs			= 0x0C32,
 
+	/* package commands */
+	ice_aqc_opc_download_pkg			= 0x0C40,
+	ice_aqc_opc_upload_section			= 0x0C41,
+	ice_aqc_opc_update_pkg				= 0x0C42,
+	ice_aqc_opc_get_pkg_info_list			= 0x0C43,
 
 
 
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 256bf3f37..d7c12d172 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -704,7 +704,6 @@ struct ice_hw {
 
 	u8 ucast_shared;	/* true if VSIs can share unicast addr */
 
-
 };
 
 /* Statistics collected by each port, VSI, VEB, and S-channel */
-- 
2.13.6

* [dpdk-dev] [PATCH 4/7] net/ice/base: add some help macros
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
                   ` (2 preceding siblings ...)
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 3/7] net/ice/base: add package download related data structure Qi Zhang
@ 2019-01-15 12:56 ` Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 5/7] net/ice/base: add flexible pipeline module Qi Zhang
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-15 12:56 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson, ferruh.yigit, Qi Zhang
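
Add the ICE_ALIGN() helper macro and a LIST_NEXT_ENTRY() list accessor.
As the comment added to ice_common.h illustrates, ICE_ALIGN() rounds a
value up to the next boundary that is a power of two, for example:

    u16 off = 0x1006;

    off = ICE_ALIGN(off, 4);    /* off is now 0x1008 */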

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_common.h | 10 ++++++++++
 drivers/net/ice/base/ice_osdep.h  |  2 ++
 2 files changed, 12 insertions(+)

diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 45d93eb64..67539ccb7 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -51,6 +51,16 @@ void ice_clear_pxe_mode(struct ice_hw *hw);
 
 enum ice_status ice_get_caps(struct ice_hw *hw);
 
+/* Define a macro that will align a pointer to point to the next memory address
+ * that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For
+ * example, given the variable pointer = 0x1006, then after the following call:
+ *
+ *      pointer = ICE_ALIGN(pointer, 4)
+ *
+ * ... the value of pointer would equal 0x1008, since 0x1008 is the next
+ * address after 0x1006 which is divisible by 4.
+ */
+#define ICE_ALIGN(ptr, align)   (((ptr) + ((align) - 1)) & ~((align) - 1))
 
 
 enum ice_status
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index a3351c034..252c8f4e7 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -457,6 +457,8 @@ LIST_HEAD(ice_list_head, ice_list_entry);
 
 /*Note parameters are swapped*/
 #define LIST_FIRST_ENTRY(head, type, field) (type *)((head)->lh_first)
+#define LIST_NEXT_ENTRY(entry, type, field) \
+	((type *)(entry)->field.next.le_next)
 #define LIST_ADD(entry, list_head)    LIST_INSERT_HEAD(list_head, entry, next)
 #define LIST_ADD_AFTER(entry, list_entry) \
 	LIST_INSERT_AFTER(list_entry, entry, next)
-- 
2.13.6

* [dpdk-dev] [PATCH 5/7] net/ice/base: add flexible pipeline module
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
                   ` (3 preceding siblings ...)
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 4/7] net/ice/base: add some help macros Qi Zhang
@ 2019-01-15 12:56 ` Qi Zhang
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 6/7] net/ice/base: add flow module Qi Zhang
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-15 12:56 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson, ferruh.yigit, Qi Zhang

The flexible pipeline module provides the infrastructure for the ice
driver's flexible packet processing feature.
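
Much of the module is built on package enumeration helpers that are
called first with a non-NULL segment pointer, then with NULL to continue
the walk. A sketch of that calling convention, modeled on
ice_find_boost_entry() below (ice_boost_tcam_handler() is file-static,
so the pattern applies inside ice_flex_pipe.c):

    struct ice_boost_tcam_entry *tcam;
    struct ice_pkg_enum state;

    ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
    tcam = (struct ice_boost_tcam_entry *)
           ice_pkg_enum_entry(ice_seg, &state, ICE_SID_RXPARSER_BOOST_TCAM,
                              NULL, ice_boost_tcam_handler);
    while (tcam) {
        /* ... examine tcam->addr etc. ... */
        tcam = (struct ice_boost_tcam_entry *)
               ice_pkg_enum_entry(NULL, &state, ICE_SID_RXPARSER_BOOST_TCAM,
                                  NULL, ice_boost_tcam_handler);
    }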

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/Makefile             |    1 +
 drivers/net/ice/base/ice_flex_pipe.c | 5018 ++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_flex_pipe.h |  107 +
 drivers/net/ice/base/ice_flex_type.h |  685 +++++
 drivers/net/ice/base/ice_type.h      |   36 +
 drivers/net/ice/base/meson.build     |    1 +
 6 files changed, 5848 insertions(+)
 create mode 100644 drivers/net/ice/base/ice_flex_pipe.c
 create mode 100644 drivers/net/ice/base/ice_flex_pipe.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 4b421cde3..1bb76efc2 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -49,6 +49,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_common.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_sched.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_nvm.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flex_pipe.c
 
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx.c
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
new file mode 100644
index 000000000..0b8376d31
--- /dev/null
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -0,0 +1,5018 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2019
+ */
+
+#include "ice_common.h"
+#include "ice_flex_pipe.h"
+#include "ice_protocol_type.h"
+#include "ice_flow.h"
+
+static const struct ice_tunnel_type_scan tnls[] = {
+	{ TNL_VXLAN,		"TNL_VXLAN" },
+	{ TNL_GTPC,		"TNL_GTPC" },
+	{ TNL_GTPC_TEID,	"TNL_GTPC_TEID" },
+	{ TNL_GTPU,		"TNL_GTPU" },
+	{ TNL_GTPU_TEID,	"TNL_GTPU_TEID" },
+	{ TNL_VXLAN_GPE,	"TNL_VXLAN_GPE" },
+	{ TNL_GENEVE,		"TNL_GENEVE" },
+	{ TNL_NAT,		"TNL_NAT" },
+	{ TNL_ROCE_V2,		"TNL_ROCE_V2" },
+	{ TNL_MPLSO_UDP,	"TNL_MPLSO_UDP" },
+	{ TNL_UDP2_END,		"TNL_UDP2_END" },
+	{ TNL_UPD_END,		"TNL_UPD_END" },
+	{ TNL_LAST,		"" }
+};
+
+static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
+	/* SWITCH */
+	{
+		ICE_SID_XLT0_SW,
+		ICE_SID_XLT_KEY_BUILDER_SW,
+		ICE_SID_XLT1_SW,
+		ICE_SID_XLT2_SW,
+		ICE_SID_PROFID_TCAM_SW,
+		ICE_SID_PROFID_REDIR_SW,
+		ICE_SID_FLD_VEC_SW,
+		ICE_SID_CDID_KEY_BUILDER_SW,
+		ICE_SID_CDID_REDIR_SW
+	},
+
+	/* ACL */
+	{
+		ICE_SID_XLT0_ACL,
+		ICE_SID_XLT_KEY_BUILDER_ACL,
+		ICE_SID_XLT1_ACL,
+		ICE_SID_XLT2_ACL,
+		ICE_SID_PROFID_TCAM_ACL,
+		ICE_SID_PROFID_REDIR_ACL,
+		ICE_SID_FLD_VEC_ACL,
+		ICE_SID_CDID_KEY_BUILDER_ACL,
+		ICE_SID_CDID_REDIR_ACL
+	},
+
+	/* FD */
+	{
+		ICE_SID_XLT0_FD,
+		ICE_SID_XLT_KEY_BUILDER_FD,
+		ICE_SID_XLT1_FD,
+		ICE_SID_XLT2_FD,
+		ICE_SID_PROFID_TCAM_FD,
+		ICE_SID_PROFID_REDIR_FD,
+		ICE_SID_FLD_VEC_FD,
+		ICE_SID_CDID_KEY_BUILDER_FD,
+		ICE_SID_CDID_REDIR_FD
+	},
+
+	/* RSS */
+	{
+		ICE_SID_XLT0_RSS,
+		ICE_SID_XLT_KEY_BUILDER_RSS,
+		ICE_SID_XLT1_RSS,
+		ICE_SID_XLT2_RSS,
+		ICE_SID_PROFID_TCAM_RSS,
+		ICE_SID_PROFID_REDIR_RSS,
+		ICE_SID_FLD_VEC_RSS,
+		ICE_SID_CDID_KEY_BUILDER_RSS,
+		ICE_SID_CDID_REDIR_RSS
+	},
+
+	/* PE */
+	{
+		ICE_SID_XLT0_PE,
+		ICE_SID_XLT_KEY_BUILDER_PE,
+		ICE_SID_XLT1_PE,
+		ICE_SID_XLT2_PE,
+		ICE_SID_PROFID_TCAM_PE,
+		ICE_SID_PROFID_REDIR_PE,
+		ICE_SID_FLD_VEC_PE,
+		ICE_SID_CDID_KEY_BUILDER_PE,
+		ICE_SID_CDID_REDIR_PE
+	}
+};
+
+/**
+ * ice_sect_id - returns section ID
+ * @blk: block type
+ * @sect: section type
+ *
+ * This helper function returns the proper section ID given a block type and a
+ * section type.
+ */
+static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
+{
+	return ice_sect_lkup[blk][sect];
+}
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+	struct ice_buf_hdr *hdr;
+	u16 section_count;
+	u16 data_end;
+
+	hdr = (struct ice_buf_hdr *)buf->buf;
+	/* verify data */
+	section_count = LE16_TO_CPU(hdr->section_count);
+	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+		return NULL;
+
+	data_end = LE16_TO_CPU(hdr->data_end);
+	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+		return NULL;
+
+	return hdr;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+	struct ice_nvm_table *nvms;
+
+	nvms = (struct ice_nvm_table *)(ice_seg->device_table +
+		LE32_TO_CPU(ice_seg->device_table_count));
+
+	return (struct ice_buf_table *)
+		(nvms->vers + LE32_TO_CPU(nvms->table_count));
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+static struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+	if (ice_seg) {
+		state->buf_table = ice_find_buf_table(ice_seg);
+		if (!state->buf_table)
+			return NULL;
+
+		state->buf_idx = 0;
+		return ice_pkg_val_buf(state->buf_table->buf_array);
+	}
+
+	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
+		return ice_pkg_val_buf(state->buf_table->buf_array +
+				       state->buf_idx);
+	else
+		return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+static bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+	if (!ice_seg && !state->buf)
+		return false;
+
+	if (!ice_seg && state->buf)
+		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
+			return true;
+
+	state->buf = ice_pkg_enum_buf(ice_seg, state);
+	if (!state->buf)
+		return false;
+
+	/* start of new buffer, reset section index */
+	state->sect_idx = 0;
+	return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+static void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+		     u32 sect_type)
+{
+	u16 offset, size;
+
+	if (ice_seg)
+		state->type = sect_type;
+
+	if (!ice_pkg_advance_sect(ice_seg, state))
+		return NULL;
+
+	/* scan for next matching section */
+	while (state->buf->section_entry[state->sect_idx].type !=
+	       CPU_TO_LE32(state->type))
+		if (!ice_pkg_advance_sect(NULL, state))
+			return NULL;
+
+	/* validate section */
+	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+		return NULL;
+
+	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
+	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+		return NULL;
+
+	/* make sure the section fits in the buffer */
+	if (offset + size > ICE_PKG_BUF_SIZE)
+		return NULL;
+
+	state->sect_type =
+		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
+
+	/* calc pointer to this section */
+	state->sect = ((u8 *)state->buf) +
+		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+
+	return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries in the section type
+ *
+ * This function will enumerate all the entries in particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+		   u32 sect_type, u32 *offset,
+		   void *(*handler)(u32 sect_type, void *section,
+				    u32 index, u32 *offset))
+{
+	void *entry;
+
+	if (ice_seg) {
+		if (!handler)
+			return NULL;
+
+		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+			return NULL;
+
+		state->entry_idx = 0;
+		state->handler = handler;
+	} else {
+		state->entry_idx++;
+	}
+
+	if (!state->handler)
+		return NULL;
+
+	/* get entry */
+	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+			       offset);
+	if (!entry) {
+		/* end of a section, look for another section of this type */
+		if (!ice_pkg_enum_section(NULL, state, 0))
+			return NULL;
+
+		state->entry_idx = 0;
+		entry = state->handler(state->sect_type, state->sect,
+				       state->entry_idx, offset);
+	}
+
+	return entry;
+}
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost tcam entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost tcam sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost tcam entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+	struct ice_boost_tcam_section *boost;
+
+	if (!section)
+		return NULL;
+
+	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+		return NULL;
+
+	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+		return NULL;
+
+	if (offset)
+		*offset = 0;
+
+	boost = (struct ice_boost_tcam_section *)section;
+	if (index >= LE16_TO_CPU(boost->count))
+		return NULL;
+
+	return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+		     struct ice_boost_tcam_entry **entry)
+{
+	struct ice_boost_tcam_entry *tcam;
+	struct ice_pkg_enum state;
+
+	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+	if (!ice_seg)
+		return ICE_ERR_PARAM;
+
+	do {
+		tcam = (struct ice_boost_tcam_entry *)
+		       ice_pkg_enum_entry(ice_seg, &state,
+					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+					  ice_boost_tcam_handler);
+		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
+			*entry = tcam;
+			return ICE_SUCCESS;
+		}
+
+		ice_seg = NULL;
+	} while (tcam);
+
+	*entry = NULL;
+	return ICE_ERR_CFG;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
+		       u32 *offset)
+{
+	struct ice_label_section *labels;
+
+	if (!section)
+		return NULL;
+
+	if (index > ICE_MAX_LABELS_IN_BUF)
+		return NULL;
+
+	if (offset)
+		*offset = 0;
+
+	labels = (struct ice_label_section *)section;
+	if (index >= LE16_TO_CPU(labels->count))
+		return NULL;
+
+	return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+		u16 *value)
+{
+	struct ice_label *label;
+
+	/* Check for valid label section on first call */
+	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+		return NULL;
+
+	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
+						       NULL,
+						       ice_label_enum_handler);
+	if (!label)
+		return NULL;
+
+	*value = LE16_TO_CPU(label->value);
+	return label->name;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+	struct ice_pkg_enum state;
+	char *label_name;
+	u16 val;
+	int i;
+
+	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+
+	if (!ice_seg)
+		return;
+
+	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+				     &val);
+
+	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+		for (i = 0; tnls[i].type != TNL_LAST; i++) {
+			if (!strncmp(label_name, tnls[i].label_prefix,
+				     strlen(tnls[i].label_prefix))) {
+				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+				hw->tnl.tbl[hw->tnl.count].valid = false;
+				hw->tnl.tbl[hw->tnl.count].in_use = false;
+				hw->tnl.tbl[hw->tnl.count].marked = false;
+				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+				hw->tnl.tbl[hw->tnl.count].port = 0;
+				hw->tnl.count++;
+				break;
+			}
+		}
+
+		label_name = ice_enum_labels(NULL, 0, &state, &val);
+	}
+
+	/* Cache the appropriate boost tcam entry pointers */
+	for (i = 0; i < hw->tnl.count; i++) {
+		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+				     &hw->tnl.tbl[i].boost_entry);
+		if (hw->tnl.tbl[i].boost_entry)
+			hw->tnl.tbl[i].valid = true;
+	}
+}
+
+/* Key creation */
+
+#define ICE_DC_KEY	0x1	/* don't care */
+#define ICE_DC_KEYINV	0x1
+#define ICE_NM_KEY	0x0	/* never match */
+#define ICE_NM_KEYINV	0x0
+#define ICE_0_KEY	0x1	/* match 0 */
+#define ICE_0_KEYINV	0x0
+#define ICE_1_KEY	0x0	/* match 1 */
+#define ICE_1_KEYINV	0x1
+
+/**
+ * ice_gen_key_word - generate 16-bits of a key/mask word
+ * @val: the value
+ * @valid: valid bits mask (change only the valid bits)
+ * @dont_care: don't care mask
+ * @nvr_mtch: never match mask
+ * @key: pointer to an array where the resulting key portion is stored
+ * @key_inv: pointer to an array where the key invert portion is stored
+ *
+ * This function generates 16 bits from an 8-bit value, an 8-bit don't care
+ * mask and an 8-bit never match mask. The 16 bits of output are divided into
+ * 8 bits of key and 8 bits of key invert.
+ *
+ *     '0' =    b01, always match a 0 bit
+ *     '1' =    b10, always match a 1 bit
+ *     '?' =    b11, don't care bit (always matches)
+ *     '~' =    b00, never match bit
+ *
+ * Input:
+ *          val:         b0  1  0  1  0  1
+ *          dont_care:   b0  0  1  1  0  0
+ *          never_mtch:  b0  0  0  0  1  1
+ *          ------------------------------
+ * Result:  key:        b01 10 11 11 00 00
+ */
+static enum ice_status
+ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
+		 u8 *key_inv)
+{
+	u8 in_key = *key, in_key_inv = *key_inv;
+	u8 i;
+
+	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
+	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
+		return ICE_ERR_CFG;
+
+	*key = 0;
+	*key_inv = 0;
+
+	/* encode the 8 bits into 8-bit key and 8-bit key invert */
+	for (i = 0; i < 8; i++) {
+		*key >>= 1;
+		*key_inv >>= 1;
+
+		if (!(valid & 0x1)) { /* change only valid bits */
+			*key |= (in_key & 0x1) << 7;
+			*key_inv |= (in_key_inv & 0x1) << 7;
+		} else if (dont_care & 0x1) { /* don't care bit */
+			*key |= ICE_DC_KEY << 7;
+			*key_inv |= ICE_DC_KEYINV << 7;
+		} else if (nvr_mtch & 0x1) { /* never match bit */
+			*key |= ICE_NM_KEY << 7;
+			*key_inv |= ICE_NM_KEYINV << 7;
+		} else if (val & 0x01) { /* exact 1 match */
+			*key |= ICE_1_KEY << 7;
+			*key_inv |= ICE_1_KEYINV << 7;
+		} else { /* exact 0 match */
+			*key |= ICE_0_KEY << 7;
+			*key_inv |= ICE_0_KEYINV << 7;
+		}
+
+		dont_care >>= 1;
+		nvr_mtch >>= 1;
+		valid >>= 1;
+		val >>= 1;
+		in_key >>= 1;
+		in_key_inv >>= 1;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_bits_max_set - determine if the number of bits set is within a maximum
+ * @mask: pointer to the byte array which is the mask
+ * @size: the number of bytes in the mask
+ * @max: the max number of set bits
+ *
+ * This function determines if there are at most 'max' bits set in an array.
+ * Returns true if the number of bits set is <= max, otherwise returns
+ * false.
+ */
+static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
+{
+	u16 count = 0;
+	u16 i, j;
+
+	/* check each byte */
+	for (i = 0; i < size; i++) {
+		/* if 0, go to next byte */
+		if (!mask[i])
+			continue;
+
+		/* We know there is at least one set bit in this byte because of
+		 * the above check; if we already have found 'max' number of
+		 * bits set, then we can return failure now.
+		 */
+		if (count == max)
+			return false;
+
+		/* count the bits in this byte, checking threshold */
+		for (j = 0; j < BITS_PER_BYTE; j++) {
+			count += (mask[i] & (0x1 << j)) ? 1 : 0;
+			if (count > max)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * ice_set_key - generate a variable sized key with multiples of 16-bits
+ * @key: pointer to where the key will be stored
+ * @size: the size of the complete key in bytes (must be even)
+ * @val: array of 8-bit values that makes up the value portion of the key
+ * @upd: array of 8-bit masks that determine what key portion to update
+ * @dc: array of 8-bit masks that make up the don't care mask
+ * @nm: array of 8-bit masks that make up the never match mask
+ * @off: the offset of the first byte in the key to update
+ * @len: the number of bytes in the key update
+ *
+ * This function generates a key from a value, a don't care mask and a never
+ * match mask.
+ * upd, dc, and nm are optional parameters, and can be NULL:
+ *	upd == NULL --> upd mask is all 1's (update all bits)
+ *	dc == NULL --> dc mask is all 0's (no don't care bits)
+ *	nm == NULL --> nm mask is all 0's (no never match bits)
+ */
+enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+	    u16 len)
+{
+	u16 half_size;
+	u16 i;
+
+	/* size must be a multiple of 2 bytes. */
+	if (size % 2)
+		return ICE_ERR_CFG;
+	half_size = size / 2;
+
+	if (off + len > half_size)
+		return ICE_ERR_CFG;
+
+	/* Make sure at most one bit is set in the never match mask. Having
+	 * more than one never match mask bit set will cause HW to consume
+	 * excessive power; this is a power management efficiency check.
+	 */
+#define ICE_NVR_MTCH_BITS_MAX	1
+	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
+		return ICE_ERR_CFG;
+
+	for (i = 0; i < len; i++)
+		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
+				     dc ? dc[i] : 0, nm ? nm[i] : 0,
+				     key + off + i, key + half_size + off + i))
+			return ICE_ERR_CFG;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * ICE_SUCCESS        - Means the caller has acquired the global config lock
+ *                      and can perform writing of the package.
+ * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
+ *                      package or has found that no update was necessary; in
+ *                      this case, the caller can just skip performing any
+ *                      update of the package.
+ */
+static enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+			    enum ice_aq_res_access_type access)
+{
+	enum ice_status status;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_global_cfg_lock");
+
+	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+	if (status == ICE_ERR_AQ_NO_WORK)
+		ice_debug(hw, ICE_DBG_PKG,
+			  "Global config lock: No work to do\n");
+
+	return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+static void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+static enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+	ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_change_lock");
+
+	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+			       ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+static void ice_release_change_lock(struct ice_hw *hw)
+{
+	ice_debug(hw, ICE_DBG_TRACE, "ice_release_change_lock");
+
+	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static enum ice_status
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+		    u16 buf_size, bool last_buf, u32 *error_offset,
+		    u32 *error_info, struct ice_sq_cd *cd)
+{
+	struct ice_aqc_download_pkg *cmd;
+	struct ice_aq_desc desc;
+	enum ice_status status;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_download_pkg");
+
+	if (error_offset)
+		*error_offset = 0;
+	if (error_info)
+		*error_info = 0;
+
+	cmd = &desc.params.download_pkg;
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+	if (last_buf)
+		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+	if (status == ICE_ERR_AQ_ERROR) {
+		/* Read error from buffer only when the FW returned an error */
+		struct ice_aqc_download_pkg_resp *resp;
+
+		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+		if (error_offset)
+			*error_offset = LE32_TO_CPU(resp->error_offset);
+		if (error_info)
+			*error_info = LE32_TO_CPU(resp->error_info);
+	}
+
+	return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+		      u16 buf_size, struct ice_sq_cd *cd)
+{
+	struct ice_aq_desc desc;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_upload_section");
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+		  bool last_buf, u32 *error_offset, u32 *error_info,
+		  struct ice_sq_cd *cd)
+{
+	struct ice_aqc_download_pkg *cmd;
+	struct ice_aq_desc desc;
+	enum ice_status status;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_update_pkg");
+
+	if (error_offset)
+		*error_offset = 0;
+	if (error_info)
+		*error_info = 0;
+
+	cmd = &desc.params.download_pkg;
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+	if (last_buf)
+		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+	if (status == ICE_ERR_AQ_ERROR) {
+		/* Read error from buffer only when the FW returned an error */
+		struct ice_aqc_download_pkg_resp *resp;
+
+		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+		if (error_offset)
+			*error_offset = LE32_TO_CPU(resp->error_offset);
+		if (error_info)
+			*error_info = LE32_TO_CPU(resp->error_info);
+	}
+
+	return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+		    struct ice_pkg_hdr *pkg_hdr)
+{
+	u32 i;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_find_seg_in_pkg\n");
+	ice_debug(hw, ICE_DBG_PKG, "Package version: %d.%d.%d.%d\n",
+		  pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
+		  pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+
+	/* Search all package segments for the requested segment type */
+	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+		struct ice_generic_seg_hdr *seg;
+
+		seg = (struct ice_generic_seg_hdr *)
+			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
+
+		if (LE32_TO_CPU(seg->seg_type) == seg_type)
+			return seg;
+	}
+
+	return NULL;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+	enum ice_status status;
+	u32 offset, info, i;
+
+	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+	if (status)
+		return status;
+
+	for (i = 0; i < count; i++) {
+		bool last = ((i + 1) == count);
+
+		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+
+		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
+					   last, &offset, &info, NULL);
+
+		if (status) {
+			ice_debug(hw, ICE_DBG_PKG,
+				  "Update pkg failed: err %d off %d inf %d\n",
+				  status, offset, info);
+			break;
+		}
+	}
+
+	ice_release_change_lock(hw);
+
+	return status;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware. Metadata buffers are skipped, and the first metadata buffer
+ * found indicates that the rest of the buffers are all metadata buffers.
+ */
+static enum ice_status
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+	enum ice_status status;
+	struct ice_buf_hdr *bh;
+	u32 offset, info, i;
+
+	if (!bufs || !count)
+		return ICE_ERR_PARAM;
+
+	/* If the first buffer's first section has its metadata bit set
+	 * then there are no buffers to be downloaded, and the operation is
+	 * considered a success.
+	 */
+	bh = (struct ice_buf_hdr *)bufs;
+	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
+		return ICE_SUCCESS;
+
+	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+	if (status)
+		return status;
+
+	for (i = 0; i < count; i++) {
+		bool last = ((i + 1) == count);
+
+		if (!last) {
+			/* check next buffer for metadata flag */
+			bh = (struct ice_buf_hdr *)(bufs + i + 1);
+
+			/* A set metadata flag in the next buffer will signal
+			 * that the current buffer will be the last buffer
+			 * downloaded
+			 */
+			if (LE16_TO_CPU(bh->section_count))
+				if (LE32_TO_CPU(bh->section_entry[0].type) &
+				    ICE_METADATA_BUF)
+					last = true;
+		}
+
+		bh = (struct ice_buf_hdr *)(bufs + i);
+
+		status = ice_aq_download_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
+					     last, &offset, &info, NULL);
+
+		if (status) {
+			ice_debug(hw, ICE_DBG_PKG,
+				  "Pkg download failed: err %d off %d inf %d\n",
+				  status, offset, info);
+			break;
+		}
+
+		if (last)
+			break;
+	}
+
+	ice_release_global_cfg_lock(hw);
+
+	return status;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static enum ice_status
+ice_aq_get_pkg_info_list(struct ice_hw *hw,
+			 struct ice_aqc_get_pkg_info_resp *pkg_info,
+			 u16 buf_size, struct ice_sq_cd *cd)
+{
+	struct ice_aq_desc desc;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_pkg_info_list");
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+	struct ice_buf_table *ice_buf_tbl;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_download_pkg\n");
+	ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
+		  ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
+		  ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+
+	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+		  LE32_TO_CPU(ice_seg->hdr.seg_type),
+		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+
+	ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+		  LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+				  LE32_TO_CPU(ice_buf_tbl->buf_count));
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the hw structure.
+ */
+enum ice_status
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+	struct ice_aqc_get_pkg_info_resp *pkg_info;
+	struct ice_global_metadata_seg *meta_seg;
+	struct ice_generic_seg_hdr *seg_hdr;
+	enum ice_status status;
+	u16 size;
+	u32 i;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_init_pkg_info\n");
+	if (!pkg_hdr)
+		return ICE_ERR_PARAM;
+
+	meta_seg = (struct ice_global_metadata_seg *)
+		   ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
+	if (meta_seg) {
+		hw->pkg_ver = meta_seg->pkg_ver;
+		ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
+			   sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);
+
+		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
+			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
+			  meta_seg->pkg_name);
+	} else {
+		ice_debug(hw, ICE_DBG_INIT,
+			  "Did not find metadata segment in driver package\n");
+		return ICE_ERR_CFG;
+	}
+
+	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+	if (seg_hdr) {
+		hw->ice_pkg_ver = seg_hdr->seg_ver;
+		ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
+			   sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);
+
+		ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
+			  seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
+			  seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
+			  seg_hdr->seg_name);
+	} else {
+		ice_debug(hw, ICE_DBG_INIT,
+			  "Did not find ice segment in driver package\n");
+		return ICE_ERR_CFG;
+	}
+
+#define ICE_PKG_CNT	4
+	size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
+				    (ICE_PKG_CNT - 1));
+	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+	if (!pkg_info)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
+	if (status)
+		goto init_pkg_free_alloc;
+
+	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT	4
+		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+		u8 place = 0;
+
+		if (pkg_info->pkg_info[i].is_active) {
+			flags[place++] = 'A';
+			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+			ice_memcpy(hw->active_pkg_name,
+				   pkg_info->pkg_info[i].name,
+				   sizeof(hw->active_pkg_name),
+				   ICE_NONDMA_TO_NONDMA);
+		}
+		if (pkg_info->pkg_info[i].is_active_at_boot)
+			flags[place++] = 'B';
+		if (pkg_info->pkg_info[i].is_modified)
+			flags[place++] = 'M';
+		if (pkg_info->pkg_info[i].is_in_nvm)
+			flags[place++] = 'N';
+
+		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
+			  i, pkg_info->pkg_info[i].ver.major,
+			  pkg_info->pkg_info[i].ver.minor,
+			  pkg_info->pkg_info[i].ver.update,
+			  pkg_info->pkg_info[i].ver.draft,
+			  pkg_info->pkg_info[i].name, flags);
+	}
+
+init_pkg_free_alloc:
+	ice_free(hw, pkg_info);
+
+	return status;
+}
+
+/**
+ * ice_find_label_value
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @name: name of the label to search for
+ * @type: the section type that will contain the label
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Finds a label's value given the label name and the section type to search.
+ * The ice_seg parameter must not be NULL since the first call to
+ * ice_enum_labels requires a pointer to an actual ice_seg structure.
+ */
+enum ice_status
+ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+		     u16 *value)
+{
+	struct ice_pkg_enum state;
+	char *label_name;
+	u16 val;
+
+	if (!ice_seg)
+		return ICE_ERR_PARAM;
+
+	do {
+		label_name = ice_enum_labels(ice_seg, type, &state, &val);
+		if (label_name && !strcmp(label_name, name)) {
+			*value = val;
+			return ICE_SUCCESS;
+		}
+
+		ice_seg = NULL;
+	} while (label_name);
+
+	return ICE_ERR_CFG;
+}
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+	u32 seg_count;
+	u32 i;
+
+	if (len < sizeof(*pkg))
+		return ICE_ERR_BUF_TOO_SHORT;
+
+	if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+	    pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+	    pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
+	    pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
+		return ICE_ERR_CFG;
+
+	/* pkg must have at least one segment */
+	seg_count = LE32_TO_CPU(pkg->seg_count);
+	if (seg_count < 1)
+		return ICE_ERR_CFG;
+
+	/* make sure segment array fits in package length */
+	if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
+		return ICE_ERR_BUF_TOO_SHORT;
+
+	/* all segments must fit within length */
+	for (i = 0; i < seg_count; i++) {
+		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
+		struct ice_generic_seg_hdr *seg;
+
+		/* segment header must fit */
+		if (len < off + sizeof(*seg))
+			return ICE_ERR_BUF_TOO_SHORT;
+
+		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+		/* segment body must fit */
+		if (len < off + LE32_TO_CPU(seg->seg_size))
+			return ICE_ERR_BUF_TOO_SHORT;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on
+ * whether the segment was allocated or just the passed-in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+	if (hw->pkg_copy) {
+		ice_free(hw, hw->pkg_copy);
+		hw->pkg_copy = NULL;
+	}
+	hw->seg = NULL;
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX	0
+
+	/* setup Switch block input mask, which is 48 bits in two parts */
+	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of calling ice_init_pkg() directly.
+ */
+static enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+	struct ice_pkg_hdr *pkg;
+	enum ice_status status;
+	struct ice_seg *seg;
+
+	if (!buf || !len)
+		return ICE_ERR_PARAM;
+
+	pkg = (struct ice_pkg_hdr *)buf;
+	status = ice_verify_pkg(pkg, len);
+	if (status) {
+		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+			  status);
+		return status;
+	}
+
+	/* initialize package info */
+	status = ice_init_pkg_info(hw, pkg);
+	if (status)
+		return status;
+
+	/* find segment in given package */
+	seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
+	if (!seg) {
+		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+		return ICE_ERR_CFG;
+	}
+
+	/* initialize package hints and then download package */
+	ice_init_pkg_hints(hw, seg);
+	status = ice_download_pkg(hw, seg);
+	if (status == ICE_ERR_AQ_NO_WORK) {
+		ice_debug(hw, ICE_DBG_INIT,
+			  "package previously loaded - no work.\n");
+		status = ICE_SUCCESS;
+	}
+
+	/* Free a previous segment, if necessary */
+	ice_free_seg(hw);
+	if (!status) {
+		hw->seg = seg;
+		/* on successful package download, update other required
+		 * registers to support the package
+		 */
+		ice_init_pkg_regs(hw);
+	} else {
+		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
+			  status);
+	}
+
+	return status;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer, and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the supplied package buffer is constant, or if
+ * the memory may disappear shortly after this function is called, such as
+ * when the package is read from a file.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * Since the buffer is copied before ice_init_pkg() is called, the caller is
+ * free to destroy the original package buffer immediately; the new copy will
+ * be managed by this function and related routines.
+ */
+enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+{
+	enum ice_status status;
+	u8 *buf_copy;
+
+	if (!buf || !len)
+		return ICE_ERR_PARAM;
+
+	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
+	if (!buf_copy)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_init_pkg(hw, buf_copy, len);
+	if (status)
+		/* Free the copy, since we failed to initialize the package */
+		ice_free(hw, buf_copy);
+	else
+		/* Track the copied pkg so we can free it later */
+		hw->pkg_copy = buf_copy;
+
+	return status;
+}
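+
+/* Illustrative usage sketch (not part of this patch): an init path that has
+ * read the package from a file would typically do something like:
+ *
+ *	status = ice_copy_and_init_pkg(hw, file_buf, file_size);
+ *	rte_free(file_buf);
+ *
+ * freeing is safe because ice_copy_and_init_pkg() keeps its own copy; a
+ * buffer with static storage duration may instead be handed to
+ * ice_init_pkg() directly. file_buf/file_size (and the use of rte_free())
+ * are assumptions for illustration.
+ */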
+
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+	struct ice_buf_build *bld;
+	struct ice_buf_hdr *buf;
+
+	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
+	if (!bld)
+		return NULL;
+
+	buf = (struct ice_buf_hdr *)bld;
+	buf->data_end = CPU_TO_LE16(sizeof(*buf) -
+				    sizeof(buf->section_entry[0]));
+	return bld;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as being of type ice_sw_fv_section
+ * and enumerates its entries; "offset" receives the entry's index into the
+ * field vector table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+	struct ice_sw_fv_section *fv_section =
+		(struct ice_sw_fv_section *)section;
+
+	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+		return NULL;
+	if (index >= LE16_TO_CPU(fv_section->count))
+		return NULL;
+	if (offset)
+		/* "index" passed in to this function is relative to a given
+		 * 4k block. To get to the true index into the field vector
+		 * table need to add the relative index to the base_offset
+		 * field of this section
+		 */
+		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
+	return fv_section->fv + index;
+}
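+
+/* Worked example (illustrative): if a section has base_offset = 256 and the
+ * enumerator asks for index = 3, the returned *offset is 259, i.e. the
+ * absolute row of that field vector in the switch block's FV table.
+ */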
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @prot_ids: array of protocol ids that the field vectors must contain
+ * @ids_cnt: lookup/protocol count
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from the switch block that contain
+ * every given protocol id and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile id information.
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
+		   struct LIST_HEAD_TYPE *fv_list)
+{
+	struct ice_sw_fv_list_entry *fvl;
+	struct ice_sw_fv_list_entry *tmp;
+	struct ice_pkg_enum state;
+	struct ice_seg *ice_seg;
+	struct ice_fv *fv;
+	u32 offset;
+
+	if (!ids_cnt || !hw->seg)
+		return ICE_ERR_PARAM;
+
+	ice_seg = hw->seg;
+	do {
+		u8 i;
+
+		fv = (struct ice_fv *)
+			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+					   &offset, ice_sw_fv_handler);
+
+		for (i = 0; i < ids_cnt && fv; i++) {
+			int j;
+
+			/* This code assumes that if a switch field vector line
+			 * has a matching protocol, then this line will contain
+			 * the entries necessary to represent every field in
+			 * that protocol header.
+			 */
+			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+				if (fv->ew[j].prot_id == prot_ids[i])
+					break;
+			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+				break;
+			if (i + 1 == ids_cnt) {
+				fvl = (struct ice_sw_fv_list_entry *)
+					ice_malloc(hw, sizeof(*fvl));
+				if (!fvl)
+					goto err;
+				fvl->fv_ptr = fv;
+				fvl->profile_id = offset;
+				LIST_ADD(&fvl->list_entry, fv_list);
+				break;
+			}
+		}
+		ice_seg = NULL;
+	} while (fv);
+	if (LIST_EMPTY(fv_list))
+		return ICE_ERR_CFG;
+	return ICE_SUCCESS;
+
+err:
+	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+				 list_entry) {
+		LIST_DEL(&fvl->list_entry);
+		ice_free(hw, fvl);
+	}
+
+	return ICE_ERR_NO_MEMORY;
+}
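+
+/* Illustrative usage sketch (not part of this patch): the caller owns the
+ * returned entries and must free each one, for example:
+ *
+ *	struct ice_sw_fv_list_entry *e, *t;
+ *	struct LIST_HEAD_TYPE fvl;
+ *
+ *	INIT_LIST_HEAD(&fvl);
+ *	if (!ice_get_sw_fv_list(hw, prot_ids, cnt, &fvl)) {
+ *		LIST_FOR_EACH_ENTRY_SAFE(e, t, &fvl, ice_sw_fv_list_entry,
+ *					 list_entry) {
+ *			use_profile(e->profile_id, e->fv_ptr);
+ *			LIST_DEL(&e->list_entry);
+ *			ice_free(hw, e);
+ *		}
+ *	}
+ *
+ * prot_ids, cnt and use_profile() are hypothetical; the list macros are the
+ * osdep wrappers already used in this file.
+ */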
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+				 void **section)
+{
+	struct ice_buf_build *buf;
+
+	if (!section)
+		return NULL;
+
+	buf = ice_pkg_buf_alloc(hw);
+	if (!buf)
+		return NULL;
+
+	if (ice_pkg_buf_reserve_section(buf, 1))
+		goto ice_pkg_buf_alloc_single_section_err;
+
+	*section = ice_pkg_buf_alloc_section(buf, type, size);
+	if (!*section)
+		goto ice_pkg_buf_alloc_single_section_err;
+
+	return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+	ice_pkg_buf_free(hw, buf);
+	return NULL;
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times, as long as all calls are made before the first
+ * call to ice_pkg_buf_alloc_section(). After that, the number of sections that
+ * can be allocated cannot be increased; leaving reserved sections unused is
+ * fine, but it results in some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+	struct ice_buf_hdr *buf;
+	u16 section_count;
+	u16 data_end;
+
+	if (!bld)
+		return ICE_ERR_PARAM;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+
+	/* already an active section, can't increase table size */
+	section_count = LE16_TO_CPU(buf->section_count);
+	if (section_count > 0)
+		return ICE_ERR_CFG;
+
+	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+		return ICE_ERR_CFG;
+	bld->reserved_section_table_entries += count;
+
+	data_end = LE16_TO_CPU(buf->data_end) +
+		   (count * sizeof(buf->section_entry[0]));
+	buf->data_end = CPU_TO_LE16(data_end);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_unreserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to unreserve
+ *
+ * Unreserves one or more section table entries in a package buffer, releasing
+ * space that can be used for section data. This routine can be called
+ * multiple times, as long as all calls are made before the first call to
+ * ice_pkg_buf_alloc_section(). After that, the section table size can no
+ * longer be changed.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
+{
+	struct ice_buf_hdr *buf;
+	u16 section_count;
+	u16 data_end;
+
+	if (!bld)
+		return ICE_ERR_PARAM;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+
+	/* already an active section, can't decrease table size */
+	section_count = LE16_TO_CPU(buf->section_count);
+	if (section_count > 0)
+		return ICE_ERR_CFG;
+
+	if (count > bld->reserved_section_table_entries)
+		return ICE_ERR_CFG;
+	bld->reserved_section_table_entries -= count;
+
+	data_end = LE16_TO_CPU(buf->data_end) -
+		   (count * sizeof(buf->section_entry[0]));
+	buf->data_end = CPU_TO_LE16(data_end);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's state accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+	struct ice_buf_hdr *buf;
+	u16 sect_count;
+	u16 data_end;
+
+	if (!bld || !type || !size)
+		return NULL;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+
+	/* check for enough space left in buffer */
+	data_end = LE16_TO_CPU(buf->data_end);
+
+	/* section start must align on 4 byte boundary */
+	data_end = ICE_ALIGN(data_end, 4);
+
+	if ((data_end + size) > ICE_MAX_S_DATA_END)
+		return NULL;
+
+	/* check for more available section table entries */
+	sect_count = LE16_TO_CPU(buf->section_count);
+	if (sect_count < bld->reserved_section_table_entries) {
+		void *section_ptr = ((u8 *)buf) + data_end;
+
+		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
+		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
+		buf->section_entry[sect_count].type = CPU_TO_LE32(type);
+
+		data_end += size;
+		buf->data_end = CPU_TO_LE16(data_end);
+
+		buf->section_count = CPU_TO_LE16(sect_count + 1);
+		return section_ptr;
+	}
+
+	/* no free section table entries */
+	return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_free_space
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of free bytes remaining in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
+{
+	struct ice_buf_hdr *buf;
+
+	if (!bld)
+		return 0;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+	struct ice_buf_hdr *buf;
+
+	if (!bld)
+		return 0;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+	return LE16_TO_CPU(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns a pointer to the buffer within the package build structure.
+ */
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+	if (!bld)
+		return NULL;
+
+	return &bld->buf;
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+	ice_free(hw, bld);
+}
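+
+/* Illustrative build sequence (not part of this patch): the buffer helpers
+ * above are meant to be used as allocate -> reserve -> fill -> send -> free,
+ * e.g. for a single hypothetical section id "sid" of "size" bytes:
+ *
+ *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
+ *	void *sect;
+ *
+ *	if (bld && !ice_pkg_buf_reserve_section(bld, 1)) {
+ *		sect = ice_pkg_buf_alloc_section(bld, sid, size);
+ *		if (sect) {
+ *			fill_section(sect);
+ *			status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ *		}
+ *	}
+ *	ice_pkg_buf_free(hw, bld);
+ *
+ * fill_section() is a placeholder; this is the same pattern that
+ * ice_pkg_buf_alloc_single_section() wraps for the one-section case.
+ */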
+
+/* PTG Management */
+
+/**
+ * ice_ptg_update_xlt1 - Updates packet type groups in hw via xlt1 table
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ *
+ * This function will update the xlt1 hardware table to reflect the new
+ * packet type group configuration.
+ */
+enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
+{
+	struct ice_xlt1_section *sect;
+	struct ice_buf_build *bld;
+	enum ice_status status;
+	u16 index;
+
+	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
+					       ICE_XLT1_SIZE(ICE_XLT1_CNT),
+					       (void **)&sect);
+	if (!bld)
+		return ICE_ERR_NO_MEMORY;
+
+	sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
+	sect->offset = CPU_TO_LE16(0);
+	for (index = 0; index < ICE_XLT1_CNT; index++)
+		sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
+
+	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+
+	ice_pkg_buf_free(hw, bld);
+
+	return status;
+}
+
+/**
+ * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptype: the ptype to search for
+ * @ptg: pointer to variable that receives the PTG
+ *
+ * This function will search the PTGs for a particular ptype, returning the
+ * PTG ID that contains it through the ptg parameter, with the value of
+ * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
+ */
+enum ice_status
+ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
+{
+	if (ptype >= ICE_XLT1_CNT || !ptg)
+		return ICE_ERR_PARAM;
+
+	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptg_alloc_val - Allocates a new packet type group ID by value
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptg: the ptg to allocate
+ *
+ * This function allocates a given packet type group ID specified by the ptg
+ * parameter.
+ */
+static
+void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
+}
+
+/**
+ * ice_ptg_alloc - Finds a free entry and allocates a new packet type group ID
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ *
+ * This function allocates and returns a new packet type group ID. Note
+ * that 0 is the default packet type group, so successfully created PTGs will
+ * have a non-zero ID value, which means a 0 return value indicates an error.
+ */
+u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 i;
+
+	/* Skip the default PTG of 0 */
+	for (i = 1; i < ICE_MAX_PTGS; i++)
+		if (!hw->blk[blk].xlt1.ptg_tbl[i].in_use) {
+			/* found a free PTG ID */
+			ice_ptg_alloc_val(hw, blk, i);
+			return (u8)i;
+		}
+
+	return 0;
+}
+
+/**
+ * ice_ptg_free - Frees a packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptg: the ptg ID to free
+ *
+ * This function frees a packet type group, and returns all the current ptypes
+ * within it to the default PTG.
+ */
+void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+	struct ice_ptg_ptype *p, *temp;
+
+	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
+	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+	while (p) {
+		p->ptg = ICE_DEFAULT_PTG;
+		temp = p->next_ptype;
+		p->next_ptype = NULL;
+		p = temp;
+	}
+
+	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
+}
+
+/**
+ * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptype: the ptype to remove
+ * @ptg: the ptg to remove the ptype from
+ *
+ * This function will remove the ptype from the specified PTG, and move it to
+ * the default PTG (ICE_DEFAULT_PTG).
+ */
+static enum ice_status
+ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+	struct ice_ptg_ptype **ch;
+	struct ice_ptg_ptype *p;
+
+	if (ptype > ICE_XLT1_CNT - 1)
+		return ICE_ERR_PARAM;
+
+	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	/* Should not happen if .in_use is set, bad config */
+	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
+		return ICE_ERR_CFG;
+
+	/* find the ptype within this PTG, and bypass the link over it */
+	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+	while (p) {
+		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
+			*ch = p->next_ptype;
+			break;
+		}
+
+		ch = &p->next_ptype;
+		p = p->next_ptype;
+	}
+
+	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
+	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptype: the ptype to add or move
+ * @ptg: the ptg to add or move the ptype to
+ *
+ * This function will either add or move a ptype to a particular PTG depending
+ * on whether the ptype is already part of another group. Note that using a
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * default PTG.
+ */
+enum ice_status
+ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+	enum ice_status status;
+	u8 original_ptg;
+
+	if (ptype > ICE_XLT1_CNT - 1)
+		return ICE_ERR_PARAM;
+
+	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
+	if (status)
+		return status;
+
+	/* Is ptype already in the correct PTG? */
+	if (original_ptg == ptg)
+		return ICE_SUCCESS;
+
+	/* Remove from original PTG and move back to the default PTG */
+	if (original_ptg != ICE_DEFAULT_PTG)
+		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
+
+	/* Moving to default PTG? Then we're done with this request */
+	if (ptg == ICE_DEFAULT_PTG)
+		return ICE_SUCCESS;
+
+	/* Add ptype to PTG at beginning of list */
+	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
+		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
+		&hw->blk[blk].xlt1.ptypes[ptype];
+
+	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
+	hw->blk[blk].xlt1.t[ptype] = ptg;
+
+	return ICE_SUCCESS;
+}
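+
+/* Illustrative usage sketch (not part of this patch): grouping two ptypes
+ * into a fresh PTG and committing the result to hardware could look like:
+ *
+ *	u8 ptg = ice_ptg_alloc(hw, blk);
+ *
+ *	if (ptg &&
+ *	    !ice_ptg_add_mv_ptype(hw, blk, ptype_a, ptg) &&
+ *	    !ice_ptg_add_mv_ptype(hw, blk, ptype_b, ptg))
+ *		status = ice_ptg_update_xlt1(hw, blk);
+ *
+ * ptype_a/ptype_b are hypothetical packet type indices.
+ */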
+
+/* Block / table size info */
+struct ice_blk_size_details {
+	u16 xlt1;			/* # xlt1 entries */
+	u16 xlt2;			/* # xlt2 entries */
+	u16 prof_tcam;			/* # profile id tcam entries */
+	u16 prof_id;			/* # profile ids */
+	u8 prof_cdid_bits;		/* # cdid one-hot bits used in key */
+	u16 prof_redir;			/* # profile redirection entries */
+	u16 es;				/* # extraction sequence entries */
+	u16 fvw;			/* # field vector words */
+	u8 overwrite;			/* overwrite existing entries allowed */
+	u8 reverse;			/* reverse FV order */
+};
+
+static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
+	/**
+	 * Table Definitions
+	 * XLT1 - Number of entries in XLT1 table
+	 * XLT2 - Number of entries in XLT2 table
+	 * TCAM - Number of entries Profile ID TCAM table
+	 * CDID - Control Domain ID of the hardware block
+	 * PRED - Number of entries in the Profile Redirection Table
+	 * FV   - Number of entries in the Field Vector
+	 * FVW  - Width (in WORDs) of the Field Vector
+	 * OVR  - Overwrite existing table entries
+	 * REV  - Reverse FV
+	 */
+	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
+	/*          Overwrite   , Reverse FV */
+	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
+		    false, false },
+	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
+		    false, false },
+	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
+		    false, true  },
+	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
+		    true,  true  },
+	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
+		    false, false },
+};
+
+enum ice_sid_all {
+	ICE_SID_XLT1_OFF = 0,
+	ICE_SID_XLT2_OFF,
+	ICE_SID_PR_OFF,
+	ICE_SID_PR_REDIR_OFF,
+	ICE_SID_ES_OFF,
+	ICE_SID_OFF_COUNT,
+};
+
+/* Characteristic handling */
+
+/**
+ * ice_match_prop_lst - determine if properties of two lists match
+ * @list1: first properties list
+ * @list2: second properties list
+ *
+ * The count, cookies, and order must all match for the lists to be equivalent.
+ */
+static bool
+ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
+{
+	struct ice_vsig_prof *tmp1;
+	struct ice_vsig_prof *tmp2;
+	u16 chk_count = 0;
+	u16 count = 0;
+
+	/* compare counts */
+	LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
+		count++;
+	}
+	LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
+		chk_count++;
+	}
+	if (!count || count != chk_count)
+		return false;
+
+	tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
+	tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
+
+	/* profile cookies must compare, and in the exact same order to take
+	 * into account priority
+	 */
+	while (--count) {
+		if (tmp2->profile_cookie != tmp1->profile_cookie)
+			return false;
+
+		tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
+		tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
+	}
+
+	return true;
+}
+
+/* VSIG Management */
+
+/**
+ * ice_vsig_update_xlt2_sect - update one section of xlt2 table
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsi: hw vsi number to program
+ * @vsig: vsig for the vsi
+ *
+ * This function will update the xlt2 hardware table with the input vsi
+ * group configuration.
+ */
+static enum ice_status
+ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+			  u16 vsig)
+{
+	struct ice_xlt2_section *sect;
+	struct ice_buf_build *bld;
+	enum ice_status status;
+
+	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
+					       sizeof(struct ice_xlt2_section),
+					       (void **)&sect);
+	if (!bld)
+		return ICE_ERR_NO_MEMORY;
+
+	sect->count = CPU_TO_LE16(1);
+	sect->offset = CPU_TO_LE16(vsi);
+	sect->value[0] = CPU_TO_LE16(vsig);
+
+	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+
+	ice_pkg_buf_free(hw, bld);
+
+	return status;
+}
+
+/**
+ * ice_vsig_update_xlt2 - update xlt2 table with VSIG configuration
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ *
+ * This function will update the xlt2 hardware table with the vsi group
+ * configuration of all vsis marked as changed.
+ */
+enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 vsi;
+
+	for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
+		/* update only vsis that have been changed */
+		if (hw->blk[blk].xlt2.vsis[vsi].changed) {
+			enum ice_status status;
+			u16 vsig;
+
+			vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+			status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
+			if (status)
+				return status;
+
+			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_find_vsi - find a VSIG that contains a specified vsi
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsi: vsi of interest
+ * @vsig: pointer to receive the vsi group
+ *
+ * This function will look up the vsi entry in the XLT2 list and return
+ * the vsi group it is associated with.
+ */
+enum ice_status
+ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
+{
+	if (!vsig || vsi >= ICE_MAX_VSI)
+		return ICE_ERR_PARAM;
+
+	/* As long as there's a default or valid VSIG associated with the input
+	 * vsi, the function returns success. Any handling of VSIGs will be
+	 * done by the following add, update or remove functions.
+	 */
+	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_alloc_val - allocate a new VSIG by value
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsig: the vsig to allocate
+ *
+ * This function will allocate a given VSIG specified by the vsig parameter.
+ */
+static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
+		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
+	}
+
+	return ICE_VSIG_VALUE(idx, hw->pf_id);
+}
+
+/**
+ * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ *
+ * This function will iterate through the VSIG list and mark the first
+ * unused entry as in use for the new VSIG, returning that VSIG value.
+ */
+static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 i;
+
+	for (i = 1; i < ICE_MAX_VSIGS; i++)
+		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+			return ice_vsig_alloc_val(hw, blk, i);
+
+	return ICE_DEFAULT_VSIG;
+}
+
+/**
+ * ice_find_dup_props_vsig - find vsi group with a specified set of properties
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @chs: characteristic list
+ * @vsig: returns the VSIG with the matching profiles, if found
+ *
+ * Each VSIG is associated with a characteristic set; i.e. all vsis under
+ * a group have the same characteristic set. To check whether there exists a
+ * VSIG which has the same characteristics as the input characteristics, this
+ * function will iterate through the xlt2 list and return the VSIG that has a
+ * matching configuration. To make sure that priorities are accounted
+ * for, the list must match exactly, including the order in which the
+ * characteristics are listed.
+ */
+enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+			struct LIST_HEAD_TYPE *chs, u16 *vsig)
+{
+	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
+	u16 i;
+
+	for (i = 0; i < xlt2->count; i++) {
+		if (xlt2->vsig_tbl[i].in_use &&
+		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
+			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
+			return ICE_SUCCESS;
+		}
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_vsig_free - free vsi group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsig: VSIG to remove
+ *
+ * The function will remove all vsis associated with the input VSIG, move
+ * them to the default VSIG, and mark the VSIG as available.
+ */
+enum ice_status
+ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+	struct ice_vsig_prof *dtmp, *del;
+	struct ice_vsig_vsi *vsi_cur;
+	u16 idx;
+
+	idx = vsig & ICE_VSIG_IDX_M;
+	if (idx >= ICE_MAX_VSIGS)
+		return ICE_ERR_PARAM;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
+
+	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	if (!vsi_cur)
+		return ICE_ERR_CFG;
+
+	/* remove all vsis associated with this VSIG XLT2 entry */
+	do {
+		struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+
+		vsi_cur->vsig = ICE_DEFAULT_VSIG;
+		vsi_cur->changed = 1;
+		vsi_cur->next_vsi = NULL;
+		vsi_cur = tmp;
+	} while (vsi_cur);
+
+	/* NULL terminate head of vsi list */
+	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+
+	/* free characteristic list */
+	LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
+				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+				 ice_vsig_prof, list) {
+		LIST_DEL(&del->list);
+		ice_free(hw, del);
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_add_mv_vsi - add or move a vsi to a vsi group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsi: vsi to move
+ * @vsig: destination vsi group
+ *
+ * This function will move or add the input vsi to the target VSIG.
+ * The function will find the original VSIG the vsi belongs to, move
+ * the entry to the default VSIG, update the original VSIG, and
+ * then move the entry to the new VSIG.
+ */
+enum ice_status
+ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+	struct ice_vsig_vsi *tmp;
+	enum ice_status status;
+	u16 orig_vsig, idx;
+
+	idx = vsig & ICE_VSIG_IDX_M;
+
+	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+		return ICE_ERR_PARAM;
+
+	/* if the VSIG is not in use and is not the default VSIG, then it
+	 * doesn't exist.
+	 */
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
+	    vsig != ICE_DEFAULT_VSIG)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+	if (status)
+		return status;
+
+	/* no update required if vsigs match */
+	if (orig_vsig == vsig)
+		return ICE_SUCCESS;
+
+	if (orig_vsig != ICE_DEFAULT_VSIG) {
+		/* remove entry from orig_vsig and add to default VSIG */
+		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
+		if (status)
+			return status;
+	}
+
+	if (idx == ICE_DEFAULT_VSIG)
+		return ICE_SUCCESS;
+
+	/* Create vsi entry and add VSIG and prop_mask values */
+	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
+	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
+
+	/* Add new entry to the head of the VSIG list */
+	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
+		&hw->blk[blk].xlt2.vsis[vsi];
+	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
+	hw->blk[blk].xlt2.t[vsi] = vsig;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_remove_vsi - remove vsi from VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsi: vsi to remove
+ * @vsig: vsi group to remove from
+ *
+ * The function will remove the input vsi from its vsi group and move it
+ * to the DEFAULT_VSIG.
+ */
+enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
+	u16 idx;
+
+	idx = vsig & ICE_VSIG_IDX_M;
+
+	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+		return ICE_ERR_PARAM;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	/* entry already in the default VSIG, don't need to remove it */
+	if (idx == ICE_DEFAULT_VSIG)
+		return ICE_SUCCESS;
+
+	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	if (!(*vsi_head))
+		return ICE_ERR_CFG;
+
+	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
+	vsi_cur = (*vsi_head);
+
+	/* iterate the vsi list, skip over the entry to be removed */
+	while (vsi_cur) {
+		if (vsi_tgt == vsi_cur) {
+			(*vsi_head) = vsi_cur->next_vsi;
+			break;
+		}
+		vsi_head = &vsi_cur->next_vsi;
+		vsi_cur = vsi_cur->next_vsi;
+	}
+
+	/* verify the vsi was actually found and removed from the group list */
+	if (!vsi_cur)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	vsi_cur->vsig = ICE_DEFAULT_VSIG;
+	vsi_cur->changed = 1;
+	vsi_cur->next_vsi = NULL;
+
+	return ICE_SUCCESS;
+}
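+
+/* Illustrative lifecycle sketch (not part of this patch): a vsi is typically
+ * attached to a VSIG, the change is committed to hardware, and the VSIG is
+ * torn down when its last vsi leaves:
+ *
+ *	if (!ice_vsig_add_mv_vsi(hw, blk, vsi, vsig))
+ *		status = ice_vsig_update_xlt2(hw, blk);
+ *	...
+ *	ice_vsig_remove_vsi(hw, blk, vsi, vsig);
+ *	ice_vsig_free(hw, blk, vsig);
+ *
+ * Here vsig is assumed to come from ice_vsig_alloc()/ice_vsig_alloc_val().
+ */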
+
+/**
+ * ice_find_prof_id - find profile id for a given field vector
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @fv: field vector to search for
+ * @prof_id: receives the profile id
+ */
+static enum ice_status
+ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
+		 struct ice_fv_word *fv, u8 *prof_id)
+{
+	struct ice_es *es = &hw->blk[blk].es;
+	u16 off, i;
+
+	for (i = 0; i < es->count; i++) {
+		off = i * es->fvw;
+
+		if (memcmp(&es->t[off], fv, es->fvw * 2))
+			continue;
+
+		*prof_id = i;
+		return ICE_SUCCESS;
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_prof_id_rsrc_type - get profile id resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+	switch (blk) {
+	case ICE_BLK_SW:
+		*rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
+		break;
+	case ICE_BLK_ACL:
+		*rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
+		break;
+	case ICE_BLK_FD:
+		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
+		break;
+	case ICE_BLK_RSS:
+		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
+		break;
+	case ICE_BLK_PE:
+		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+/**
+ * ice_tcam_ent_rsrc_type - get tcam entry resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+	switch (blk) {
+	case ICE_BLK_SW:
+		*rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
+		break;
+	case ICE_BLK_ACL:
+		*rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
+		break;
+	case ICE_BLK_FD:
+		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
+		break;
+	case ICE_BLK_RSS:
+		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
+		break;
+	case ICE_BLK_PE:
+		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+/**
+ * ice_workaround_get_res_blk - determine the block from a resource type
+ * @type: type of resource
+ * @blk: pointer to an enum that will receive the block type
+ * @tcam: pointer to variable that will be set to true for a TCAM resource type
+ */
+static enum ice_status
+ice_workaround_get_res_blk(u16 type, enum ice_block *blk, bool *tcam)
+{
+	/* just need to support TCAM entries and Profile IDs for now */
+	*tcam = false;
+
+	switch (type) {
+	case ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_SW;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_ACL;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_FD;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_RSS;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_PE;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_SW;
+		break;
+	case ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_ACL;
+		break;
+	case ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_FD;
+		break;
+	case ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_RSS;
+		break;
+	case ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_PE;
+		break;
+	default:
+		return ICE_ERR_PARAM;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_alloc_res_workaround
+ * @hw: pointer to the hw struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @res: pointer to array that will receive the resources
+ */
+static enum ice_status
+ice_alloc_res_workaround(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+	enum ice_block blk;
+	u16 count = 0;
+	bool tcam;
+	u16 first;
+	u16 last;
+	u16 max;
+	u16 i;
+
+/* Number of PFs we support with this workaround */
+#define ICE_WA_PF_COUNT	4
+#define ICE_WA_1ST_TCAM	4
+#define ICE_WA_1ST_FV	4
+
+	/* Only allow our supported PFs */
+	if (hw->pf_id >= ICE_WA_PF_COUNT)
+		return ICE_ERR_AQ_ERROR;
+
+	if (ice_workaround_get_res_blk(type, &blk, &tcam))
+		return ICE_ERR_AQ_ERROR;
+
+	if (tcam) {
+		/* range of entries based on PF */
+		max = hw->blk[blk].prof.count / ICE_WA_PF_COUNT;
+		first = max * hw->pf_id;
+		last = first + max;
+
+		/* TCAM entries - start at non-zero index in the PROF ID TCAM
+		 * table. The first few entries are for bypass, default and
+		 * errors (only relevant for PF 0)
+		 */
+		first += hw->pf_id ? 0 : ICE_WA_1ST_TCAM;
+
+		for (i = first; i < last && count < num; i++) {
+			if (!hw->blk[blk].prof.resource_used_hack[i]) {
+				res[count++] = i;
+				hw->blk[blk].prof.resource_used_hack[i] = true;
+			}
+		}
+
+		/* handle failure case */
+		if (count < num) {
+			for (i = 0; i < count; i++) {
+				hw->blk[blk].prof.resource_used_hack[res[i]] =
+					false;
+				res[i] = 0;
+			}
+
+			return ICE_ERR_AQ_ERROR;
+		}
+	} else {
+		/* range of entries based on PF */
+		max = hw->blk[blk].es.count / ICE_WA_PF_COUNT;
+		first = max * hw->pf_id;
+		last = first + max;
+
+		/* FV entries - start at non-zero index in the field vector
+		 * table. The first few entries are for bypass, default and
+		 * errors (only relevant for PF 0)
+		 */
+		first += hw->pf_id ? 0 : ICE_WA_1ST_FV;
+
+		for (i = first; i < last && count < num; i++) {
+			if (!hw->blk[blk].es.resource_used_hack[i]) {
+				res[count++] = i;
+				hw->blk[blk].es.resource_used_hack[i] = true;
+			}
+		}
+
+		/* handle failure case */
+		if (count < num) {
+			for (i = 0; i < count; i++) {
+				hw->blk[blk].es.resource_used_hack[res[i]] =
+					false;
+				res[i] = 0;
+			}
+
+			return ICE_ERR_AQ_ERROR;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
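+
+/* Worked example (illustrative): for the SW block's 512 TCAM entries and
+ * ICE_WA_PF_COUNT = 4, each PF owns a 128-entry window. PF 1 allocates from
+ * [128, 256), while PF 0 starts at ICE_WA_1ST_TCAM = 4 so the bypass,
+ * default and error entries stay untouched.
+ */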
+
+/**
+ * ice_free_res_workaround
+ * @hw: pointer to the hw struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: array of resource ids to free
+ */
+static enum ice_status
+ice_free_res_workaround(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+	enum ice_block blk;
+	bool tcam = false;
+	u16 i;
+
+	if (ice_workaround_get_res_blk(type, &blk, &tcam))
+		return ICE_ERR_AQ_ERROR;
+
+	if (tcam) {
+		/* TCAM entries */
+		for (i = 0; i < num; i++) {
+			if (res[i] < hw->blk[blk].prof.count) {
+				u16 idx = res[i];
+
+				ice_free_hw_res(hw, type, 1, &idx);
+				hw->blk[blk].prof.resource_used_hack[res[i]] =
+					false;
+			}
+		}
+
+	} else {
+		/* Profile IDs */
+		for (i = 0; i < num; i++) {
+			if (res[i] < hw->blk[blk].es.count) {
+				u16 idx = res[i];
+
+				ice_free_hw_res(hw, type, 1, &idx);
+				hw->blk[blk].es.resource_used_hack[res[i]] =
+					false;
+			}
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_alloc_tcam_ent - allocate hardware tcam entry
+ * @hw: pointer to the hw struct
+ * @blk: the block to allocate the tcam for
+ * @tcam_idx: pointer to variable to receive the tcam entry
+ *
+ * This function allocates a new entry in a Profile ID TCAM for a specific
+ * block.
+ */
+static enum ice_status
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+{
+	u16 res_type;
+
+	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+		return ICE_ERR_PARAM;
+
+	return ice_alloc_res_workaround(hw, res_type, 1, tcam_idx);
+}
+
+/**
+ * ice_free_tcam_ent - free hardware tcam entry
+ * @hw: pointer to the hw struct
+ * @blk: the block from which to free the tcam entry
+ * @tcam_idx: the tcam entry to free
+ *
+ * This function frees an entry in a Profile ID TCAM for a specific block.
+ */
+static enum ice_status
+ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
+{
+	u16 res_type;
+
+	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+		return ICE_ERR_PARAM;
+
+	return ice_free_res_workaround(hw, res_type, 1, &tcam_idx);
+}
+
+/**
+ * ice_alloc_prof_id - allocate profile id
+ * @hw: pointer to the hw struct
+ * @blk: the block to allocate the profile id for
+ * @prof_id: pointer to variable to receive the profile id
+ *
+ * This function allocates a new profile id, which also corresponds to a Field
+ * Vector (Extraction Sequence) entry.
+ */
+static enum ice_status
+ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+{
+	enum ice_status status;
+	u16 res_type;
+	u16 get_prof;
+
+	if (!ice_prof_id_rsrc_type(blk, &res_type))
+		return ICE_ERR_PARAM;
+
+	status = ice_alloc_res_workaround(hw, res_type, 1, &get_prof);
+	if (!status)
+		*prof_id = (u8)get_prof;
+
+	return status;
+}
+
+/**
+ * ice_free_prof_id - free profile id
+ * @hw: pointer to the hw struct
+ * @blk: the block from which to free the profile id
+ * @prof_id: the profile id to free
+ *
+ * This function frees a profile id, which also corresponds to a Field Vector.
+ */
+static enum ice_status
+ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+	u16 tmp_prof_id = (u16)prof_id;
+	u16 res_type;
+
+	if (!ice_prof_id_rsrc_type(blk, &res_type))
+		return ICE_ERR_PARAM;
+
+	/* Calling ice_free_res_workaround() here is a WORKAROUND until
+	 * DCR 076 is available.
+	 * DCR 076 - Update to Profile ID TCAM Resource Allocation
+	 *
+	 * Once the DCR 076 changes are available in FW, this code can be
+	 * restored. Original code:
+	 *
+	 * return ice_free_res(hw, res_type, 1, &tmp_prof_id);
+	 */
+	return ice_free_res_workaround(hw, res_type, 1, &tmp_prof_id);
+}
+
+/**
+ * ice_prof_inc_ref - increment reference count for profile
+ * @hw: pointer to the hw struct
+ * @blk: the block containing the profile id
+ * @prof_id: the profile id for which to increment the reference count
+ */
+static enum ice_status
+ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+	if (prof_id >= hw->blk[blk].es.count)
+		return ICE_ERR_PARAM;
+
+	hw->blk[blk].es.ref_count[prof_id]++;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_dec_ref - decrement reference count for profile
+ * @hw: pointer to the hw struct
+ * @blk: the block containing the profile id
+ * @prof_id: the profile id for which to decrement the reference count
+ */
+static enum ice_status
+ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+	if (prof_id >= hw->blk[blk].es.count)
+		return ICE_ERR_PARAM;
+
+	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
+		if (!--hw->blk[blk].es.ref_count[prof_id])
+			return ice_free_prof_id(hw, blk, prof_id);
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_es - write an extraction sequence to hardware
+ * @hw: pointer to the hw struct
+ * @blk: the block in which to write the extraction sequence
+ * @prof_id: the profile id to write
+ * @fv: pointer to the extraction sequence to write
+ */
+static void
+ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
+	     struct ice_fv_word *fv)
+{
+	u16 off;
+
+	off = prof_id * hw->blk[blk].es.fvw;
+	ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * 2,
+		   ICE_NONDMA_TO_NONDMA);
+}
+
+/* Block / table section IDs */
+static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
+	/* SWITCH */
+	{	ICE_SID_XLT1_SW,
+		ICE_SID_XLT2_SW,
+		ICE_SID_PROFID_TCAM_SW,
+		ICE_SID_PROFID_REDIR_SW,
+		ICE_SID_FLD_VEC_SW
+	},
+
+	/* ACL */
+	{	ICE_SID_XLT1_ACL,
+		ICE_SID_XLT2_ACL,
+		ICE_SID_PROFID_TCAM_ACL,
+		ICE_SID_PROFID_REDIR_ACL,
+		ICE_SID_FLD_VEC_ACL
+	},
+
+	/* FD */
+	{	ICE_SID_XLT1_FD,
+		ICE_SID_XLT2_FD,
+		ICE_SID_PROFID_TCAM_FD,
+		ICE_SID_PROFID_REDIR_FD,
+		ICE_SID_FLD_VEC_FD
+	},
+
+	/* RSS */
+	{	ICE_SID_XLT1_RSS,
+		ICE_SID_XLT2_RSS,
+		ICE_SID_PROFID_TCAM_RSS,
+		ICE_SID_PROFID_REDIR_RSS,
+		ICE_SID_FLD_VEC_RSS
+	},
+
+	/* PE */
+	{	ICE_SID_XLT1_PE,
+		ICE_SID_XLT2_PE,
+		ICE_SID_PROFID_TCAM_PE,
+		ICE_SID_PROFID_REDIR_PE,
+		ICE_SID_FLD_VEC_PE
+	}
+};
+
+/**
+ * ice_fill_tbl - Reads content of a single table type into database
+ * @hw: pointer to the hardware structure
+ * @block_id: Block ID of the table to copy
+ * @sid: Section ID of the table to copy
+ *
+ * Will attempt to read the entire content of a given table of a single block
+ * into the driver database. We assume that the buffer will always
+ * be as large or larger than the data contained in the package. If
+ * this condition is not met, there is most likely an error in the package
+ * contents.
+ */
+static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
+{
+	u32 dst_len, sect_len, offset = 0;
+	struct ice_prof_redir_section *pr;
+	struct ice_prof_id_section *pid;
+	struct ice_xlt1_section *xlt1;
+	struct ice_xlt2_section *xlt2;
+	struct ice_sw_fv_section *es;
+	struct ice_pkg_enum state;
+	u8 *src, *dst;
+	void *sect;
+
+	/* if the hw segment pointer is null then the first iteration of
+	 * ice_pkg_enum_section() will fail. In this case the hw tables will
+	 * not be filled and we simply return.
+	 */
+	if (!hw->seg) {
+		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
+		return;
+	}
+
+	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+	sect = ice_pkg_enum_section(hw->seg, &state, sid);
+
+	while (sect) {
+		switch (sid) {
+		case ICE_SID_XLT1_FD:
+		case ICE_SID_XLT1_RSS:
+		case ICE_SID_XLT1_ACL:
+			xlt1 = (struct ice_xlt1_section *)sect;
+			src = xlt1->value;
+			sect_len = LE16_TO_CPU(xlt1->count) *
+				sizeof(*hw->blk[block_id].xlt1.t);
+			dst = hw->blk[block_id].xlt1.t;
+			dst_len = hw->blk[block_id].xlt1.count *
+				sizeof(*hw->blk[block_id].xlt1.t);
+			break;
+		case ICE_SID_XLT2_FD:
+		case ICE_SID_XLT2_RSS:
+		case ICE_SID_XLT2_ACL:
+			xlt2 = (struct ice_xlt2_section *)sect;
+			src = (u8 *)xlt2->value;
+			sect_len = LE16_TO_CPU(xlt2->count) *
+				sizeof(*hw->blk[block_id].xlt2.t);
+			dst = (u8 *)hw->blk[block_id].xlt2.t;
+			dst_len = hw->blk[block_id].xlt2.count *
+				sizeof(*hw->blk[block_id].xlt2.t);
+			break;
+		case ICE_SID_PROFID_TCAM_FD:
+		case ICE_SID_PROFID_TCAM_RSS:
+		case ICE_SID_PROFID_TCAM_ACL:
+			pid = (struct ice_prof_id_section *)sect;
+			src = (u8 *)pid->entry;
+			sect_len = LE16_TO_CPU(pid->count) *
+				sizeof(*hw->blk[block_id].prof.t);
+			dst = (u8 *)hw->blk[block_id].prof.t;
+			dst_len = hw->blk[block_id].prof.count *
+				sizeof(*hw->blk[block_id].prof.t);
+			break;
+		case ICE_SID_PROFID_REDIR_FD:
+		case ICE_SID_PROFID_REDIR_RSS:
+		case ICE_SID_PROFID_REDIR_ACL:
+			pr = (struct ice_prof_redir_section *)sect;
+			src = pr->redir_value;
+			sect_len = LE16_TO_CPU(pr->count) *
+				sizeof(*hw->blk[block_id].prof_redir.t);
+			dst = hw->blk[block_id].prof_redir.t;
+			dst_len = hw->blk[block_id].prof_redir.count *
+				sizeof(*hw->blk[block_id].prof_redir.t);
+			break;
+		case ICE_SID_FLD_VEC_FD:
+		case ICE_SID_FLD_VEC_RSS:
+		case ICE_SID_FLD_VEC_ACL:
+			es = (struct ice_sw_fv_section *)sect;
+			src = (u8 *)es->fv;
+			sect_len = LE16_TO_CPU(es->count) *
+				hw->blk[block_id].es.fvw *
+				sizeof(*hw->blk[block_id].es.t);
+			dst = (u8 *)hw->blk[block_id].es.t;
+			dst_len = hw->blk[block_id].es.count *
+				hw->blk[block_id].es.fvw *
+				sizeof(*hw->blk[block_id].es.t);
+			break;
+		default:
+			return;
+		}
+
+		/* if the section offset exceeds destination length, terminate
+		 * table fill.
+		 */
+		if (offset > dst_len)
+			return;
+
+		/* if the sum of section size and offset exceeds the destination
+		 * size, then we are out of bounds of the hw table size for that
+		 * PF; clamp the section length to fill only the remaining table
+		 * space of that PF.
+		 */
+		if ((offset + sect_len) > dst_len)
+			sect_len = dst_len - offset;
+
+		ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
+		offset += sect_len;
+		sect = ice_pkg_enum_section(NULL, &state, sid);
+	}
+}
+
+/**
+ * ice_fill_blk_tbls - Read package content for tables of a block
+ * @hw: pointer to the hardware structure
+ * @block_id: The block ID which contains the tables to be copied
+ *
+ * Reads the current package contents and populates the driver
+ * database with the data it contains to allow for advanced driver
+ * features.
+ */
+static void ice_fill_blk_tbls(struct ice_hw *hw, enum ice_block block_id)
+{
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].xlt1.sid);
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].xlt2.sid);
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].prof.sid);
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].prof_redir.sid);
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].es.sid);
+}
+
+/**
+ * ice_free_prof_map - frees the profile map
+ * @hw: pointer to the hardware structure
+ * @blk: the hw block which contains the profile map to be freed
+ */
+static void ice_free_prof_map(struct ice_hw *hw, enum ice_block blk)
+{
+	struct ice_prof_map *del, *tmp;
+
+	if (LIST_EMPTY(&hw->blk[blk].es.prof_map))
+		return;
+
+	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &hw->blk[blk].es.prof_map,
+				 ice_prof_map, list) {
+		LIST_DEL(&del->list);
+		ice_free(hw, del);
+	}
+}
+
+/**
+ * ice_free_vsig_tbl - free complete VSIG table entries
+ * @hw: pointer to the hardware structure
+ * @blk: the hw block on which to free the VSIG table entries
+ */
+static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 i;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl)
+		return;
+
+	for (i = 1; i < ICE_MAX_VSIGS; i++)
+		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+			ice_vsig_free(hw, blk, i);
+}
+
+/**
+ * ice_free_hw_tbls - free hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+void ice_free_hw_tbls(struct ice_hw *hw)
+{
+	u8 i;
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		ice_free_prof_map(hw, (enum ice_block)i);
+		ice_free_vsig_tbl(hw, (enum ice_block)i);
+		ice_free(hw, hw->blk[i].xlt1.ptypes);
+		ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
+		ice_free(hw, hw->blk[i].xlt1.t);
+		ice_free(hw, hw->blk[i].xlt2.t);
+		ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
+		ice_free(hw, hw->blk[i].xlt2.vsis);
+		ice_free(hw, hw->blk[i].prof.t);
+		ice_free(hw, hw->blk[i].prof_redir.t);
+		ice_free(hw, hw->blk[i].es.t);
+		ice_free(hw, hw->blk[i].es.ref_count);
+
+		ice_free(hw, hw->blk[i].es.resource_used_hack);
+		ice_free(hw, hw->blk[i].prof.resource_used_hack);
+	}
+
+	ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
+}
+
+/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_flow_profs(struct ice_hw *hw)
+{
+	u8 i;
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		ice_init_lock(&hw->fl_profs_locks[i]);
+		INIT_LIST_HEAD(&hw->fl_profs[i]);
+	}
+}
+
+/**
+ * ice_init_sw_xlt1_db - init software xlt1 database from hw tables
+ * @hw: pointer to the hardware structure
+ * @blk: the hw block to initialize
+ */
+static
+void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 pt;
+
+	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
+		u8 ptg;
+
+		ptg = hw->blk[blk].xlt1.t[pt];
+		if (ptg != ICE_DEFAULT_PTG) {
+			ice_ptg_alloc_val(hw, blk, ptg);
+			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
+		}
+	}
+}
+
+/**
+ * ice_init_sw_xlt2_db - init software xlt2 database from hw tables
+ * @hw: pointer to the hardware structure
+ * @blk: the hw block to initialize
+ */
+static
+void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 vsi;
+
+	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
+		u16 vsig;
+
+		vsig = hw->blk[blk].xlt2.t[vsi];
+		if (vsig) {
+			ice_vsig_alloc_val(hw, blk, vsig);
+			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+			/* no changes at this time, since this has been
+			 * initialized from the original package
+			 */
+			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+		}
+	}
+}
+
+/**
+ * ice_init_sw_db - init software database from hw tables
+ * @hw: pointer to the hardware structure
+ */
+static
+void ice_init_sw_db(struct ice_hw *hw)
+{
+	u16 i;
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
+		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
+	}
+}
+
+/**
+ * ice_init_hw_tbls - init hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+{
+	u8 i;
+
+	ice_init_flow_profs(hw);
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+		struct ice_prof_tcam *prof = &hw->blk[i].prof;
+		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+		struct ice_es *es = &hw->blk[i].es;
+
+		hw->blk[i].overwrite = blk_sizes[i].overwrite;
+		es->reverse = blk_sizes[i].reverse;
+
+		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
+		xlt1->count = blk_sizes[i].xlt1;
+
+		xlt1->ptypes = (struct ice_ptg_ptype *)
+			ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
+
+		if (!xlt1->ptypes)
+			goto err;
+
+		xlt1->ptg_tbl = (struct ice_ptg_entry *)
+			ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
+
+		if (!xlt1->ptg_tbl)
+			goto err;
+
+		xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
+		if (!xlt1->t)
+			goto err;
+
+		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
+		xlt2->count = blk_sizes[i].xlt2;
+
+		xlt2->vsis = (struct ice_vsig_vsi *)
+			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
+
+		if (!xlt2->vsis)
+			goto err;
+
+		xlt2->vsig_tbl = (struct ice_vsig_entry *)
+			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
+		if (!xlt2->vsig_tbl)
+			goto err;
+
+		xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
+		if (!xlt2->t)
+			goto err;
+
+		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
+		prof->count = blk_sizes[i].prof_tcam;
+		prof->max_prof_id = blk_sizes[i].prof_id;
+		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
+		prof->t = (struct ice_prof_tcam_entry *)
+			ice_calloc(hw, prof->count, sizeof(*prof->t));
+
+		if (!prof->t)
+			goto err;
+
+		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
+		prof_redir->count = blk_sizes[i].prof_redir;
+		prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
+						 sizeof(*prof_redir->t));
+
+		if (!prof_redir->t)
+			goto err;
+
+		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
+		es->count = blk_sizes[i].es;
+		es->fvw = blk_sizes[i].fvw;
+		es->t = (struct ice_fv_word *)
+			ice_calloc(hw, es->count * es->fvw, sizeof(*es->t));
+
+		if (!es->t)
+			goto err;
+
+		es->ref_count = (u16 *)
+			ice_calloc(hw, es->count, sizeof(*es->ref_count));
+
+		if (!es->ref_count)
+			goto err;
+
+		es->resource_used_hack = (u8 *)
+			ice_calloc(hw, hw->blk[i].es.count, sizeof(u8));
+
+		if (!es->resource_used_hack)
+			goto err;
+
+		prof->resource_used_hack = (u8 *)ice_calloc(hw, prof->count,
+							    sizeof(u8));
+
+		if (!prof->resource_used_hack)
+			goto err;
+
+		INIT_LIST_HEAD(&es->prof_map);
+
+		/* Now that tables are allocated, read in package data */
+		ice_fill_blk_tbls(hw, (enum ice_block)i);
+	}
+
+	ice_init_sw_db(hw);
+
+	return ICE_SUCCESS;
+
+err:
+	ice_free_hw_tbls(hw);
+	return ICE_ERR_NO_MEMORY;
+}
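+
+/* Illustrative pairing (not part of this patch): ice_init_hw_tbls() is meant
+ * to run once the package state is known (ice_fill_tbl() tolerates a NULL
+ * hw->seg), and a successful call should eventually be balanced by
+ * ice_free_hw_tbls():
+ *
+ *	if (!ice_init_hw_tbls(hw)) {
+ *		use_flex_pipeline(hw);
+ *		ice_free_hw_tbls(hw);
+ *	}
+ *
+ * use_flex_pipeline() is a placeholder for the flow/profile code that
+ * follows in this series.
+ */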
+
+/**
+ * ice_prof_gen_key - generate profile id key
+ * @hw: pointer to the hw struct
+ * @blk: the block in which to write the profile id
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: cdid portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ * @key: output of profile id key
+ */
+static enum ice_status
+ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
+		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
+		 u8 key[ICE_TCAM_KEY_SZ])
+{
+	struct ice_prof_id_key inkey;
+
+	inkey.xlt1 = ptg;
+	inkey.xlt2_cdid = CPU_TO_LE16(vsig);
+	inkey.flags = CPU_TO_LE16(flags);
+
+	switch (hw->blk[blk].prof.cdid_bits) {
+	case 0:
+		break;
+	case 2:
+#define ICE_CD_2_M 0xC000U
+#define ICE_CD_2_S 14
+		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
+		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
+		break;
+	case 4:
+#define ICE_CD_4_M 0xF000U
+#define ICE_CD_4_S 12
+		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
+		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
+		break;
+	case 8:
+#define ICE_CD_8_M 0xFF00U
+#define ICE_CD_8_S 8
+		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
+		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
+		break;
+	default:
+		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
+		break;
+	}
+
+	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
+			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
+}
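
/* Standalone sketch of the CDID packing above, shown for the 2-bit case
 * (illustrative only; endianness handling is omitted and the function name
 * is hypothetical). With cdid_bits == 2, BIT(cdid) lands in bit 14 or 15 of
 * the XLT2/CDID word.
 */
static u16 demo_pack_cdid2(u16 vsig, u8 cdid)
{
	u16 v = vsig;

	v &= (u16)~0xC000U;		/* clear ICE_CD_2_M */
	v |= (u16)((1U << cdid) << 14);	/* BIT(cdid) << ICE_CD_2_S */
	return v;
}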
+
+/**
+ * ice_tcam_write_entry - write tcam entry
+ * @hw: pointer to the hw struct
+ * @blk: the block in which to write profile id to
+ * @idx: the entry index to write to
+ * @prof_id: profile id
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: cdid portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ */
+static enum ice_status
+ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
+		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
+		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
+		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
+{
+	enum ice_status status;
+
+	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
+				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
+	if (!status) {
+		hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
+		hw->blk[blk].prof.t[idx].prof_id = prof_id;
+	}
+
+	return status;
+}
+
+/**
+ * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsig: VSIG to query
+ * @refs: pointer to variable to receive the reference count
+ */
+static enum ice_status
+ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+	struct ice_vsig_vsi *ptr;
+
+	*refs = 0;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	while (ptr) {
+		(*refs)++;
+		ptr = ptr->next_vsi;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_ptg - get or allocate a ptg for a ptype
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptype: the ptype to retrieve the PTG for
+ * @ptg: receives the PTG of the ptype
+ * @add: receives a boolean indicating whether the PTG was added
+ */
+static enum ice_status
+ice_get_ptg(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg,
+	    bool *add)
+{
+	enum ice_status status;
+
+	*ptg = ICE_DEFAULT_PTG;
+	*add = false;
+
+	status = ice_ptg_find_ptype(hw, blk, ptype, ptg);
+	if (status)
+		return status;
+
+	if (*ptg == ICE_DEFAULT_PTG) {
+		/* need to allocate a PTG, and add ptype to it */
+		*ptg = ice_ptg_alloc(hw, blk);
+		if (*ptg == ICE_DEFAULT_PTG)
+			return ICE_ERR_HW_TABLE;
+
+		status = ice_ptg_add_mv_ptype(hw, blk, ptype, *ptg);
+		if (status)
+			return ICE_ERR_HW_TABLE;
+
+		*add = true;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_has_prof_vsig - check to see if VSIG has a specific profile
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsig: VSIG to check against
+ * @hdl: profile handle
+ */
+static bool
+ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+	struct ice_vsig_prof *ent;
+
+	LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+			    ice_vsig_prof, list) {
+		if (ent->profile_cookie == hdl)
+			return true;
+	}
+
+	ice_debug(hw, ICE_DBG_INIT,
+		  "Characteristic list for vsi group %d not found.\n",
+		  vsig);
+	return false;
+}
+
+/**
+ * ice_prof_bld_es - build profile id extraction sequence changes
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
+		struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
+{
+	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
+	struct ice_chs_chg *tmp;
+
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
+			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
+			struct ice_pkg_es *p;
+			u32 id;
+
+			id = ice_sect_id(blk, ICE_VEC_TBL);
+			p = (struct ice_pkg_es *)
+				ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+							  vec_size -
+							  sizeof(p->es[0]));
+
+			if (!p)
+				return ICE_ERR_MAX_LIMIT;
+
+			p->count = CPU_TO_LE16(1);
+			p->offset = CPU_TO_LE16(tmp->prof_id);
+
+			ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
+				   ICE_NONDMA_TO_NONDMA);
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_tcam - build profile id tcam changes
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
+		  struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
+{
+	struct ice_chs_chg *tmp;
+
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		if ((tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) ||
+		    tmp->type == ICE_TCAM_REM) {
+			struct ice_prof_id_section *p;
+			u32 id;
+
+			id = ice_sect_id(blk, ICE_PROF_TCAM);
+			p = (struct ice_prof_id_section *)
+				ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+			if (!p)
+				return ICE_ERR_MAX_LIMIT;
+
+			p->count = CPU_TO_LE16(1);
+			p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
+			p->entry[0].prof_id = tmp->prof_id;
+
+			ice_memcpy(p->entry[0].key,
+				   &hw->blk[blk].prof.t[tmp->tcam_idx].key,
+				   sizeof(hw->blk[blk].prof.t->key),
+				   ICE_NONDMA_TO_NONDMA);
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_xlt1 - build xlt1 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
+		  struct LIST_HEAD_TYPE *chgs)
+{
+	struct ice_chs_chg *tmp;
+
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
+			struct ice_xlt1_section *p;
+			u32 id;
+
+			id = ice_sect_id(blk, ICE_XLT1);
+			p = (struct ice_xlt1_section *)
+				ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+			if (!p)
+				return ICE_ERR_MAX_LIMIT;
+
+			p->count = CPU_TO_LE16(1);
+			p->offset = CPU_TO_LE16(tmp->ptype);
+			p->value[0] = tmp->ptg;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_xlt2 - build xlt2 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
+		  struct LIST_HEAD_TYPE *chgs)
+{
+	struct ice_chs_chg *tmp;
+
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		bool found = false;
+
+		if (tmp->type == ICE_VSIG_ADD)
+			found = true;
+		else if (tmp->type == ICE_VSI_MOVE)
+			found = true;
+		else if (tmp->type == ICE_VSIG_REM)
+			found = true;
+
+		if (found) {
+			struct ice_xlt2_section *p;
+			u32 id;
+
+			id = ice_sect_id(blk, ICE_XLT2);
+			p = (struct ice_xlt2_section *)
+				ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+			if (!p)
+				return ICE_ERR_MAX_LIMIT;
+
+			p->count = CPU_TO_LE16(1);
+			p->offset = CPU_TO_LE16(tmp->vsi);
+			p->value[0] = CPU_TO_LE16(tmp->vsig);
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_upd_prof_hw - update hardware using the change list
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
+		struct LIST_HEAD_TYPE *chgs)
+{
+	struct ice_buf_build *b;
+	struct ice_chs_chg *tmp;
+	enum ice_status status;
+	u16 pkg_sects = 0;
+	u16 sects = 0;
+	u16 xlt1 = 0;
+	u16 xlt2 = 0;
+	u16 tcam = 0;
+	u16 es = 0;
+
+	/* count number of sections we need */
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		switch (tmp->type) {
+		case ICE_PTG_ES_ADD:
+			if (tmp->add_ptg)
+				xlt1++;
+			if (tmp->add_prof)
+				es++;
+			break;
+		case ICE_TCAM_ADD:
+		case ICE_TCAM_REM:
+			tcam++;
+			break;
+		case ICE_VSIG_ADD:
+		case ICE_VSI_MOVE:
+		case ICE_VSIG_REM:
+			xlt2++;
+			break;
+		default:
+			break;
+		}
+	}
+	sects = xlt1 + xlt2 + tcam + es;
+
+	if (!sects)
+		return ICE_SUCCESS;
+
+	/* Build update package buffer */
+	b = ice_pkg_buf_alloc(hw);
+	if (!b)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_pkg_buf_reserve_section(b, sects);
+	if (status)
+		goto error_tmp;
+
+	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
+	if (es) {
+		status = ice_prof_bld_es(hw, blk, b, chgs);
+		if (status)
+			goto error_tmp;
+	}
+
+	if (tcam) {
+		status = ice_prof_bld_tcam(hw, blk, b, chgs);
+		if (status)
+			goto error_tmp;
+	}
+
+	if (xlt1) {
+		status = ice_prof_bld_xlt1(blk, b, chgs);
+		if (status)
+			goto error_tmp;
+	}
+
+	if (xlt2) {
+		status = ice_prof_bld_xlt2(blk, b, chgs);
+		if (status)
+			goto error_tmp;
+	}
+
+	/* After package buffer build check if the section count in buffer is
+	 * non-zero and matches the number of sections detected for package
+	 * update.
+	 */
+	pkg_sects = ice_pkg_buf_get_active_sections(b);
+	if (!pkg_sects || pkg_sects != sects) {
+		status = ICE_ERR_INVAL_SIZE;
+		goto error_tmp;
+	}
+
+	/* update package */
+	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
+	if (status == ICE_ERR_AQ_ERROR)
+		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
+
+error_tmp:
+	ice_pkg_buf_free(hw, b);
+	return status;
+}
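
/* Illustrative sketch, not part of this patch: queue a single PTG/ES change
 * record and flush it through ice_upd_prof_hw(). All field values below are
 * placeholders and the function name is hypothetical.
 */
static enum ice_status demo_flush_one_change(struct ice_hw *hw)
{
	struct LIST_HEAD_TYPE chg;
	struct ice_chs_chg *p;
	enum ice_status status;

	INIT_LIST_HEAD(&chg);

	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	p->type = ICE_PTG_ES_ADD;	/* counts toward both xlt1 and es */
	p->add_ptg = true;
	p->add_prof = true;
	p->ptype = ICE_PTYPE_IPV4_PAY;	/* placeholder ptype */
	p->ptg = 1;			/* placeholder PTG */
	p->prof_id = 0;			/* placeholder profile id */

	LIST_ADD(&p->list_entry, &chg);

	/* builds one buffer with sections ordered ES, TCAM, XLT1, XLT2 */
	status = ice_upd_prof_hw(hw, ICE_BLK_RSS, &chg);

	LIST_DEL(&p->list_entry);
	ice_free(hw, p);
	return status;
}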
+
+/**
+ * ice_add_prof - add profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking id
+ * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @es: extraction sequence (length of array is determined by the block)
+ *
+ * This function registers a profile, which matches a set of PTYPEs with a
+ * particular extraction sequence. Although the hardware profile is allocated
+ * here, it will not be written until the first call to ice_add_flow that
+ * specifies the id value used here.
+ */
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+	     struct ice_fv_word *es)
+{
+	u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+	struct ice_prof_map *prof;
+	enum ice_status status;
+	u32 byte = 0;
+	u8 prof_id;
+
+	/* search for existing profile */
+	status = ice_find_prof_id(hw, blk, es, &prof_id);
+	if (status) {
+		/* allocate profile id */
+		status = ice_alloc_prof_id(hw, blk, &prof_id);
+		if (status)
+			goto err_ice_add_prof;
+
+		/* and write new es */
+		ice_write_es(hw, blk, prof_id, es);
+	}
+
+	/* add profile info */
+
+	prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
+	if (!prof) {
+		status = ICE_ERR_NO_MEMORY;
+		goto err_ice_add_prof;
+	}
+
+	prof->profile_cookie = id;
+	prof->prof_id = prof_id;
+	prof->ptype_count = 0;
+	prof->context = 0;
+
+	/* build list of ptgs */
+	while (bytes && prof->ptype_count < ICE_MAX_PTYPE_PER_PROFILE) {
+		u32 bit;
+
+		if (!ptypes[byte]) {
+			bytes--;
+			byte++;
+			continue;
+		}
+		/* Examine 8 bits per byte */
+		for (bit = 0; bit < 8; bit++) {
+			if (ptypes[byte] & (1 << bit)) {
+				u16 ptype;
+				u8 m;
+
+				ptype = byte * 8 + bit;
+				if (ptype < ICE_FLOW_PTYPE_MAX) {
+					prof->ptype[prof->ptype_count] = ptype;
+
+					if (++prof->ptype_count >=
+						ICE_MAX_PTYPE_PER_PROFILE)
+						break;
+				}
+
+				/* nothing left in byte, then exit */
+				m = ~((1 << (bit + 1)) - 1);
+				if (!(ptypes[byte] & m))
+					break;
+			}
+		}
+
+		bytes--;
+		byte++;
+	}
+	LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
+
+	return ICE_SUCCESS;
+
+err_ice_add_prof:
+	return status;
+}
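
/* Illustrative caller sketch, not part of this patch: register a profile
 * that matches IPv4 payload packets. The tracking cookie and the contents
 * of the extraction sequence are placeholders, and the function name is
 * hypothetical.
 */
static enum ice_status
demo_register_prof(struct ice_hw *hw, struct ice_fv_word *es)
{
	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };

	/* set the bit for PTYPE 23 (ICE_PTYPE_IPV4_PAY) */
	ptypes[ICE_PTYPE_IPV4_PAY / 8] |= 1 << (ICE_PTYPE_IPV4_PAY % 8);

	return ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes, es);
}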
+
+/**
+ * ice_search_prof_id - Search for a profile tracking ID
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added.
+ */
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+	struct ice_prof_map *entry = NULL;
+	struct ice_prof_map *map;
+
+	LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
+			    list) {
+		if (map->profile_cookie == id) {
+			entry = map;
+			break;
+		}
+	}
+
+	return entry;
+}
+
+/**
+ * ice_set_prof_context - Set context for a given profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @cntxt: context
+ */
+struct ice_prof_map *
+ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
+{
+	struct ice_prof_map *entry;
+
+	entry = ice_search_prof_id(hw, blk, id);
+	if (entry)
+		entry->context = cntxt;
+
+	return entry;
+}
+
+/**
+ * ice_get_prof_context - Get context for a given profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @cntxt: pointer to variable to receive the context
+ */
+struct ice_prof_map *
+ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
+{
+	struct ice_prof_map *entry;
+
+	entry = ice_search_prof_id(hw, blk, id);
+	if (entry)
+		*cntxt = entry->context;
+
+	return entry;
+}
+
+/**
+ * ice_vsig_prof_id_count - count profiles in a VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ */
+static u16
+ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
+	struct ice_vsig_prof *p;
+
+	LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+			    ice_vsig_prof, list) {
+		count++;
+	}
+
+	return count;
+}
+
+/**
+ * ice_rel_tcam_idx - release a tcam index
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @idx: the index to release
+ */
+static enum ice_status
+ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+{
+	/* Masks to invoke a never match entry */
+	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
+	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+	enum ice_status status;
+
+	/* write the tcam entry */
+	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
+				      dc_msk, nm_msk);
+	if (status)
+		return status;
+
+	/* release the tcam entry */
+	status = ice_free_tcam_ent(hw, blk, idx);
+
+	return status;
+}
+
+/**
+ * ice_rem_prof_id - remove one profile from a VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @prof: pointer to profile structure to remove
+ * @chg: pointer to list to record changes
+ */
+static enum ice_status
+ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+		struct ice_vsig_prof *prof, struct LIST_HEAD_TYPE *chg)
+{
+	enum ice_status status;
+	struct ice_chs_chg *p;
+	u16 i;
+
+	for (i = 0; i < prof->tcam_count; i++) {
+		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+		if (!p)
+			goto err_ice_rem_prof_id;
+
+		p->type = ICE_TCAM_REM;
+		p->vsig = vsig;
+		p->prof_id = prof->tcam[i].prof_id;
+		p->tcam_idx = prof->tcam[i].tcam_idx;
+
+		p->ptg = prof->tcam[i].ptg;
+		prof->tcam[i].in_use = false;
+		p->orig_ent = hw->blk[blk].prof.t[p->tcam_idx];
+		status = ice_rel_tcam_idx(hw, blk, p->tcam_idx);
+		if (!status)
+			status = ice_prof_dec_ref(hw, blk, p->prof_id);
+
+		LIST_ADD(&p->list_entry, chg);
+
+		if (status)
+			goto err_ice_rem_prof_id;
+	}
+
+	return ICE_SUCCESS;
+
+err_ice_rem_prof_id:
+	/* caller will clean up the change list */
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_rem_vsig - remove VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: the VSIG to remove
+ * @chg: the change list
+ */
+static enum ice_status
+ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+	     struct LIST_HEAD_TYPE *chg)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+	struct ice_vsig_vsi *vsi_cur;
+	struct ice_vsig_prof *d, *t;
+	enum ice_status status;
+
+	/* remove TCAM entries */
+	LIST_FOR_EACH_ENTRY_SAFE(d, t,
+				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+				 ice_vsig_prof, list) {
+		status = ice_rem_prof_id(hw, blk, vsig, d, chg);
+		if (status)
+			goto err_ice_rem_vsig;
+
+		LIST_DEL(&d->list);
+		ice_free(hw, d);
+	}
+
+	/* Move all VSIS associated with this VSIG to the default VSIG */
+	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	if (!vsi_cur)
+		return ICE_ERR_CFG;
+
+	do {
+		struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+		struct ice_chs_chg *p;
+
+		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+		if (!p)
+			goto err_ice_rem_vsig;
+
+		p->type = ICE_VSIG_REM;
+		p->orig_vsig = vsig;
+		p->vsig = ICE_DEFAULT_VSIG;
+		p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+
+		LIST_ADD(&p->list_entry, chg);
+
+		status = ice_vsig_free(hw, blk, vsig);
+		if (status)
+			return status;
+
+		vsi_cur = tmp;
+	} while (vsi_cur);
+
+	return ICE_SUCCESS;
+
+err_ice_rem_vsig:
+	/* the caller will free up the change list */
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @hdl: profile handle indicating which profile to remove
+ * @chg: list to receive a record of changes
+ */
+static enum ice_status
+ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+		     struct LIST_HEAD_TYPE *chg)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+	struct ice_vsig_prof *p, *t;
+	enum ice_status status;
+
+	LIST_FOR_EACH_ENTRY_SAFE(p, t,
+				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+				 ice_vsig_prof, list) {
+		if (p->profile_cookie == hdl) {
+			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
+				/* this is the last profile, remove the VSIG */
+				return ice_rem_vsig(hw, blk, vsig, chg);
+
+			status = ice_rem_prof_id(hw, blk, vsig, p, chg);
+			if (!status) {
+				LIST_DEL(&p->list);
+				ice_free(hw, p);
+			}
+			return status;
+		}
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_flow_all - remove all flows with a particular profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ */
+static enum ice_status
+ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+	struct ice_chs_chg *del, *tmp;
+	struct LIST_HEAD_TYPE chg;
+	enum ice_status status;
+	u16 i;
+
+	INIT_LIST_HEAD(&chg);
+
+	for (i = 1; i < ICE_MAX_VSIGS; i++) {
+		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
+			if (ice_has_prof_vsig(hw, blk, i, id)) {
+				status = ice_rem_prof_id_vsig(hw, blk, i, id,
+							      &chg);
+				if (status)
+					goto err_ice_rem_flow_all;
+			}
+		}
+	}
+
+	status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_flow_all:
+	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+		LIST_DEL(&del->list_entry);
+		ice_free(hw, del);
+	}
+
+	return status;
+}
+
+/**
+ * ice_rem_prof - remove profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will remove the profile specified by the id parameter, which was
+ * previously created through ice_add_prof. If any existing entries
+ * are associated with this profile, they will be removed as well.
+ */
+enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+	enum ice_status status;
+	struct ice_prof_map *pmap;
+
+	pmap = ice_search_prof_id(hw, blk, id);
+	if (!pmap)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	status = ice_free_prof_id(hw, blk, pmap->prof_id);
+	if (status)
+		return status;
+
+	/* remove all flows with this profile */
+	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
+	if (status)
+		return status;
+
+	LIST_DEL(&pmap->list);
+	ice_free(hw, pmap);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_prof_ptgs - get ptgs for profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @hdl: profile handle
+ * @chg: change list
+ */
+static enum ice_status
+ice_get_prof_ptgs(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+		  struct LIST_HEAD_TYPE *chg)
+{
+	struct ice_prof_map *map;
+	struct ice_chs_chg *p;
+	u16 i;
+
+	/* Get the details on the profile specified by the handle id */
+	map = ice_search_prof_id(hw, blk, hdl);
+	if (!map)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	for (i = 0; i < map->ptype_count; i++) {
+		enum ice_status status;
+		bool add;
+		u8 ptg;
+
+		status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
+		if (status)
+			goto err_ice_get_prof_ptgs;
+
+		if (add || !hw->blk[blk].es.ref_count[map->prof_id]) {
+			/* add PTG to change list */
+			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+			if (!p)
+				goto err_ice_get_prof_ptgs;
+
+			p->type = ICE_PTG_ES_ADD;
+			p->ptype = map->ptype[i];
+			p->ptg = ptg;
+			p->add_ptg = add;
+
+			p->add_prof = !hw->blk[blk].es.ref_count[map->prof_id];
+			p->prof_id = map->prof_id;
+
+			LIST_ADD(&p->list_entry, chg);
+		}
+	}
+
+	return ICE_SUCCESS;
+
+err_ice_get_prof_ptgs:
+	/* let caller clean up the change list */
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: VSIG from which to copy the list
+ * @lst: output list
+ *
+ * This routine makes a copy of the list of profiles in the specified VSIG.
+ */
+static enum ice_status
+ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+		   struct LIST_HEAD_TYPE *lst)
+{
+	struct ice_vsig_prof *ent1, *ent2;
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+
+	LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+			    ice_vsig_prof, list) {
+		struct ice_vsig_prof *p;
+
+		/* copy to the input list */
+		p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
+		if (!p)
+			goto err_ice_get_profs_vsig;
+
+		ice_memcpy(p, ent1, sizeof(*p), ICE_NONDMA_TO_NONDMA);
+
+		LIST_ADD(&p->list, lst);
+	}
+
+	return ICE_SUCCESS;
+
+err_ice_get_profs_vsig:
+	LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
+		LIST_DEL(&ent1->list);
+		ice_free(hw, ent1);
+	}
+
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_add_prof_to_lst - add profile entry to a list
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @lst: the list to be added to
+ * @hdl: profile handle of entry to add
+ */
+static enum ice_status
+ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
+		    struct LIST_HEAD_TYPE *lst, u64 hdl)
+{
+	struct ice_vsig_prof *p;
+	struct ice_prof_map *map;
+	u16 i;
+
+	map = ice_search_prof_id(hw, blk, hdl);
+	if (!map)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
+	if (!p)
+		return ICE_ERR_NO_MEMORY;
+
+	p->profile_cookie = map->profile_cookie;
+	p->prof_id = map->prof_id;
+	p->tcam_count = map->ptype_count;
+
+	for (i = 0; i < map->ptype_count; i++) {
+		enum ice_status status;
+		u8 ptg;
+
+		p->tcam[i].prof_id = map->prof_id;
+		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
+
+		status = ice_ptg_find_ptype(hw, blk, map->ptype[i], &ptg);
+		if (status) {
+			ice_free(hw, p);
+			return status;
+		}
+
+		p->tcam[i].ptg = ptg;
+	}
+
+	LIST_ADD(&p->list, lst);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_move_vsi - move VSI to another VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the VSI to move
+ * @vsig: the VSIG to move the VSI to
+ * @chg: the change list
+ */
+static enum ice_status
+ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
+	     struct LIST_HEAD_TYPE *chg)
+{
+	enum ice_status status;
+	struct ice_chs_chg *p;
+	u16 orig_vsig;
+
+	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+	if (!p)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+	if (!status)
+		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+	if (status) {
+		ice_free(hw, p);
+		return status;
+	}
+
+	p->type = ICE_VSI_MOVE;
+	p->vsi = vsi;
+	p->orig_vsig = orig_vsig;
+	p->vsig = vsig;
+
+	LIST_ADD(&p->list_entry, chg);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_tcam_ena_dis - add enable or disable tcam change
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @enable: true to enable, false to disable
+ * @vsig: the vsig of the tcam entry
+ * @tcam: pointer the tcam info structure of the tcam to disable
+ * @chg: the change list
+ *
+ * This function appends an enable or disable tcam entry in the change log
+ */
+static enum ice_status
+ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
+		      u16 vsig, struct ice_tcam_inf *tcam,
+		      struct LIST_HEAD_TYPE *chg)
+{
+	enum ice_status status;
+	struct ice_chs_chg *p;
+
+	/* Default: enable means change the low flag bit to don't care */
+	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+
+	/* If disabled, change the low flag bit to never match */
+	if (!enable) {
+		dc_msk[0] = 0x00;
+		nm_msk[0] = 0x01;
+	}
+
+	/* add TCAM to change list */
+	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+	if (!p)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
+				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
+				      nm_msk);
+	if (status)
+		goto err_ice_prof_tcam_ena_dis;
+
+	tcam->in_use = enable;
+
+	p->type = ICE_TCAM_ADD;
+	p->add_tcam_idx = true;
+	p->prof_id = tcam->prof_id;
+	p->ptg = tcam->ptg;
+	p->vsig = 0;
+	p->tcam_idx = tcam->tcam_idx;
+
+	/* log change */
+	LIST_ADD(&p->list_entry, chg);
+
+	return ICE_SUCCESS;
+
+err_ice_prof_tcam_ena_dis:
+	ice_free(hw, p);
+	return status;
+}
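
/* Standalone sketch of the flag-bit trick above (illustrative only): enable
 * makes the low flag bit a don't-care, while disable turns it into a
 * never-match bit, so an entry can be parked without freeing its TCAM slot.
 */
static void demo_flag_masks(bool enable, u8 *dc0, u8 *nm0)
{
	*dc0 = enable ? 0x01 : 0x00;	/* don't-care on bit 0 when enabled */
	*nm0 = enable ? 0x00 : 0x01;	/* never-match on bit 0 when disabled */
}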
+
+/**
+ * ice_adj_prof_priorities - adjust profile based on priorities
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: the VSIG for which to adjust profile priorities
+ * @chg: the change list
+ */
+static enum ice_status
+ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+			struct LIST_HEAD_TYPE *chg)
+{
+	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
+	struct ice_vsig_prof *t;
+	enum ice_status status;
+	u16 idx;
+
+	ice_memset(ptgs_used, 0, sizeof(ptgs_used), ICE_NONDMA_MEM);
+	idx = vsig & ICE_VSIG_IDX_M;
+
+	/* Priority is based on the order in which the profiles are added. The
+	 * newest added profile has highest priority and the oldest added
+	 * profile has the lowest priority. Since the profile property list for
+	 * a VSIG is sorted from newest to oldest, this code traverses the list
+	 * in order and enables the first of each PTG that it finds (that is not
+	 * already enabled); it also disables any duplicate PTGs that it finds
+	 * in the older profiles (that are currently enabled).
+	 */
+
+	LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+			    ice_vsig_prof, list) {
+		u16 i;
+
+		for (i = 0; i < t->tcam_count; i++) {
+			/* Scan the priorities from newest to oldest.
+			 * Make sure that the newest profiles take priority.
+			 */
+			if (ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
+			    t->tcam[i].in_use) {
+				/* need to mark this PTG as never match, as it
+				 * was already in use and therefore duplicate
+				 * (and lower priority)
+				 */
+				status = ice_prof_tcam_ena_dis(hw, blk, false,
+							       vsig,
+							       &t->tcam[i],
+							       chg);
+				if (status)
+					return status;
+			} else if (!ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
+				   !t->tcam[i].in_use) {
+				/* need to enable this PTG, as it is not in
+				 * use and not enabled (highest priority)
+				 */
+				status = ice_prof_tcam_ena_dis(hw, blk, true,
+							       vsig,
+							       &t->tcam[i],
+							       chg);
+				if (status)
+					return status;
+			}
+
+			/* keep track of used ptgs */
+			ice_set_bit(t->tcam[i].ptg, ptgs_used);
+		}
+	}
+
+	return ICE_SUCCESS;
+}
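
/* Simplified standalone model of the priority walk above (illustrative
 * only; the real code skips entries already in the desired state). Index 0
 * is the newest profile: the first user of each PTG stays enabled and any
 * older duplicate is disabled.
 */
struct demo_tcam {
	u8 ptg;
	bool in_use;
};

static void demo_adjust_priorities(struct demo_tcam *t, u16 n)
{
	bool used[ICE_MAX_PTGS] = { false };
	u16 i;

	for (i = 0; i < n; i++) {
		t[i].in_use = !used[t[i].ptg];	/* enable only the first hit */
		used[t[i].ptg] = true;
	}
}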
+
+/**
+ * ice_add_prof_id_vsig - add profile to VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: the VSIG to which this profile is to be added
+ * @hdl: the profile handle indicating the profile to add
+ * @chg: the change list
+ */
+static enum ice_status
+ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+		     struct LIST_HEAD_TYPE *chg)
+{
+	/* Masks that ignore flags */
+	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+	struct ice_prof_map *map;
+	struct ice_vsig_prof *t;
+	struct ice_chs_chg *p;
+	u16 i;
+
+	/* Get the details on the profile specified by the handle id */
+	map = ice_search_prof_id(hw, blk, hdl);
+	if (!map)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	/* Error, if this VSIG already has this profile */
+	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
+		return ICE_ERR_ALREADY_EXISTS;
+
+	/* new VSIG profile structure */
+	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
+	if (!t)
+		goto err_ice_add_prof_id_vsig;
+
+	t->profile_cookie = map->profile_cookie;
+	t->prof_id = map->prof_id;
+	t->tcam_count = map->ptype_count;
+
+	/* create tcam entries */
+	for (i = 0; i < map->ptype_count; i++) {
+		enum ice_status status;
+		u16 tcam_idx;
+		bool add;
+		u8 ptg;
+
+		/* If properly sequenced, we should never have to allocate new
+		 * PTGs
+		 */
+		status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
+		if (status)
+			goto err_ice_add_prof_id_vsig;
+
+		/* add TCAM to change list */
+		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+		if (!p)
+			goto err_ice_add_prof_id_vsig;
+
+		/* allocate the tcam entry index */
+		status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+		if (status)
+			goto err_ice_add_prof_id_vsig;
+
+		t->tcam[i].ptg = ptg;
+		t->tcam[i].prof_id = map->prof_id;
+		t->tcam[i].tcam_idx = tcam_idx;
+		t->tcam[i].in_use = true;
+
+		p->type = ICE_TCAM_ADD;
+		p->add_tcam_idx = true;
+		p->prof_id = t->tcam[i].prof_id;
+		p->ptg = t->tcam[i].ptg;
+		p->vsig = vsig;
+		p->tcam_idx = t->tcam[i].tcam_idx;
+
+		/* write the tcam entry */
+		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
+					      t->tcam[i].prof_id,
+					      t->tcam[i].ptg, vsig, 0, 0,
+					      vl_msk, dc_msk, nm_msk);
+		if (status)
+			goto err_ice_add_prof_id_vsig;
+
+		/* this increments the reference count of how many tcam entries
+		 * are using this hw profile id
+		 */
+		status = ice_prof_inc_ref(hw, blk, t->tcam[i].prof_id);
+
+		/* log change */
+		LIST_ADD(&p->list_entry, chg);
+	}
+
+	/* add profile to VSIG */
+	LIST_ADD(&t->list,
+		 &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+
+	return ICE_SUCCESS;
+
+err_ice_add_prof_id_vsig:
+	/* let caller clean up the change list */
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_create_prof_id_vsig - add a new VSIG with a single profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @hdl: the profile handle of the profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
+			struct LIST_HEAD_TYPE *chg)
+{
+	enum ice_status status;
+	struct ice_chs_chg *p;
+	u16 new_vsig;
+
+	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+	if (!p)
+		return ICE_ERR_NO_MEMORY;
+
+	new_vsig = ice_vsig_alloc(hw, blk);
+	if (!new_vsig) {
+		status = ICE_ERR_HW_TABLE;
+		goto err_ice_create_prof_id_vsig;
+	}
+
+	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
+	if (status)
+		goto err_ice_create_prof_id_vsig;
+
+	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+	if (status)
+		goto err_ice_create_prof_id_vsig;
+
+	p->type = ICE_VSIG_ADD;
+	p->vsi = vsi;
+	p->orig_vsig = ICE_DEFAULT_VSIG;
+	p->vsig = new_vsig;
+
+	LIST_ADD(&p->list_entry, chg);
+
+	return ICE_SUCCESS;
+
+err_ice_create_prof_id_vsig:
+	/* free the unqueued change record; caller cleans up the list */
+	ice_free(hw, p);
+	return status;
+}
+
+/**
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @lst: the list of profiles that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+			 struct LIST_HEAD_TYPE *lst, struct LIST_HEAD_TYPE *chg)
+{
+	struct ice_vsig_prof *t;
+	enum ice_status status;
+	u16 vsig;
+
+	vsig = ice_vsig_alloc(hw, blk);
+	if (!vsig)
+		return ICE_ERR_HW_TABLE;
+
+	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
+	if (status)
+		return status;
+
+	LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
+		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
+					      chg);
+		if (status)
+			return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_find_prof_vsig - find a VSIG with a specific profile handle
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @hdl: the profile handle of the profile to search for
+ * @vsig: returns the VSIG with the matching profile
+ */
+static bool
+ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
+{
+	struct ice_vsig_prof *t;
+	struct LIST_HEAD_TYPE lst;
+	enum ice_status status;
+
+	INIT_LIST_HEAD(&lst);
+
+	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
+	if (!t)
+		return false;
+
+	t->profile_cookie = hdl;
+	LIST_ADD(&t->list, &lst);
+
+	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
+
+	LIST_DEL(&t->list);
+	ice_free(hw, t);
+
+	return status == ICE_SUCCESS;
+}
+
+/**
+ * ice_add_prof_id_flow - add profile flow
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the vsi to enable with the profile specified by id
+ * @hdl: profile handle
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the id parameter for the VSI specified by the vsi
+ * parameter. Once successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+	struct ice_vsig_prof *tmp1, *del1;
+	struct LIST_HEAD_TYPE union_lst;
+	struct ice_chs_chg *tmp, *del;
+	struct LIST_HEAD_TYPE chrs;
+	struct LIST_HEAD_TYPE chg;
+	enum ice_status status;
+	u16 vsig, or_vsig = 0;
+
+	INIT_LIST_HEAD(&union_lst);
+	INIT_LIST_HEAD(&chrs);
+	INIT_LIST_HEAD(&chg);
+
+	status = ice_get_prof_ptgs(hw, blk, hdl, &chg);
+	if (status)
+		return status;
+
+	/* determine if vsi is already part of a VSIG */
+	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+	if (!status && vsig) {
+		bool only_vsi;
+		u16 ref;
+
+		/* found in vsig */
+		or_vsig = vsig;
+
+		/* make sure that there is no overlap/conflict between the new
+		 * characteristics and the existing ones; we don't support that
+		 * scenario
+		 */
+		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
+			status = ICE_ERR_ALREADY_EXISTS;
+			goto err_ice_add_prof_id_flow;
+		}
+
+		/* last VSI in the VSIG? */
+		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+		if (status)
+			goto err_ice_add_prof_id_flow;
+		only_vsi = (ref == 1);
+
+		/* create a union of the current profiles and the one being
+		 * added
+		 */
+		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
+		if (status)
+			goto err_ice_add_prof_id_flow;
+
+		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
+		if (status)
+			goto err_ice_add_prof_id_flow;
+
+		/* search for an existing VSIG with an exact characteristic
+		 * match
+		 */
+		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
+		if (!status) {
+			/* found an exact match */
+			/* move vsi to the VSIG that matches */
+			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+
+			/* remove original VSIG if we just moved the only VSI
+			 * from it
+			 */
+			if (only_vsi) {
+				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
+				if (status)
+					goto err_ice_add_prof_id_flow;
+			}
+		} else if (only_vsi) {
+			/* If the original VSIG only contains one VSI, then it
+			 * will be the requesting VSI. In this case the VSI is
+			 * not sharing entries and we can simply add the new
+			 * profile to the VSIG.
+			 */
+			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+
+			/* Adjust priorities */
+			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+		} else {
+			/* No match, so we need a new VSIG */
+			status = ice_create_vsig_from_lst(hw, blk, vsi,
+							  &union_lst, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+
+			/* Adjust priorities */
+			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+		}
+	} else {
+		/* need to find or add a VSIG */
+		/* search for an existing VSIG with an exact characteristic
+		 * match
+		 */
+		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
+			/* found an exact match */
+			/* add or move vsi to the VSIG that matches */
+			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+		} else {
+			/* we did not find an exact match */
+			/* we need to add a VSIG */
+			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
+							 &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+		}
+	}
+
+	/* update hardware */
+	if (!status)
+		status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_add_prof_id_flow:
+	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+		LIST_DEL(&del->list_entry);
+		ice_free(hw, del);
+	}
+
+	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
+		LIST_DEL(&del1->list);
+		ice_free(hw, del1);
+	}
+
+	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &chrs, ice_vsig_prof, list) {
+		LIST_DEL(&del1->list);
+		ice_free(hw, del1);
+	}
+
+	return status;
+}
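
/* The decision tree above, restated as an illustrative sketch (simplified;
 * the real code also adjusts priorities and cleans up on error). All names
 * here are hypothetical.
 */
enum demo_outcome {
	DEMO_MOVE_TO_MATCH,	/* a VSIG with identical profiles exists */
	DEMO_ADD_TO_OWN_VSIG,	/* VSI is alone in its VSIG; extend it */
	DEMO_NEW_VSIG_FROM_UNION, /* split off a new VSIG with the union */
	DEMO_NEW_VSIG_SINGLE	/* no match; create a single-profile VSIG */
};

static enum demo_outcome
demo_decide(bool in_vsig, bool union_match, bool only_vsi, bool single_match)
{
	if (in_vsig) {
		if (union_match)
			return DEMO_MOVE_TO_MATCH;
		if (only_vsi)
			return DEMO_ADD_TO_OWN_VSIG;
		return DEMO_NEW_VSIG_FROM_UNION;
	}
	return single_match ? DEMO_MOVE_TO_MATCH : DEMO_NEW_VSIG_SINGLE;
}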
+
+/**
+ * ice_add_flow - add flow
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: array of VSIs to enable with the profile specified by id
+ * @count: number of elements in the vsi array
+ * @id: profile tracking id
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the id parameter for the VSIs specified in the vsi
+ * array. Once successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+	     u64 id)
+{
+	enum ice_status status;
+	u16 i;
+
+	for (i = 0; i < count; i++) {
+		status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
+		if (status)
+			return status;
+	}
+
+	return ICE_SUCCESS;
+}
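
/* Illustrative usage, not part of this patch: enable a previously
 * registered profile on two VSIs. The VSI numbers are placeholders, and the
 * tracking ID must match the one passed to ice_add_prof().
 */
static enum ice_status demo_enable_flow(struct ice_hw *hw)
{
	u16 vsi[] = { 3, 5 };

	return ice_add_flow(hw, ICE_BLK_RSS, vsi, 2, 0x1234ULL);
}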
+
+/**
+ * ice_rem_prof_from_list - remove a profile from list
+ * @hw: pointer to the hw struct
+ * @lst: list to remove the profile from
+ * @hdl: the profile handle indicating the profile to remove
+ */
+static enum ice_status
+ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
+{
+	struct ice_vsig_prof *ent, *tmp;
+
+	LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) {
+		if (ent->profile_cookie == hdl) {
+			LIST_DEL(&ent->list);
+			ice_free(hw, ent);
+			return ICE_SUCCESS;
+		}
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_prof_id_flow - remove flow
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the vsi from which to remove the profile specified by id
+ * @hdl: profile tracking handle
+ *
+ * Calling this function will update the hardware tables to remove the
+ * profile indicated by the id parameter for the VSI specified by the vsi
+ * parameter. Once successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+	struct ice_vsig_prof *tmp1, *del1;
+	struct LIST_HEAD_TYPE chg, copy;
+	struct ice_chs_chg *tmp, *del;
+	enum ice_status status;
+	u16 vsig;
+
+	INIT_LIST_HEAD(&copy);
+	INIT_LIST_HEAD(&chg);
+
+	/* determine if vsi is already part of a VSIG */
+	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+	if (!status && vsig) {
+		bool last_profile;
+		bool only_vsi;
+		u16 ref;
+
+		/* found in VSIG */
+		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
+		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+		if (status)
+			goto err_ice_rem_prof_id_flow;
+		only_vsi = (ref == 1);
+
+		if (only_vsi) {
+			/* If the original VSIG only contains one reference,
+			 * which will be the requesting VSI, then the VSI is not
+			 * sharing entries and we can simply remove the specific
+			 * characteristics from the VSIG.
+			 */
+
+			if (last_profile) {
+				/* If there are no profiles left for this
+				 * VSIG, then simply remove the VSIG.
+				 */
+				status = ice_rem_vsig(hw, blk, vsig, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+			} else {
+				status = ice_rem_prof_id_vsig(hw, blk, vsig,
+							      hdl, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+
+				/* Adjust priorities */
+				status = ice_adj_prof_priorities(hw, blk, vsig,
+								 &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+			}
+
+		} else {
+			/* Make a copy of the VSIG's list of Profiles */
+			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
+			if (status)
+				goto err_ice_rem_prof_id_flow;
+
+			/* Remove specified profile entry from the list */
+			status = ice_rem_prof_from_list(hw, &copy, hdl);
+			if (status)
+				goto err_ice_rem_prof_id_flow;
+
+			if (LIST_EMPTY(&copy)) {
+				status = ice_move_vsi(hw, blk, vsi,
+						      ICE_DEFAULT_VSIG, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+
+			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
+							    &vsig)) {
+				/* found a VSIG with a matching profile list;
+				 * move the VSI to that VSIG
+				 */
+				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+			} else {
+				/* since no existing VSIG supports this
+				 * characteristic pattern, we need to create a
+				 * new VSIG and tcam entries
+				 */
+				status = ice_create_vsig_from_lst(hw, blk, vsi,
+								  &copy, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+
+				/* Adjust priorities */
+				status = ice_adj_prof_priorities(hw, blk, vsig,
+								 &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+			}
+		}
+	} else {
+		status = ICE_ERR_DOES_NOT_EXIST;
+	}
+
+	/* update hardware tables */
+	if (!status)
+		status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_prof_id_flow:
+	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+		LIST_DEL(&del->list_entry);
+		ice_free(hw, del);
+	}
+
+	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
+		LIST_DEL(&del1->list);
+		ice_free(hw, del1);
+	}
+
+	return status;
+}
+
+/**
+ * ice_rem_flow - remove flow
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: array of VSIs from which to remove the profile specified by id
+ * @count: number of elements in the vsi array
+ * @id: profile tracking id
+ *
+ * The function will remove flows from the specified VSIs that were enabled
+ * using ice_add_flow. The id value indicates which profile will be
+ * removed. Once successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+	     u64 id)
+{
+	enum ice_status status;
+	u16 i;
+
+	for (i = 0; i < count; i++) {
+		status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
+		if (status)
+			return status;
+	}
+
+	return ICE_SUCCESS;
+}
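
/* Matching teardown for the enable sketch above (illustrative only):
 * disable the flow on the same VSIs, then drop the profile itself.
 */
static enum ice_status demo_disable_flow(struct ice_hw *hw)
{
	u16 vsi[] = { 3, 5 };
	enum ice_status status;

	status = ice_rem_flow(hw, ICE_BLK_RSS, vsi, 2, 0x1234ULL);
	if (status)
		return status;

	return ice_rem_prof(hw, ICE_BLK_RSS, 0x1234ULL);
}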
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
new file mode 100644
index 000000000..81e89aeeb
--- /dev/null
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2019
+ */
+
+#ifndef _ICE_FLEX_PIPE_H_
+#define _ICE_FLEX_PIPE_H_
+
+#include "ice_type.h"
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ	1
+#define ICE_PKG_FMT_VER_MNR	0
+#define ICE_PKG_FMT_VER_UPD	0
+#define ICE_PKG_FMT_VER_DFT	0
+
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+
+struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+		    struct ice_pkg_hdr *pkg_hdr);
+enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg);
+
+enum ice_status
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_header);
+
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
+
+enum ice_status
+ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+		     u16 *value);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
+		   struct LIST_HEAD_TYPE *fv_list);
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+		      u16 buf_size, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+
+/* package buffer building routines */
+
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
+enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
+void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
+
+/* XLT1/PType group functions */
+enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
+enum ice_status
+ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg);
+u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk);
+void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg);
+enum ice_status
+ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg);
+
+/* XLT2/Vsi group functions */
+enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
+enum ice_status
+ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
+enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+			struct LIST_HEAD_TYPE *chs, u16 *vsig);
+
+enum ice_status
+ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
+enum ice_status ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig);
+enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+	     struct ice_fv_word *es);
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+struct ice_prof_map *
+ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
+struct ice_prof_map *
+ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
+enum ice_status
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+void ice_free_seg(struct ice_hw *hw);
+void ice_free_hw_tbls(struct ice_hw *hw);
+enum ice_status
+ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+	     u64 id);
+enum ice_status
+ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+	     u64 id);
+enum ice_status
+ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+
+enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+	    u16 len);
+#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 84a38cb70..aa01dea88 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -16,4 +16,689 @@ struct ice_fv {
 	struct ice_fv_word ew[ICE_MAX_FV_WORDS];
 };
 
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+	struct ice_pkg_ver format_ver;
+	__le32 seg_count;
+	__le32 seg_offset[1];
+};
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_METADATA	0x00000001
+#define SEGMENT_TYPE_ICE	0x00000010
+	__le32 seg_type;
+	struct ice_pkg_ver seg_ver;
+	__le32 seg_size;
+	char seg_name[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+	struct {
+		__le16 device_id;
+		__le16 vendor_id;
+	} dev_vend_id;
+	__le32 id;
+};
+
+struct ice_device_id_entry {
+	union ice_device_id device;
+	union ice_device_id sub_device;
+};
+
+struct ice_seg {
+	struct ice_generic_seg_hdr hdr;
+	__le32 device_table_count;
+	struct ice_device_id_entry device_table[1];
+};
+
+struct ice_nvm_table {
+	__le32 table_count;
+	__le32 vers[1];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE	4096
+	u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+	__le32 buf_count;
+	struct ice_buf buf_array[1];
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+	struct ice_generic_seg_hdr hdr;
+	struct ice_pkg_ver pkg_ver;
+	__le32 track_id;
+	char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF		12
+#define ICE_MAX_S_OFF		4095
+#define ICE_MIN_S_SZ		1
+#define ICE_MAX_S_SZ		4084
+
+/* section information */
+struct ice_section_entry {
+	__le32 type;
+	__le16 offset;
+	__le16 size;
+};
+
+#define ICE_MIN_S_COUNT		1
+#define ICE_MAX_S_COUNT		511
+#define ICE_MIN_S_DATA_END	12
+#define ICE_MAX_S_DATA_END	4096
+
+#define ICE_METADATA_BUF	0x80000000
+
+struct ice_buf_hdr {
+	__le16 section_count;
+	__le16 data_end;
+	struct ice_section_entry section_entry[1];
+};
+
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
+	sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
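
/* Illustrative walk, not part of this patch, over the sections of one 4 KB
 * package buffer using the header layout above; bounds checks are
 * abbreviated and the function name is hypothetical.
 */
static void demo_walk_buf(struct ice_buf_hdr *hdr)
{
	u16 i, count = LE16_TO_CPU(hdr->section_count);

	for (i = 0; i < count; i++) {
		struct ice_section_entry *e = &hdr->section_entry[i];

		/* each entry carries a type plus an offset/size pair; valid
		 * offsets start at ICE_MIN_S_OFF, just past this header
		 */
		(void)LE32_TO_CPU(e->type);
		(void)LE16_TO_CPU(e->offset);
		(void)LE16_TO_CPU(e->size);
	}
}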
+
+/* ice package section IDs */
+#define ICE_SID_XLT0_SW			10
+#define ICE_SID_XLT_KEY_BUILDER_SW	11
+#define ICE_SID_XLT1_SW			12
+#define ICE_SID_XLT2_SW			13
+#define ICE_SID_PROFID_TCAM_SW		14
+#define ICE_SID_PROFID_REDIR_SW		15
+#define ICE_SID_FLD_VEC_SW		16
+#define ICE_SID_CDID_KEY_BUILDER_SW	17
+#define ICE_SID_CDID_REDIR_SW		18
+
+#define ICE_SID_XLT0_ACL		20
+#define ICE_SID_XLT_KEY_BUILDER_ACL	21
+#define ICE_SID_XLT1_ACL		22
+#define ICE_SID_XLT2_ACL		23
+#define ICE_SID_PROFID_TCAM_ACL		24
+#define ICE_SID_PROFID_REDIR_ACL	25
+#define ICE_SID_FLD_VEC_ACL		26
+#define ICE_SID_CDID_KEY_BUILDER_ACL	27
+#define ICE_SID_CDID_REDIR_ACL		28
+
+#define ICE_SID_XLT0_FD			30
+#define ICE_SID_XLT_KEY_BUILDER_FD	31
+#define ICE_SID_XLT1_FD			32
+#define ICE_SID_XLT2_FD			33
+#define ICE_SID_PROFID_TCAM_FD		34
+#define ICE_SID_PROFID_REDIR_FD		35
+#define ICE_SID_FLD_VEC_FD		36
+#define ICE_SID_CDID_KEY_BUILDER_FD	37
+#define ICE_SID_CDID_REDIR_FD		38
+
+#define ICE_SID_XLT0_RSS		40
+#define ICE_SID_XLT_KEY_BUILDER_RSS	41
+#define ICE_SID_XLT1_RSS		42
+#define ICE_SID_XLT2_RSS		43
+#define ICE_SID_PROFID_TCAM_RSS		44
+#define ICE_SID_PROFID_REDIR_RSS	45
+#define ICE_SID_FLD_VEC_RSS		46
+#define ICE_SID_CDID_KEY_BUILDER_RSS	47
+#define ICE_SID_CDID_REDIR_RSS		48
+
+#define ICE_SID_RXPARSER_CAM		50
+#define ICE_SID_RXPARSER_NOMATCH_CAM	51
+#define ICE_SID_RXPARSER_IMEM		52
+#define ICE_SID_RXPARSER_XLT0_BUILDER	53
+#define ICE_SID_RXPARSER_NODE_PTYPE	54
+#define ICE_SID_RXPARSER_MARKER_PTYPE	55
+#define ICE_SID_RXPARSER_BOOST_TCAM	56
+#define ICE_SID_RXPARSER_PROTO_GRP	57
+#define ICE_SID_RXPARSER_METADATA_INIT	58
+#define ICE_SID_RXPARSER_XLT0		59
+
+#define ICE_SID_TXPARSER_CAM		60
+#define ICE_SID_TXPARSER_NOMATCH_CAM	61
+#define ICE_SID_TXPARSER_IMEM		62
+#define ICE_SID_TXPARSER_XLT0_BUILDER	63
+#define ICE_SID_TXPARSER_NODE_PTYPE	64
+#define ICE_SID_TXPARSER_MARKER_PTYPE	65
+#define ICE_SID_TXPARSER_BOOST_TCAM	66
+#define ICE_SID_TXPARSER_PROTO_GRP	67
+#define ICE_SID_TXPARSER_METADATA_INIT	68
+#define ICE_SID_TXPARSER_XLT0		69
+
+#define ICE_SID_RXPARSER_INIT_REDIR	70
+#define ICE_SID_TXPARSER_INIT_REDIR	71
+#define ICE_SID_RXPARSER_MARKER_GRP	72
+#define ICE_SID_TXPARSER_MARKER_GRP	73
+#define ICE_SID_RXPARSER_LAST_PROTO	74
+#define ICE_SID_TXPARSER_LAST_PROTO	75
+#define ICE_SID_RXPARSER_PG_SPILL	76
+#define ICE_SID_TXPARSER_PG_SPILL	77
+#define ICE_SID_RXPARSER_NOMATCH_SPILL	78
+#define ICE_SID_TXPARSER_NOMATCH_SPILL	79
+
+#define ICE_SID_XLT0_PE			80
+#define ICE_SID_XLT_KEY_BUILDER_PE	81
+#define ICE_SID_XLT1_PE			82
+#define ICE_SID_XLT2_PE			83
+#define ICE_SID_PROFID_TCAM_PE		84
+#define ICE_SID_PROFID_REDIR_PE		85
+#define ICE_SID_FLD_VEC_PE		86
+#define ICE_SID_CDID_KEY_BUILDER_PE	87
+#define ICE_SID_CDID_REDIR_PE		88
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST		0x80000010
+#define ICE_SID_LBL_RXPARSER_IMEM	0x80000010
+#define ICE_SID_LBL_TXPARSER_IMEM	0x80000011
+#define ICE_SID_LBL_RESERVED_12		0x80000012
+#define ICE_SID_LBL_RESERVED_13		0x80000013
+#define ICE_SID_LBL_RXPARSER_MARKER	0x80000014
+#define ICE_SID_LBL_TXPARSER_MARKER	0x80000015
+#define ICE_SID_LBL_PTYPE		0x80000016
+#define ICE_SID_LBL_PROTOCOL_ID		0x80000017
+#define ICE_SID_LBL_RXPARSER_TMEM	0x80000018
+#define ICE_SID_LBL_TXPARSER_TMEM	0x80000019
+#define ICE_SID_LBL_RXPARSER_PG		0x8000001A
+#define ICE_SID_LBL_TXPARSER_PG		0x8000001B
+#define ICE_SID_LBL_RXPARSER_M_TCAM	0x8000001C
+#define ICE_SID_LBL_TXPARSER_M_TCAM	0x8000001D
+#define ICE_SID_LBL_SW_PROFID_TCAM	0x8000001E
+#define ICE_SID_LBL_ACL_PROFID_TCAM	0x8000001F
+#define ICE_SID_LBL_PE_PROFID_TCAM	0x80000020
+#define ICE_SID_LBL_RSS_PROFID_TCAM	0x80000021
+#define ICE_SID_LBL_FD_PROFID_TCAM	0x80000022
+#define ICE_SID_LBL_FLAG		0x80000023
+#define ICE_SID_LBL_REG			0x80000024
+#define ICE_SID_LBL_SW_PTG		0x80000025
+#define ICE_SID_LBL_ACL_PTG		0x80000026
+#define ICE_SID_LBL_PE_PTG		0x80000027
+#define ICE_SID_LBL_RSS_PTG		0x80000028
+#define ICE_SID_LBL_FD_PTG		0x80000029
+#define ICE_SID_LBL_SW_VSIG		0x8000002A
+#define ICE_SID_LBL_ACL_VSIG		0x8000002B
+#define ICE_SID_LBL_PE_VSIG		0x8000002C
+#define ICE_SID_LBL_RSS_VSIG		0x8000002D
+#define ICE_SID_LBL_FD_VSIG		0x8000002E
+#define ICE_SID_LBL_PTYPE_META		0x8000002F
+#define ICE_SID_LBL_SW_PROFID		0x80000030
+#define ICE_SID_LBL_ACL_PROFID		0x80000031
+#define ICE_SID_LBL_PE_PROFID		0x80000032
+#define ICE_SID_LBL_RSS_PROFID		0x80000033
+#define ICE_SID_LBL_FD_PROFID		0x80000034
+#define ICE_SID_LBL_RXPARSER_MARKER_GRP	0x80000035
+#define ICE_SID_LBL_TXPARSER_MARKER_GRP	0x80000036
+#define ICE_SID_LBL_RXPARSER_PROTO	0x80000037
+#define ICE_SID_LBL_TXPARSER_PROTO	0x80000038
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST		0x80000038
+
+enum ice_block {
+	ICE_BLK_SW = 0,
+	ICE_BLK_ACL,
+	ICE_BLK_FD,
+	ICE_BLK_RSS,
+	ICE_BLK_PE,
+	ICE_BLK_COUNT
+};
+
+enum ice_sect {
+	ICE_XLT0 = 0,
+	ICE_XLT_KB,
+	ICE_XLT1,
+	ICE_XLT2,
+	ICE_PROF_TCAM,
+	ICE_PROF_REDIR,
+	ICE_VEC_TBL,
+	ICE_CDID_KB,
+	ICE_CDID_REDIR,
+	ICE_SECT_COUNT
+};
+
+/* Packet Type (PTYPE) values */
+#define ICE_PTYPE_MAC_PAY		1
+#define ICE_PTYPE_IPV4FRAG_PAY		22
+#define ICE_PTYPE_IPV4_PAY		23
+#define ICE_PTYPE_IPV4_UDP_PAY		24
+#define ICE_PTYPE_IPV4_TCP_PAY		26
+#define ICE_PTYPE_IPV4_SCTP_PAY		27
+#define ICE_PTYPE_IPV4_ICMP_PAY		28
+#define ICE_PTYPE_IPV6FRAG_PAY		88
+#define ICE_PTYPE_IPV6_PAY		89
+#define ICE_PTYPE_IPV6_UDP_PAY		90
+#define ICE_PTYPE_IPV6_TCP_PAY		92
+#define ICE_PTYPE_IPV6_SCTP_PAY		93
+#define ICE_PTYPE_IPV6_ICMP_PAY		94
+
+/* Packet Type Groups (PTG) - Inner Most fields (IM) */
+#define ICE_PTG_IM_IPV4_TCP		16
+#define ICE_PTG_IM_IPV4_UDP		17
+#define ICE_PTG_IM_IPV4_SCTP		18
+#define ICE_PTG_IM_IPV4_PAY		20
+#define ICE_PTG_IM_IPV4_OTHER		21
+#define ICE_PTG_IM_IPV6_TCP		32
+#define ICE_PTG_IM_IPV6_UDP		33
+#define ICE_PTG_IM_IPV6_SCTP		34
+#define ICE_PTG_IM_IPV6_OTHER		37
+#define ICE_PTG_IM_L2_OTHER		67
+
+struct ice_flex_fields {
+	union {
+		struct {
+			u8 src_ip;
+			u8 dst_ip;
+			u8 flow_label;	/* valid for IPv6 only */
+		} ip_fields;
+
+		struct {
+			u8 src_prt;
+			u8 dst_prt;
+		} tcp_udp_fields;
+
+		struct {
+			u8 src_ip;
+			u8 dst_ip;
+			u8 src_prt;
+			u8 dst_prt;
+		} ip_tcp_udp_fields;
+
+		struct {
+			u8 src_prt;
+			u8 dst_prt;
+			u8 flow_label;	/* valid for IPv6 only */
+			u8 spi;
+		} ip_esp_fields;
+
+		struct {
+			u32 offset;
+			u32 length;
+		} off_len;
+	} fields;
+};
+
+#define ICE_XLT1_DFLT_GRP	0
+#define ICE_XLT1_TABLE_SIZE	1024
+
+/* package labels */
+struct ice_label {
+	__le16 value;
+#define ICE_PKG_LABEL_SIZE	64
+	char name[ICE_PKG_LABEL_SIZE];
+};
+
+struct ice_label_section {
+	__le16 count;
+	struct ice_label label[1];
+};
+
+#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+	sizeof(struct ice_label_section) - sizeof(struct ice_label), \
+	sizeof(struct ice_label))
+
+struct ice_sw_fv_section {
+	__le16 count;
+	__le16 base_offset;
+	struct ice_fv fv[1];
+};
+
+struct ice_sw_fv_list_entry {
+	struct LIST_ENTRY_TYPE list_entry;
+	u32 profile_id;
+	struct ice_fv *fv_ptr;
+};
+
+#pragma pack(1)
+/* The BOOST TCAM stores the match packet header in reverse order, meaning
+ * the fields are reversed; in addition, this means that the normally
+ * big-endian fields of the packet become little-endian.
+ */
+struct ice_boost_key_value {
+#define ICE_BOOST_REMAINING_HV_KEY	15
+	u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
+	__le16 hv_dst_port_key;
+	__le16 hv_src_port_key;
+	u8 tcam_search_key;
+};
+
+#pragma pack()
+
+struct ice_boost_key {
+	struct ice_boost_key_value key;
+	struct ice_boost_key_value key2;
+};
+
+/* package Boost TCAM entry */
+struct ice_boost_tcam_entry {
+	__le16 addr;
+	__le16 reserved;
+	/* break up the 40 bytes of key into different fields */
+	struct ice_boost_key key;
+	u8 boost_hit_index_group;
+	/* The following contains bitfields which are not on byte boundaries.
+	 * These fields are currently unused by driver software.
+	 */
+#define ICE_BOOST_BIT_FIELDS		43
+	u8 bit_fields[ICE_BOOST_BIT_FIELDS];
+};
+
+struct ice_boost_tcam_section {
+	__le16 count;
+	__le16 reserved;
+	struct ice_boost_tcam_entry tcam[1];
+};
+
+#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+	sizeof(struct ice_boost_tcam_section) - \
+	sizeof(struct ice_boost_tcam_entry), \
+	sizeof(struct ice_boost_tcam_entry))
+
+#pragma pack(1)
+struct ice_xlt1_section {
+	__le16 count;
+	__le16 offset;
+	u8 value[1];
+};
+
+#pragma pack()
+
+#define ICE_XLT1_SIZE(n)	(sizeof(struct ice_xlt1_section) + \
+				 (sizeof(u8) * ((n) - 1)))
+
+struct ice_xlt2_section {
+	__le16 count;
+	__le16 offset;
+	__le16 value[1];
+};
+
+#define ICE_XLT2_SIZE(n)	(sizeof(struct ice_xlt2_section) + \
+				 (sizeof(u16) * ((n) - 1)))
+
+struct ice_prof_redir_section {
+	__le16 count;
+	__le16 offset;
+	u8 redir_value[1];
+};
+
+#define ICE_PROF_REDIR_SIZE(n)	(sizeof(struct ice_prof_redir_section) + \
+				 (sizeof(u8) * ((n) - 1)))
+
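+/* Each of the *_SIZE(n) macros above computes the byte size of a section
+ * holding n entries: the one-element flexible array at the end of each
+ * section struct already covers the first entry, so only (n - 1) extra
+ * elements are added on top of the base struct size.
+ */
+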
+/* package buffer building */
+
+struct ice_buf_build {
+	struct ice_buf buf;
+	u16 reserved_section_table_entries;
+};
+
+struct ice_pkg_enum {
+	struct ice_buf_table *buf_table;
+	u32 buf_idx;
+
+	u32 type;
+	struct ice_buf_hdr *buf;
+	u32 sect_idx;
+	void *sect;
+	u32 sect_type;
+
+	u32 entry_idx;
+	void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
+
+/* Tunnel enabling */
+
+enum ice_tunnel_type {
+	TNL_VXLAN = 0,
+	TNL_GTPC,
+	TNL_GTPC_TEID,
+	TNL_GTPU,
+	TNL_GTPU_TEID,
+	TNL_VXLAN_GPE,
+	TNL_GENEVE,
+	TNL_NAT,
+	TNL_ROCE_V2,
+	TNL_MPLSO_UDP,
+	TNL_UDP2_END,
+	TNL_UPD_END,
+	TNL_LAST = 0xFF,
+	TNL_ALL = 0xFF,
+};
+
+struct ice_tunnel_type_scan {
+	enum ice_tunnel_type type;
+	const char *label_prefix;
+};
+
+struct ice_tunnel_entry {
+	enum ice_tunnel_type type;
+	u8 valid;
+	u8 in_use;
+	u8 marked;
+	u16 boost_addr;
+	u16 port;
+	struct ice_boost_tcam_entry *boost_entry;
+};
+
+#define ICE_TUNNEL_MAX_ENTRIES	16
+
+struct ice_tunnel_table {
+	u16 count;
+	struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
+};
+
+struct ice_pkg_es {
+	__le16 count;
+	__le16 offset;
+	struct ice_fv_word es[1];
+};
+
+struct ice_es {
+	u32 sid;
+	u16 count;
+	u16 fvw;
+	u16 *ref_count;
+	u8 reverse; /* set to true to reverse FV order */
+	struct LIST_HEAD_TYPE prof_map;
+	struct ice_fv_word *t;
+	u8 *resource_used_hack; /* hack for testing */
+};
+
+/* PTYPE Group management */
+
+/* Note: the XLT1 table takes a 13-bit value as input and produces an 8-bit
+ * packet type group (PTG) ID as output.
+ *
+ * Note: PTG 0 is the default packet type group; all PTYPEs are assumed to be
+ * part of this group until moved to a new PTG.
+ */
+#define ICE_DEFAULT_PTG	0
+
+struct ice_ptg_entry {
+	u8 in_use;
+	struct ice_ptg_ptype *first_ptype;
+};
+
+struct ice_ptg_ptype {
+	u8 ptg;
+	struct ice_ptg_ptype *next_ptype;
+};
+
+#define ICE_MAX_TCAM_PER_PROFILE	8
+#define ICE_MAX_PTYPE_PER_PROFILE	8
+
+struct ice_prof_map {
+	struct LIST_ENTRY_TYPE list;
+	u64 profile_cookie;
+	u64 context;
+	u8 prof_id;
+	u8 ptype_count;
+	u8 ptype[ICE_MAX_PTYPE_PER_PROFILE];
+};
+
+#define ICE_INVALID_TCAM	0xFFFF
+
+struct ice_tcam_inf {
+	u8 ptg;
+	u8 prof_id;
+	u16 tcam_idx;
+	u8 in_use;
+};
+
+struct ice_vsig_prof {
+	struct LIST_ENTRY_TYPE list;
+	u64 profile_cookie;
+	u8 prof_id;
+	u8 tcam_count;
+	struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
+};
+
+struct ice_vsig_entry {
+	u8 in_use;
+	struct LIST_HEAD_TYPE prop_lst;
+	struct ice_vsig_vsi *first_vsi;
+};
+
+struct ice_vsig_vsi {
+	u16 changed;
+	u16 vsig;
+	u32 prop_mask;
+	struct ice_vsig_vsi *next_vsi;
+};
+
+#define ICE_XLT1_CNT	1024
+#define ICE_MAX_PTGS	256
+
+/* XLT1 Table */
+struct ice_xlt1 {
+	u32 sid;
+	u16 count;
+	struct ice_ptg_entry *ptg_tbl;
+	struct ice_ptg_ptype *ptypes;
+	u8 *t;
+};
+
+#define ICE_XLT2_CNT	768
+#define ICE_MAX_VSIGS	768
+
+/* VSIG bit layout:
+ * [0:12]: incremental VSIG index, 1 to ICE_MAX_VSIGS
+ * [13:15]: PF number of the device
+ */
+#define ICE_VSIG_IDX_M	(0x1FFF)
+#define ICE_PF_NUM_S	13
+#define ICE_PF_NUM_M	(0x07 << ICE_PF_NUM_S)
+#define ICE_VSIG_VALUE(vsig, pf_id) \
+	(u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
+	      (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
+#define ICE_DEFAULT_VSIG	0
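+
+/* Example: ICE_VSIG_VALUE(10, 2) evaluates to
+ * (10 & 0x1FFF) | ((2 << 13) & 0xE000) = 0x400A,
+ * i.e. VSIG index 10 owned by PF 2.
+ */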
+
+/* XLT2 Table */
+struct ice_xlt2 {
+	u32 sid;
+	u16 count;
+	struct ice_vsig_entry *vsig_tbl;
+	struct ice_vsig_vsi *vsis;
+	u16 *t;
+};
+
+/* Extraction sequence - list of match fields:
+ * protocol ID, offset, length
+ */
+union ice_match_fld {
+	struct {
+		u8 prot_id;
+		u8 offset;
+		u8 length;
+		u8 reserved; /* must be zero */
+	} fld;
+	u32 val;
+};
+
+#define ICE_MATCH_LIST_SZ	20
+#pragma pack(1)
+struct ice_match {
+	u8 count;
+	union ice_match_fld list[ICE_MATCH_LIST_SZ];
+};
+
+/* Profile ID Management */
+struct ice_prof_id_key {
+	__le16 flags;
+	u8 xlt1;
+	__le16 xlt2_cdid;
+};
+
+/* Keys are made up of two values, each one-half the size of the key.
+ * For TCAM, the entire key is 80 bits wide (i.e., two 40-bit values).
+ */
+#define ICE_TCAM_KEY_VAL_SZ	5
+#define ICE_TCAM_KEY_SZ		(2 * ICE_TCAM_KEY_VAL_SZ)
+
+struct ice_prof_tcam_entry {
+	__le16 addr;
+	u8 key[ICE_TCAM_KEY_SZ];
+	u8 prof_id;
+};
+
+struct ice_prof_id_section {
+	__le16 count;
+	struct ice_prof_tcam_entry entry[1];
+};
+#pragma pack()
+
+struct ice_prof_tcam {
+	u32 sid;
+	u16 count;
+	u16 max_prof_id;
+	u8 cdid_bits; /* # cdid bits to use in key, 0, 2, 4, or 8 */
+	struct ice_prof_tcam_entry *t;
+	u8 *resource_used_hack;
+};
+
+struct ice_prof_redir {
+	u32 sid;
+	u16 count;
+	u8 *t;
+};
+
+/* Tables per block */
+struct ice_blk_info {
+	struct ice_xlt1 xlt1;
+	struct ice_xlt2 xlt2;
+	struct ice_prof_tcam prof;
+	struct ice_prof_redir prof_redir;
+	struct ice_es es;
+	u8 overwrite; /* set to true to allow overwrite of table entries */
+};
+
+enum ice_chg_type {
+	ICE_TCAM_NONE = 0,
+	ICE_PTG_ES_ADD,
+	ICE_TCAM_ADD,
+	ICE_TCAM_REM,
+	ICE_VSIG_ADD,
+	ICE_VSIG_REM,
+	ICE_VSI_MOVE,
+};
+
+struct ice_chs_chg {
+	struct LIST_ENTRY_TYPE list_entry;
+	enum ice_chg_type type;
+
+	u8 add_ptg;
+	u8 add_vsig;
+	u8 add_tcam_idx;
+	u8 add_prof;
+	u16 ptype;
+	u8 ptg;
+	u8 prof_id;
+	u16 vsi;
+	u16 vsig;
+	u16 orig_vsig;
+	u16 tcam_idx;
+	struct ice_prof_tcam_entry orig_ent;
+};
+
+#define ICE_FLOW_PTYPE_MAX		ICE_XLT1_CNT
+
 #endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index d7c12d172..17d79ba21 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -704,6 +704,42 @@ struct ice_hw {
 
 	u8 ucast_shared;	/* true if VSIs can share unicast addr */
 
+	/* Active package version */
+	struct ice_pkg_ver active_pkg_ver;
+	u8 active_pkg_name[ICE_PKG_NAME_SIZE];
+
+	/* Driver's package version (from the metadata segment) */
+	struct ice_pkg_ver pkg_ver;
+	u8 pkg_name[ICE_PKG_NAME_SIZE];
+
+	/* Driver's Ice package version (from the Ice seg) */
+	struct ice_pkg_ver ice_pkg_ver;
+	u8 ice_pkg_name[ICE_PKG_NAME_SIZE];
+
+	/* Pointer to the ice segment */
+	struct ice_seg *seg;
+
+	/* Pointer to allocated copy of pkg memory */
+	u8 *pkg_copy;
+
+	/* tunneling info */
+	struct ice_tunnel_table tnl;
+
+	/* PTYPE group and XLT1 management */
+#define ICE_MAX_PTGS	256
+	struct ice_ptg_entry ptg_tbl[ICE_BLK_COUNT][ICE_MAX_PTGS];
+
+#define ICE_XLT1_CNT	1024
+	struct ice_ptg_ptype xlt1_tbl[ICE_BLK_COUNT][ICE_XLT1_CNT];
+#define ICE_PKG_FILENAME	"package_file"
+#define ICE_PKG_FILENAME_EXT	"pkg"
+#define ICE_PKG_FILE_MAJ_VER	1
+#define ICE_PKG_FILE_MIN_VER	0
+
+	/* HW block tables */
+	struct ice_blk_info blk[ICE_BLK_COUNT];
+	struct ice_lock fl_profs_locks[ICE_BLK_COUNT];	/* lock fltr profiles */
+	struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT];
 };
 
 /* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ice/base/meson.build b/drivers/net/ice/base/meson.build
index 0cfc8cd67..77a9802f4 100644
--- a/drivers/net/ice/base/meson.build
+++ b/drivers/net/ice/base/meson.build
@@ -7,6 +7,7 @@ sources = [
 	'ice_sched.c',
 	'ice_switch.c',
 	'ice_nvm.c',
+	'ice_flex_pipe.c',
 ]
 
 error_cflags = ['-Wno-unused-value',
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH 6/7] net/ice/base: add flow module
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
                   ` (4 preceding siblings ...)
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 5/7] net/ice/base: add flexible pipeline module Qi Zhang
@ 2019-01-15 12:56 ` Qi Zhang
  2019-01-16 12:14   ` Ferruh Yigit
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 7/7] net/ice/base: free flow profile entries Qi Zhang
                   ` (3 subsequent siblings)
  9 siblings, 1 reply; 21+ messages in thread
From: Qi Zhang @ 2019-01-15 12:56 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson, ferruh.yigit, Qi Zhang

Add the module that implements the flow abstraction on top of the
flexible pipeline.
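
As a rough illustration of the intended call flow (an illustrative
sketch only; ICE_FLOW_RX and ICE_FLOW_PRIO_NORMAL are assumed to be
defined in ice_flow.h), configuring an RSS hash on outer IPv4
addresses would look roughly like:

    struct ice_flow_seg_info seg = { 0 };
    struct ice_flow_prof *prof;
    enum ice_status status;
    u64 entry_h;

    /* hash on outer IPv4 src/dst; RSS needs no match locations */
    ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
                     ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
                     ICE_FLOW_FLD_OFF_INVAL, false);
    ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
                     ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
                     ICE_FLOW_FLD_OFF_INVAL, false);

    status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
                               &seg, 1, NULL, 0, &prof);
    if (!status)
        /* RSS entries carry no match data, so data may be NULL */
        status = ice_flow_add_entry(hw, ICE_BLK_RSS, prof_id, entry_id,
                                    vsi_handle, ICE_FLOW_PRIO_NORMAL,
                                    NULL, NULL, 0, &entry_h);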

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/Makefile          |    1 +
 drivers/net/ice/base/ice_common.h |    1 +
 drivers/net/ice/base/ice_flow.c   | 2080 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_flow.h   |  337 ++++++
 drivers/net/ice/base/meson.build  |    1 +
 5 files changed, 2420 insertions(+)
 create mode 100644 drivers/net/ice/base/ice_flow.c

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 1bb76efc2..61846cae2 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -50,6 +50,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_sched.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_nvm.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flex_pipe.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flow.c
 
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx.c
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 67539ccb7..5ac991ef2 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -7,6 +7,7 @@
 
 #include "ice_type.h"
 
+#include "ice_flex_pipe.h"
 #include "ice_switch.h"
 
 enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
new file mode 100644
index 000000000..b57362d03
--- /dev/null
+++ b/drivers/net/ice/base/ice_flow.c
@@ -0,0 +1,2080 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2019
+ */
+
+#include "ice_common.h"
+#include "ice_flow.h"
+
+/* Size of known protocol header fields */
+#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
+#define ICE_FLOW_FLD_SZ_VLAN		2
+#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
+#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
+#define ICE_FLOW_FLD_SZ_IP_DSCP		1
+#define ICE_FLOW_FLD_SZ_IP_TTL		1
+#define ICE_FLOW_FLD_SZ_IP_PROT		1
+#define ICE_FLOW_FLD_SZ_PORT		2
+#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
+#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
+#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
+#define ICE_FLOW_FLD_SZ_ARP_OPER	2
+#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
+
+/* Protocol header fields are extracted at the word boundaries as word-sized
+ * values. Specify the displacement value of some non-word-aligned fields needed
+ * to compute the offset of words containing the fields in the corresponding
+ * protocol headers. Displacement values are expressed in number of bits.
+ */
+#define ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP	(-4)
+#define ICE_FLOW_FLD_IPV6_TTL_PROT_DISP	((-2) * 8)
+#define ICE_FLOW_FLD_IPV6_TTL_TTL_DISP	((-1) * 8)
+
+/* Describe properties of a protocol header field */
+struct ice_flow_field_info {
+	enum ice_flow_seg_hdr hdr;
+	s16 off;	/* Offset from start of a protocol header, in bits */
+	u16 size;	/* Size of field in bits */
+};
+
+/* Table containing properties of supported protocol header fields */
+static const
+struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
+	/* Ether */
+	/* ICE_FLOW_FIELD_IDX_ETH_DA */
+	{ ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN * 8 },
+	/* ICE_FLOW_FIELD_IDX_ETH_SA */
+	{ ICE_FLOW_SEG_HDR_ETH, ETH_ALEN * 8, ETH_ALEN * 8 },
+	/* ICE_FLOW_FIELD_IDX_S_VLAN */
+	{ ICE_FLOW_SEG_HDR_VLAN, 12 * 8, ICE_FLOW_FLD_SZ_VLAN * 8 },
+	/* ICE_FLOW_FIELD_IDX_C_VLAN */
+	{ ICE_FLOW_SEG_HDR_VLAN, 14 * 8, ICE_FLOW_FLD_SZ_VLAN * 8 },
+	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
+	{ ICE_FLOW_SEG_HDR_ETH, 12 * 8, ICE_FLOW_FLD_SZ_ETH_TYPE * 8 },
+	/* IPv4 */
+	/* ICE_FLOW_FIELD_IDX_IP_DSCP */
+	{ ICE_FLOW_SEG_HDR_IPV4, 1 * 8, 1 * 8 },
+	/* ICE_FLOW_FIELD_IDX_IP_TTL */
+	{ ICE_FLOW_SEG_HDR_NONE, 8 * 8, 1 * 8 },
+	/* ICE_FLOW_FIELD_IDX_IP_PROT */
+	{ ICE_FLOW_SEG_HDR_NONE, 9 * 8, ICE_FLOW_FLD_SZ_IP_PROT * 8 },
+	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
+	{ ICE_FLOW_SEG_HDR_IPV4, 12 * 8, ICE_FLOW_FLD_SZ_IPV4_ADDR * 8 },
+	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
+	{ ICE_FLOW_SEG_HDR_IPV4, 16 * 8, ICE_FLOW_FLD_SZ_IPV4_ADDR * 8 },
+	/* IPv6 */
+	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
+	{ ICE_FLOW_SEG_HDR_IPV6, 8 * 8, ICE_FLOW_FLD_SZ_IPV6_ADDR * 8 },
+	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
+	{ ICE_FLOW_SEG_HDR_IPV6, 24 * 8, ICE_FLOW_FLD_SZ_IPV6_ADDR * 8 },
+	/* Transport */
+	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
+	{ ICE_FLOW_SEG_HDR_TCP, 0 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
+	{ ICE_FLOW_SEG_HDR_TCP, 2 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
+	{ ICE_FLOW_SEG_HDR_UDP, 0 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
+	{ ICE_FLOW_SEG_HDR_UDP, 2 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
+	{ ICE_FLOW_SEG_HDR_SCTP, 0 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
+	{ ICE_FLOW_SEG_HDR_SCTP, 2 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
+	{ ICE_FLOW_SEG_HDR_TCP, 13 * 8, ICE_FLOW_FLD_SZ_TCP_FLAGS * 8 },
+	/* ARP */
+	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
+	{ ICE_FLOW_SEG_HDR_ARP, 14 * 8, ICE_FLOW_FLD_SZ_IPV4_ADDR * 8 },
+	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
+	{ ICE_FLOW_SEG_HDR_ARP, 24 * 8, ICE_FLOW_FLD_SZ_IPV4_ADDR * 8 },
+	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
+	{ ICE_FLOW_SEG_HDR_ARP, 8 * 8, ETH_ALEN * 8 },
+	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
+	{ ICE_FLOW_SEG_HDR_ARP, 18 * 8, ETH_ALEN * 8 },
+	/* ICE_FLOW_FIELD_IDX_ARP_OP */
+	{ ICE_FLOW_SEG_HDR_ARP, 6 * 8, ICE_FLOW_FLD_SZ_ARP_OPER * 8 },
+	/* ICMP */
+	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
+	{ ICE_FLOW_SEG_HDR_ICMP, 0 * 8, ICE_FLOW_FLD_SZ_ICMP_TYPE * 8 },
+	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
+	{ ICE_FLOW_SEG_HDR_ICMP, 1 * 8, ICE_FLOW_FLD_SZ_ICMP_CODE * 8 },
+	/* GRE */
+	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
+	{ ICE_FLOW_SEG_HDR_GRE, 12 * 8, ICE_FLOW_FLD_SZ_GRE_KEYID * 8 },
+};
+
+/* Bitmaps indicating relevant packet types for a particular protocol header
+ *
+ * Packet types for packets with an Outer/First/Single MAC header
+ */
+static const u32 ice_ptypes_mac_ofos[] = {
+	0xFDC00CC6, 0xBFBF7F7E, 0xF7EFDFDF, 0xFEFDFDFB,
+	0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
+	0x000B0F0F, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last MAC VLAN header */
+static const u32 ice_ptypes_macvlan_il[] = {
+	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
+	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header */
+static const u32 ice_ptypes_ipv4_ofos[] = {
+	0xFDC00000, 0xBFBF7F7E, 0x00EFDFDF, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x0003000F, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header */
+static const u32 ice_ptypes_ipv4_il[] = {
+	0xE0000000, 0xB807700E, 0x8001DC03, 0xE01DC03B,
+	0x0007700E, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header */
+static const u32 ice_ptypes_ipv6_ofos[] = {
+	0x00000000, 0x00000000, 0xF7000000, 0xFEFDFDFB,
+	0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
+	0x00080F00, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header */
+static const u32 ice_ptypes_ipv6_il[] = {
+	0x00000000, 0x03B80770, 0x00EE01DC, 0x0EE00000,
+	0x03B80770, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First ARP header */
+static const u32 ice_ptypes_arp_of[] = {
+	0x00000800, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First UDP header */
+static const u32 ice_ptypes_udp_of[] = {
+	0x81000000, 0x00000000, 0x04000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last UDP header */
+static const u32 ice_ptypes_udp_il[] = {
+	0x80000000, 0x20204040, 0x00081010, 0x80810102,
+	0x00204040, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last TCP header */
+static const u32 ice_ptypes_tcp_il[] = {
+	0x04000000, 0x80810102, 0x10204040, 0x42040408,
+	0x00810002, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last SCTP header */
+static const u32 ice_ptypes_sctp_il[] = {
+	0x08000000, 0x01020204, 0x20408081, 0x04080810,
+	0x01020204, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First ICMP header */
+static const u32 ice_ptypes_icmp_of[] = {
+	0x10000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last ICMP header */
+static const u32 ice_ptypes_icmp_il[] = {
+	0x00000000, 0x02040408, 0x40810102, 0x08101020,
+	0x02040408, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First GRE header */
+static const u32 ice_ptypes_gre_of[] = {
+	0x00000000, 0xBFBF7800, 0x00EFDFDF, 0xFEFDE000,
+	0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last MAC header */
+static const u32 ice_ptypes_mac_il[] = {
+	0x00000000, 0x00000000, 0x00EFDE00, 0x00000000,
+	0x03BF7800, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Manage parameters and info used during the creation of a flow profile */
+struct ice_flow_prof_params {
+	enum ice_block blk;
+	struct ice_flow_prof *prof;
+
+	u16 entry_length; /* # of bytes formatted entry will require */
+	u8 es_cnt;
+	struct ice_fv_word es[ICE_MAX_FV_WORDS];
+
+	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
+};
+
+/**
+ * ice_is_pow2 - check if integer value is a power of 2
+ * @val: unsigned integer to be validated
+ */
+static bool ice_is_pow2(u64 val)
+{
+	return (val && !(val & (val - 1)));
+}
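+
+/* ice_is_pow2() uses the classic trick that clearing the lowest set bit of a
+ * power of two leaves zero: 8 & 7 == 0 (true), 6 & 5 == 4 (false); zero
+ * itself is rejected by the first operand.
+ */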
+
+#define ICE_FLOW_SEG_HDRS_L2_MASK	\
+	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
+#define ICE_FLOW_SEG_HDRS_L3_MASK	\
+	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
+#define ICE_FLOW_SEG_HDRS_L4_MASK	\
+	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
+	 ICE_FLOW_SEG_HDR_SCTP)
+
+/**
+ * ice_flow_val_hdrs - validates packet segments for valid protocol headers
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+static enum ice_status
+ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+	const u32 masks = (ICE_FLOW_SEG_HDRS_L2_MASK |
+			   ICE_FLOW_SEG_HDRS_L3_MASK |
+			   ICE_FLOW_SEG_HDRS_L4_MASK);
+	u8 i;
+
+	for (i = 0; i < segs_cnt; i++) {
+		/* No header specified */
+		if (!(segs[i].hdrs & masks) || (segs[i].hdrs & ~masks))
+			return ICE_ERR_PARAM;
+
+		/* Multiple L3 headers */
+		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
+		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
+			return ICE_ERR_PARAM;
+
+		/* Multiple L4 headers */
+		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
+		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
+			return ICE_ERR_PARAM;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/* Sizes of fixed known protocol headers without header options */
+#define ICE_FLOW_PROT_HDR_SZ_MAC	14
+#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
+#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
+#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
+#define ICE_FLOW_PROT_HDR_SZ_ARP	28
+#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
+#define ICE_FLOW_PROT_HDR_SZ_TCP	20
+#define ICE_FLOW_PROT_HDR_SZ_UDP	8
+#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
+
+/**
+ * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose header size is to be determined
+ */
+static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
+{
+	u16 sz;
+
+	/* L2 headers */
+	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
+		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
+
+	/* L3 headers */
+	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
+		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
+		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
+		/* An L3 header is required if an L4 header is specified */
+		return 0;
+
+	/* L4 headers */
+	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
+		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
+		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
+		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
+		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
+
+	return sz;
+}
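+
+/* For example, a segment specifying ETH, IPV4 and UDP headers sizes to
+ * 14 + 20 + 8 = 42 bytes, while a segment with an L4 header but no L3
+ * header is invalid and reported as size 0.
+ */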
+
+/**
+ * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
+ * @params: information about the flow to be processed
+ *
+ * This function identifies the packet types associated with the protocol
+ * headers present in the packet segments of the specified flow profile.
+ */
+static enum ice_status
+ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+{
+	struct ice_flow_prof *prof;
+	u8 i;
+
+	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
+		   ICE_NONDMA_MEM);
+
+	prof = params->prof;
+
+	for (i = 0; i < params->prof->segs_cnt; i++) {
+		const ice_bitmap_t *src;
+		u32 hdrs;
+
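+		/* Only the outermost (i == 0) and innermost (last) segments
+		 * contribute to the PTYPE bitmap; any intermediate segments
+		 * are skipped.
+		 */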
+		if (i > 0 && (i + 1) < prof->segs_cnt)
+			continue;
+
+		hdrs = prof->segs[i].hdrs;
+
+		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
+				(const ice_bitmap_t *)ice_ptypes_mac_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
+		}
+
+		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
+			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_VLAN;
+		}
+
+		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
+			ice_and_bitmap(params->ptypes, params->ptypes,
+				       (const ice_bitmap_t *)ice_ptypes_arp_of,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_ARP;
+		}
+
+		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
+				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_IPV4;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
+				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_IPV6;
+		}
+
+		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
+				(const ice_bitmap_t *)ice_ptypes_icmp_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_ICMP;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_udp_of :
+				(const ice_bitmap_t *)ice_ptypes_udp_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_UDP;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
+			ice_and_bitmap(params->ptypes, params->ptypes,
+				       (const ice_bitmap_t *)ice_ptypes_tcp_il,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_TCP;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
+			src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_SCTP;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
+			if (!i) {
+				src = (const ice_bitmap_t *)ice_ptypes_gre_of;
+				ice_and_bitmap(params->ptypes, params->ptypes,
+					       src, ICE_FLOW_PTYPE_MAX);
+			}
+			hdrs &= ~ICE_FLOW_SEG_HDR_GRE;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
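+
+/* The net effect above is a set intersection: starting from an all-ones
+ * PTYPE bitmap, each header present in a segment ANDs in its table, e.g. an
+ * outermost segment with ETH | IPV4 | UDP keeps only the packet types common
+ * to ice_ptypes_mac_ofos, ice_ptypes_ipv4_ofos and ice_ptypes_udp_of.
+ */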
+
+/**
+ * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
+ * @hw: pointer to the hw struct
+ * @params: information about the flow to be processed
+ * @seg: packet segment index of the field to be extracted
+ * @fld: ID of field to be extracted
+ *
+ * This function determines the protocol ID, offset, and size of the given
+ * field. It then allocates one or more extraction sequence entries for the
+ * given field, and fills the entries with protocol ID and offset information.
+ */
+static enum ice_status
+ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
+		    u8 seg, enum ice_flow_field fld)
+{
+	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
+	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
+	u8 fv_words = hw->blk[params->blk].es.fvw;
+	struct ice_flow_fld_info *flds;
+	u16 cnt, ese_bits, i;
+	s16 adj = 0;
+	u8 off;
+
+	flds = params->prof->segs[seg].fields;
+
+	switch (fld) {
+	case ICE_FLOW_FIELD_IDX_ETH_DA:
+	case ICE_FLOW_FIELD_IDX_ETH_SA:
+	case ICE_FLOW_FIELD_IDX_S_VLAN:
+	case ICE_FLOW_FIELD_IDX_C_VLAN:
+		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
+		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_IP_DSCP:
+		if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+			adj = ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP;
+		/* Fall through */
+	case ICE_FLOW_FIELD_IDX_IP_TTL:
+	case ICE_FLOW_FIELD_IDX_IP_PROT:
+		/* Some fields are located at different offsets in IPv4 and
+		 * IPv6
+		 */
+		if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+			prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S :
+				ICE_PROT_IPV4_IL;
+			/* TTL and PROT share the same extraction seq. entry.
+			 * Each is considered a sibling to the other in terms
+			 * of sharing the same extraction sequence entry.
+			 */
+			if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
+				sib = ICE_FLOW_FIELD_IDX_IP_PROT;
+			else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
+				sib = ICE_FLOW_FIELD_IDX_IP_TTL;
+		} else if (params->prof->segs[seg].hdrs &
+			   ICE_FLOW_SEG_HDR_IPV6) {
+			prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S :
+				ICE_PROT_IPV6_IL;
+			if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
+				adj = ICE_FLOW_FLD_IPV6_TTL_TTL_DISP;
+			else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
+				adj = ICE_FLOW_FLD_IPV6_TTL_PROT_DISP;
+		}
+		break;
+	case ICE_FLOW_FIELD_IDX_IPV4_SA:
+	case ICE_FLOW_FIELD_IDX_IPV4_DA:
+		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_IPV6_SA:
+	case ICE_FLOW_FIELD_IDX_IPV6_DA:
+		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
+	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
+		prot_id = ICE_PROT_TCP_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
+	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
+		prot_id = seg == 0 ? ICE_PROT_UDP_IL_OR_S : ICE_PROT_UDP_OF;
+		break;
+	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
+	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
+		prot_id = ICE_PROT_SCTP_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_ARP_SIP:
+	case ICE_FLOW_FIELD_IDX_ARP_DIP:
+	case ICE_FLOW_FIELD_IDX_ARP_SHA:
+	case ICE_FLOW_FIELD_IDX_ARP_DHA:
+	case ICE_FLOW_FIELD_IDX_ARP_OP:
+		prot_id = ICE_PROT_ARP_OF;
+		break;
+	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
+	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
+		/* ICMP type and code share the same extraction seq. entry */
+		prot_id = (params->prof->segs[seg].hdrs &
+			   ICE_FLOW_SEG_HDR_IPV4) ?
+			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
+		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
+			ICE_FLOW_FIELD_IDX_ICMP_CODE :
+			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
+		break;
+	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
+		prot_id = ICE_PROT_GRE_OF;
+		break;
+	default:
+		return ICE_ERR_NOT_IMPL;
+	}
+
+	/* Each extraction sequence entry is a word in size, and extracts a
+	 * word from a word-aligned offset within a protocol header.
+	 */
+	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * 8;
+
+	flds[fld].xtrct.prot_id = prot_id;
+	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
+		ICE_FLOW_FV_EXTRACT_SZ;
+	flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
+	flds[fld].xtrct.idx = params->es_cnt;
+
+	/* Adjust the next field-entry index after accommodating the number of
+	 * entries this field consumes
+	 */
+	cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
+				  ice_flds_info[fld].size, ese_bits);
+
+	/* Fill in the extraction sequence entries needed for this field */
+	off = flds[fld].xtrct.off;
+	for (i = 0; i < cnt; i++) {
+		/* Only consume an extraction sequence entry if there is no
+		 * sibling field associated with this field, or if the sibling
+		 * entry does not already extract the word shared with this field.
+		 */
+		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
+		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
+		    flds[sib].xtrct.off != off) {
+			u8 idx;
+
+			/* Make sure the number of extraction sequence entries
+			 * required does not exceed the block's capability
+			 */
+			if (params->es_cnt >= fv_words)
+				return ICE_ERR_MAX_LIMIT;
+
+			/* some blocks require a reversed field vector layout */
+			if (hw->blk[params->blk].es.reverse)
+				idx = fv_words - params->es_cnt - 1;
+			else
+				idx = params->es_cnt;
+
+			params->es[idx].prot_id = prot_id;
+			params->es[idx].off = off;
+			params->es_cnt++;
+		}
+
+		off += ICE_FLOW_FV_EXTRACT_SZ;
+	}
+
+	return ICE_SUCCESS;
+}
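+
+/* Worked example, assuming ICE_FLOW_FV_EXTRACT_SZ is one 2-byte word: the
+ * IPv4 source address sits at bit offset 12 * 8 = 96 and is 32 bits wide,
+ * so xtrct.off = (96 / 16) * 2 = 12, xtrct.disp = 0, and
+ * cnt = DIVIDE_AND_ROUND_UP(0 + 32, 16) = 2 extraction entries covering
+ * protocol offsets 12 and 14.
+ */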
+
+/**
+ * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
+ * @hw: pointer to the hw struct
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose raw fields are to be extracted
+ */
+static enum ice_status
+ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
+		     u8 seg)
+{
+	u16 hdrs_sz;
+	u8 i;
+
+	if (!params->prof->segs[seg].raws_cnt)
+		return ICE_SUCCESS;
+
+	if (params->prof->segs[seg].raws_cnt >
+	    ARRAY_SIZE(params->prof->segs[seg].raws))
+		return ICE_ERR_MAX_LIMIT;
+
+	/* Offsets within the segment headers are not supported */
+	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
+	if (!hdrs_sz)
+		return ICE_ERR_PARAM;
+
+	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
+		struct ice_flow_seg_fld_raw *raw;
+		u16 off, cnt, j;
+
+		raw = &params->prof->segs[seg].raws[i];
+
+		/* Only support matching raw fields in the payload */
+		if (raw->off < hdrs_sz)
+			return ICE_ERR_PARAM;
+
+		/* Convert the segment-relative offset into payload-relative
+		 * offset.
+		 */
+		off = raw->off - hdrs_sz;
+
+		/* Storing extraction information */
+		raw->info.xtrct.prot_id = ICE_PROT_PAY;
+		raw->info.xtrct.off = (off / ICE_FLOW_FV_EXTRACT_SZ) *
+			ICE_FLOW_FV_EXTRACT_SZ;
+		raw->info.xtrct.disp = (off % ICE_FLOW_FV_EXTRACT_SZ) * 8;
+		raw->info.xtrct.idx = params->es_cnt;
+
+		/* Determine the number of field vector entries this raw field
+		 * consumes.
+		 */
+		cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
+					  (raw->info.src.last * 8),
+					  ICE_FLOW_FV_EXTRACT_SZ * 8);
+		off = raw->info.xtrct.off;
+		for (j = 0; j < cnt; j++) {
+			/* Make sure the number of extraction sequence entries
+			 * required does not exceed the block's capability
+			 */
+			if (params->es_cnt >= hw->blk[params->blk].es.count ||
+			    params->es_cnt >= ICE_MAX_FV_WORDS)
+				return ICE_ERR_MAX_LIMIT;
+
+			params->es[params->es_cnt].prot_id = ICE_PROT_PAY;
+			params->es[params->es_cnt].off = off;
+			params->es_cnt++;
+			off += ICE_FLOW_FV_EXTRACT_SZ;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
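+
+/* For instance, with a MAC/IPv4/UDP segment (hdrs_sz = 42), a raw field at
+ * segment offset 44 becomes payload offset 2; assuming 2-byte extraction
+ * words, a 3-byte raw match then needs
+ * DIVIDE_AND_ROUND_UP(0 + 3 * 8, 16) = 2 field vector entries.
+ */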
+
+/**
+ * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
+ * @hw: pointer to the hw struct
+ * @params: information about the flow to be processed
+ *
+ * This function iterates through all matched fields in the given segments, and
+ * creates an extraction sequence for the fields.
+ */
+static enum ice_status
+ice_flow_create_xtrct_seq(struct ice_hw *hw,
+			  struct ice_flow_prof_params *params)
+{
+	enum ice_status status = ICE_SUCCESS;
+	u8 i;
+
+	for (i = 0; i < params->prof->segs_cnt; i++) {
+		u64 match = params->prof->segs[i].match;
+		u16 j;
+
+		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+			const u64 bit = BIT_ULL(j);
+
+			if (match & bit) {
+				status = ice_flow_xtract_fld
+					(hw, params, i, (enum ice_flow_field)j);
+				if (status)
+					return status;
+				match &= ~bit;
+			}
+		}
+
+		/* Process raw matching bytes */
+		status = ice_flow_xtract_raws(hw, params, i);
+		if (status)
+			return status;
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_proc_segs - process all packet segments associated with a profile
+ * @hw: pointer to the hw struct
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+	enum ice_status status;
+
+	status = ice_flow_proc_seg_hdrs(params);
+	if (status)
+		return status;
+
+	status = ice_flow_create_xtrct_seq(hw, params);
+	if (status)
+		return status;
+
+	switch (params->blk) {
+	case ICE_BLK_RSS:
+		/* Only header information is provided for RSS configuration.
+		 * No further processing is needed.
+		 */
+		status = ICE_SUCCESS;
+		break;
+	case ICE_BLK_FD:
+		status = ICE_SUCCESS;
+		break;
+	case ICE_BLK_SW:
+	default:
+		return ICE_ERR_NOT_IMPL;
+	}
+
+	return status;
+}
+
+#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
+#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
+
+/**
+ * ice_flow_find_prof_conds - Find a profile matching headers and conditions
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
+ * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
+			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
+			 u8 segs_cnt, u16 vsi_handle, u32 conds)
+{
+	struct ice_flow_prof *p;
+
+	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+		if (p->dir == dir && segs_cnt && segs_cnt == p->segs_cnt) {
+			u8 i;
+
+			/* Check for profile-VSI association if specified */
+			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
+			    ice_is_vsi_valid(hw, vsi_handle) &&
+			    !ice_is_bit_set(p->vsis, vsi_handle))
+				continue;
+
+			/* Protocol headers must be checked. Matched fields are
+			 * checked if specified.
+			 */
+			for (i = 0; i < segs_cnt; i++)
+				if (segs[i].hdrs != p->segs[i].hdrs ||
+				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
+				     segs[i].match != p->segs[i].match))
+					break;
+
+			/* A match is found if all segments are matched */
+			if (i == segs_cnt)
+				return p;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * ice_flow_find_prof - Look up a profile matching headers and matched fields
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+u64
+ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+		   struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+	struct ice_flow_prof *p;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+	p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
+				     ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
+}
+
+/**
+ * ice_flow_find_prof_id - Look up a profile with given profile ID
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @prof_id: unique ID to identify this flow profile
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+	struct ice_flow_prof *p;
+
+	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+		if (p->id == prof_id)
+			return p;
+	}
+
+	return NULL;
+}
+
+/**
+ * ice_flow_rem_entry_sync - Remove a flow entry
+ * @hw: pointer to the hw struct
+ * @entry: flow entry to be removed
+ */
+static enum ice_status
+ice_flow_rem_entry_sync(struct ice_hw *hw, struct ice_flow_entry *entry)
+{
+	if (!entry)
+		return ICE_ERR_BAD_PTR;
+
+	LIST_DEL(&entry->l_entry);
+
+	if (entry->entry)
+		ice_free(hw, entry->entry);
+
+	if (entry->acts)
+		ice_free(hw, entry->acts);
+
+	ice_free(hw, entry);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @acts: array of default actions
+ * @acts_cnt: number of default actions
+ * @prof: stores the returned flow profile added
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
+		       enum ice_flow_dir dir, u64 prof_id,
+		       struct ice_flow_seg_info *segs, u8 segs_cnt,
+		       struct ice_flow_action *acts, u8 acts_cnt,
+		       struct ice_flow_prof **prof)
+{
+	struct ice_flow_prof_params params;
+	enum ice_status status = ICE_SUCCESS;
+	u8 i;
+
+	if (!prof || (acts_cnt && !acts))
+		return ICE_ERR_BAD_PTR;
+
+	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
+	params.prof = (struct ice_flow_prof *)
+		ice_malloc(hw, sizeof(*params.prof));
+	if (!params.prof)
+		return ICE_ERR_NO_MEMORY;
+
+	/* initialize extraction sequence to all invalid (0xff) */
+	ice_memset(params.es, 0xff, sizeof(params.es), ICE_NONDMA_MEM);
+
+	params.blk = blk;
+	params.prof->id = prof_id;
+	params.prof->dir = dir;
+	params.prof->segs_cnt = segs_cnt;
+
+	/* Make a copy of the segments that need to be persistent in the flow
+	 * profile instance
+	 */
+	for (i = 0; i < segs_cnt; i++)
+		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
+			   ICE_NONDMA_TO_NONDMA);
+
+	/* Make a copy of the actions that need to be persistent in the flow
+	 * profile instance.
+	 */
+	if (acts_cnt) {
+		params.prof->acts = (struct ice_flow_action *)
+			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
+				   ICE_NONDMA_TO_NONDMA);
+
+		if (!params.prof->acts) {
+			status = ICE_ERR_NO_MEMORY;
+			goto out;
+		}
+	}
+
+	status = ice_flow_proc_segs(hw, &params);
+	if (status) {
+		ice_debug(hw, ICE_DBG_FLOW,
+			  "Error processing a flow's packet segments\n");
+		goto out;
+	}
+
+	/* Add a HW profile for this flow profile */
+	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+	if (status) {
+		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&params.prof->entries);
+	ice_init_lock(&params.prof->entries_lock);
+	*prof = params.prof;
+
+out:
+	if (status) {
+		if (params.prof->acts)
+			ice_free(hw, params.prof->acts);
+		ice_free(hw, params.prof);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_rem_prof_sync - remove a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile to remove
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
+		       struct ice_flow_prof *prof)
+{
+	enum ice_status status = ICE_SUCCESS;
+
+	/* Remove all remaining flow entries before removing the flow profile */
+	if (!LIST_EMPTY(&prof->entries)) {
+		struct ice_flow_entry *e, *t;
+
+		ice_acquire_lock(&prof->entries_lock);
+
+		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
+					 l_entry) {
+			status = ice_flow_rem_entry_sync(hw, e);
+			if (status)
+				break;
+		}
+
+		ice_release_lock(&prof->entries_lock);
+	}
+
+	/* Remove all hardware profiles associated with this flow profile */
+	status = ice_rem_prof(hw, blk, prof->id);
+	if (!status) {
+		LIST_DEL(&prof->l_entry);
+		ice_destroy_lock(&prof->entries_lock);
+		if (prof->acts)
+			ice_free(hw, prof->acts);
+		ice_free(hw, prof);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_assoc_prof - associate a VSI with a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+		    struct ice_flow_prof *prof, u16 vsi_handle)
+{
+	enum ice_status status = ICE_SUCCESS;
+
+	if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
+		status = ice_add_prof_id_flow(hw, blk,
+					      ice_get_hw_vsi_num(hw,
+								 vsi_handle),
+					      prof->id);
+		if (!status)
+			ice_set_bit(vsi_handle, prof->vsis);
+		else
+			ice_debug(hw, ICE_DBG_FLOW,
+				  "HW profile add failed, %d\n",
+				  status);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
+		       struct ice_flow_prof *prof, u16 vsi_handle)
+{
+	enum ice_status status = ICE_SUCCESS;
+
+	if (ice_is_bit_set(prof->vsis, vsi_handle)) {
+		status = ice_rem_prof_id_flow(hw, blk,
+					      ice_get_hw_vsi_num(hw,
+								 vsi_handle),
+					      prof->id);
+		if (!status)
+			ice_clear_bit(vsi_handle, prof->vsis);
+		else
+			ice_debug(hw, ICE_DBG_FLOW,
+				  "HW profile remove failed, %d\n",
+				  status);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @acts: array of default actions
+ * @acts_cnt: number of default actions
+ * @prof: stores the returned flow profile added
+ */
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+		  struct ice_flow_action *acts, u8 acts_cnt,
+		  struct ice_flow_prof **prof)
+{
+	enum ice_status status;
+
+	if (segs_cnt > ICE_FLOW_SEG_MAX)
+		return ICE_ERR_MAX_LIMIT;
+
+	if (!segs_cnt)
+		return ICE_ERR_PARAM;
+
+	if (!segs)
+		return ICE_ERR_BAD_PTR;
+
+	status = ice_flow_val_hdrs(segs, segs_cnt);
+	if (status)
+		return status;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
+					acts, acts_cnt, prof);
+	if (!status)
+		LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
+
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return status;
+}
+
+/**
+ * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
+ * @hw: pointer to the hw struct
+ * @blk: the block for which the flow profile is to be removed
+ * @prof_id: unique ID of the flow profile to be removed
+ */
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+	struct ice_flow_prof *prof;
+	enum ice_status status;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+	prof = ice_flow_find_prof_id(hw, blk, prof_id);
+	if (!prof) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+		goto out;
+	}
+
+	/* prof becomes invalid after the call */
+	status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return status;
+}
+
+/**
+ * ice_flow_get_hw_prof - return the hw profile for a specific profile id handle
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @prof_id: the profile id handle
+ * @hw_prof_id: pointer to variable to receive the hw profile id
+ */
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		     u8 *hw_prof_id)
+{
+	struct ice_prof_map *map;
+
+	map = ice_search_prof_id(hw, blk, prof_id);
+	if (map) {
+		*hw_prof_id = map->prof_id;
+		return ICE_SUCCESS;
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_flow_find_entry - look for a flow entry using its unique ID
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @entry_id: unique ID to identify this flow entry
+ *
+ * This function looks for the flow entry with the specified unique ID in all
+ * flow profiles of the specified classification stage. If the entry is found,
+ * it returns the handle to the flow entry. Otherwise, it returns
+ * ICE_FLOW_ENTRY_HANDLE_INVAL.
+ */
+u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
+{
+	struct ice_flow_entry *found = NULL;
+	struct ice_flow_prof *p;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+		struct ice_flow_entry *e;
+
+		ice_acquire_lock(&p->entries_lock);
+		LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
+			if (e->id == entry_id) {
+				found = e;
+				break;
+			}
+		ice_release_lock(&p->entries_lock);
+
+		if (found)
+			break;
+	}
+
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
+}
+
+/**
+ * ice_flow_add_entry - Add a flow entry
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @prof_id: ID of the profile to add a new flow entry to
+ * @entry_id: unique ID to identify this flow entry
+ * @vsi_handle: software VSI handle for the flow entry
+ * @prio: priority of the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @acts: arrays of actions to be performed on a match
+ * @acts_cnt: number of actions
+ * @entry_h: pointer to buffer that receives the new flow entry's handle
+ */
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
+		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
+		   u64 *entry_h)
+{
+	struct ice_flow_prof *prof = NULL;
+	struct ice_flow_entry *e = NULL;
+	enum ice_status status = ICE_SUCCESS;
+
+	if (acts_cnt && !acts)
+		return ICE_ERR_PARAM;
+
+	/* No flow entry data is expected for RSS */
+	if (!entry_h || (!data && blk != ICE_BLK_RSS))
+		return ICE_ERR_BAD_PTR;
+
+	if (!ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+	prof = ice_flow_find_prof_id(hw, blk, prof_id);
+	if (!prof) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+	} else {
+		/* Allocate memory for the entry being added and associate
+		 * the VSI with the found flow profile
+		 */
+		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
+		if (!e)
+			status = ICE_ERR_NO_MEMORY;
+		else
+			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+	}
+
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+	if (status)
+		goto out;
+
+	e->id = entry_id;
+	e->vsi_handle = vsi_handle;
+	e->prof = prof;
+
+	switch (blk) {
+	case ICE_BLK_RSS:
+		/* RSS will add only one entry per VSI per profile */
+		break;
+	case ICE_BLK_FD:
+		break;
+	case ICE_BLK_SW:
+	case ICE_BLK_PE:
+	default:
+		status = ICE_ERR_NOT_IMPL;
+		goto out;
+	}
+
+	ice_acquire_lock(&prof->entries_lock);
+	LIST_ADD(&e->l_entry, &prof->entries);
+	ice_release_lock(&prof->entries_lock);
+
+	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
+
+out:
+	if (status && e) {
+		if (e->entry)
+			ice_free(hw, e->entry);
+		ice_free(hw, e);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_rem_entry - Remove a flow entry
+ * @hw: pointer to the hw struct
+ * @entry_h: handle to the flow entry to be removed
+ */
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h)
+{
+	struct ice_flow_entry *entry;
+	struct ice_flow_prof *prof;
+	enum ice_status status;
+
+	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
+		return ICE_ERR_PARAM;
+
+	entry = ICE_FLOW_ENTRY_PTR(entry_h);
+
+	/* Retain the pointer to the flow profile as the entry will be freed */
+	prof = entry->prof;
+
+	ice_acquire_lock(&prof->entries_lock);
+	status = ice_flow_rem_entry_sync(hw, entry);
+	ice_release_lock(&prof->entries_lock);
+
+	return status;
+}
+
+/**
+ * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @type: type of the field
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ *           entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ *            input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ *            entry's input buffer
+ *
+ * This helper function stores information of a field being matched, including
+ * the type of the field and the locations of the value to match, the mask, and
+ * the upper-bound value at the start of the input buffer for a flow entry.
+ * This function should only be used for fixed-size data structures.
+ *
+ * This function also opportunistically determines the protocol headers to be
+ * present based on the fields being set. Some fields cannot be used alone to
+ * determine the protocol headers present. Sometimes, fields for particular
+ * protocol headers are not matched. In those cases, the protocol headers
+ * must be explicitly set.
+ */
+static void
+ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+		     enum ice_flow_fld_match_type type, u16 val_loc,
+		     u16 mask_loc, u16 last_loc)
+{
+	u64 bit = BIT_ULL(fld);
+
+	seg->match |= bit;
+	if (type == ICE_FLOW_FLD_TYPE_RANGE)
+		seg->range |= bit;
+
+	seg->fields[fld].type = type;
+	seg->fields[fld].src.val = val_loc;
+	seg->fields[fld].src.mask = mask_loc;
+	seg->fields[fld].src.last = last_loc;
+
+	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
+}
+
+/**
+ * ice_flow_set_fld - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ *           entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ *            input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ *            entry's input buffer
+ * @range: indicate if field being matched is to be in a range
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match,
+ * the mask value, and upper value can be extracted. These locations are then
+ * stored in the flow profile. When adding a flow entry associated with the
+ * flow profile, these locations will be used to quickly extract the values and
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
+{
+	enum ice_flow_fld_match_type t = range ?
+		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
+
+	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
+}
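+
+/* A minimal usage sketch (hypothetical, not part of this patch): describe a
+ * TCP/IPv4 match whose value and mask bytes live in a caller-defined input
+ * buffer. The struct layout below is an assumed example.
+ *
+ *	struct fltr_buf {
+ *		u32 sip;
+ *		u32 sip_mask;
+ *		u16 sport;
+ *	};
+ *
+ *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ *			 offsetof(struct fltr_buf, sip),
+ *			 offsetof(struct fltr_buf, sip_mask),
+ *			 ICE_FLOW_FLD_OFF_INVAL, false);
+ *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ *			 offsetof(struct fltr_buf, sport),
+ *			 ICE_FLOW_FLD_OFF_INVAL,
+ *			 ICE_FLOW_FLD_OFF_INVAL, false);
+ */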
+
+/**
+ * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ *           entry's input buffer
+ * @pref_loc: location of prefix value from entry's input buffer
+ * @pref_sz: size of the location holding the prefix value
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match
+ * and the IPv4 prefix value can be extracted. These locations are then stored
+ * in the flow profile. When adding flow entries to the associated flow profile,
+ * these locations can be used to quickly extract the values to create the
+ * content of a match entry. This function should only be used for fixed-size
+ * data structures.
+ */
+void
+ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+			u16 val_loc, u16 pref_loc, u8 pref_sz)
+{
+	/* For this type of field, the "mask" location is for the prefix value's
+	 * location and the "last" location is for the size of the location of
+	 * the prefix value.
+	 */
+	ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
+			     pref_loc, (u16)pref_sz);
+}
+
+/**
+ * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
+ * @seg: packet segment the field being set belongs to
+ * @off: offset of the raw field from the beginning of the segment in bytes
+ * @len: length of the raw pattern to be matched
+ * @val_loc: location of the value to match from entry's input buffer
+ * @mask_loc: location of mask value from entry's input buffer
+ *
+ * This function specifies the offset of the raw field to be matched, from
+ * the beginning of the specified packet segment, and the locations, in the
+ * form of byte offsets from the start of the input buffer for a flow entry,
+ * from which the value to match and the mask value are extracted. These are
+ * then stored in the flow profile. When adding flow entries to the associated
+ * flow profile, these locations can be used to quickly extract the values to
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+		     u16 val_loc, u16 mask_loc)
+{
+	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
+		seg->raws[seg->raws_cnt].off = off;
+		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
+		seg->raws[seg->raws_cnt].info.src.val = val_loc;
+		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
+		/* The "last" field is used to store the length of the field */
+		seg->raws[seg->raws_cnt].info.src.last = len;
+	}
+
+	/* Overflows of "raws" will be handled as an error condition later in
+	 * the flow when this information is processed.
+	 */
+	seg->raws_cnt++;
+}
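+
+/* Hedged sketch of raw-field usage (offsets are illustrative only): match a
+ * 2-byte pattern at offset 12 within the segment, with the pattern stored at
+ * offset 0 and its mask at offset 2 of the entry's input buffer.
+ *
+ *	ice_flow_add_fld_raw(seg, 12, 2, 0, 2);
+ */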
+
+#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
+	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+
+#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
+	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
+	 ICE_FLOW_SEG_HDR_SCTP)
+
+#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
+	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
+
+/**
+ * ice_flow_set_rss_seg_info - setup packet segments for RSS
+ * @segs: pointer to the flow field segment(s)
+ * @hash_fields: fields to be hashed on for the segment(s)
+ * @flow_hdr: protocol header fields within a packet segment
+ *
+ * Helper function to extract fields from the hash bitmap and use the flow
+ * header value to set the flow field segment, for further use in flow
+ * profile entry addition or removal.
+ */
+static enum ice_status
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
+			  u32 flow_hdr)
+{
+	u64 val = hash_fields;
+	u8 i;
+
+	for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
+		u64 bit = BIT_ULL(i);
+
+		if (val & bit) {
+			ice_flow_set_fld(segs, (enum ice_flow_field)i,
+					 ICE_FLOW_FLD_OFF_INVAL,
+					 ICE_FLOW_FLD_OFF_INVAL,
+					 ICE_FLOW_FLD_OFF_INVAL, false);
+			val &= ~bit;
+		}
+	}
+	ICE_FLOW_SET_HDRS(segs, flow_hdr);
+
+	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+		return ICE_ERR_PARAM;
+
+	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+	if (!ice_is_pow2(val))
+		return ICE_ERR_CFG;
+
+	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+	if (val && !ice_is_pow2(val))
+		return ICE_ERR_CFG;
+
+	return ICE_SUCCESS;
+}
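+
+/* Worked example of the checks above (illustrative, not part of this patch):
+ * a segment with hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP passes,
+ * as exactly one L3 bit and one L4 bit are set. Setting both the IPV4 and
+ * IPV6 bits fails the ice_is_pow2() check and returns ICE_ERR_CFG.
+ */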
+
+/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
+ * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
+ * hash, convert them to the appropriate flow L3/L4 values.
+ */
+#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
+	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
+	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+
+#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
+	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
+	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+
+#define ICE_FLOW_MAX_CFG	10
+
+/**
+ * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ *
+ * This function will take the hash bitmap provided by the AVF driver via a
+ * message, convert it to ICE-compatible values, and configure RSS flow
+ * profiles.
+ */
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+{
+	u64 added_cfg[ICE_FLOW_MAX_CFG], hash_flds;
+	enum ice_status status = ICE_SUCCESS;
+	u8 i, idx = 0;
+
+	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
+	    !ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	/* Make sure no unsupported bits are specified */
+	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
+			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
+		return ICE_ERR_CFG;
+
+	hash_flds = avf_hash;
+
+	/* Always create an L3 RSS configuration for any L4 RSS configuration */
+	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
+		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
+
+	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
+		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
+
+	/* Create the corresponding RSS configuration for each valid hash bit */
+	while (hash_flds) {
+		u64 rss_hash = ICE_HASH_INVALID;
+
+		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
+			if (hash_flds & ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV4 |
+					ICE_FLOW_HASH_TCP_PORT;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
+			} else if (hash_flds &
+				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV4 |
+					ICE_FLOW_HASH_UDP_PORT;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
+			} else if (hash_flds &
+				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+				rss_hash = ICE_FLOW_HASH_IPV4 |
+					ICE_FLOW_HASH_SCTP_PORT;
+				hash_flds &=
+					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+			} else if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV4;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
+			}
+		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
+			if (hash_flds & ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV6 |
+					ICE_FLOW_HASH_TCP_PORT;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
+			} else if (hash_flds &
+				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV6 |
+					ICE_FLOW_HASH_UDP_PORT;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
+			} else if (hash_flds &
+				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+				rss_hash = ICE_FLOW_HASH_IPV6 |
+					ICE_FLOW_HASH_SCTP_PORT;
+				hash_flds &=
+					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+			} else if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV6;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
+			}
+		}
+
+		if (rss_hash == ICE_HASH_INVALID)
+			return ICE_ERR_OUT_OF_RANGE;
+
+		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
+					 ICE_FLOW_SEG_HDR_NONE);
+		if (status)
+			break;
+		added_cfg[idx++] = rss_hash;
+	}
+
+	/* On failure, remove all hash configurations that were successfully
+	 * added earlier in this call for the VSI.
+	 */
+	if (status)
+		for (i = 0; i < idx; i++)
+			ice_rem_rss_cfg(hw, vsi_handle, added_cfg[i],
+					ICE_FLOW_SEG_HDR_NONE);
+
+	return status;
+}
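+
+/* Hedged usage sketch: a PF servicing an RSS hash-enable request from an AVF
+ * VF might forward the requested bits as below (variable names are
+ * illustrative only).
+ *
+ *	u64 hena = BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) |
+ *		   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER);
+ *
+ *	status = ice_add_avf_rss_cfg(hw, vsi_handle, hena);
+ */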
+
+/**
+ * ice_rem_all_rss_vsi_ctx - remove all RSS configurations from VSI context
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * Remove and free all RSS configuration entries tracked in the VSI context.
+ */
+void ice_rem_all_rss_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+	struct ice_rss_cfg *r, *tmp;
+
+	if (!ice_is_vsi_valid(hw, vsi_handle) ||
+	    LIST_EMPTY(&hw->vsi_ctx[vsi_handle]->rss_list_head))
+		return;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	LIST_FOR_EACH_ENTRY_SAFE(r, tmp,
+				 &hw->vsi_ctx[vsi_handle]->rss_list_head,
+				 ice_rss_cfg, l_entry) {
+		LIST_DEL(&r->l_entry);
+		ice_free(hw, r);
+	}
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+}
+
+/**
+ * ice_rem_vsi_rss_cfg - remove RSS configurations associated with vsi
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function will iterate through all flow profiles and disassociate
+ * the VSI from each profile. If a flow profile no longer has any VSIs
+ * associated, it will be removed.
+ */
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+	const enum ice_block blk = ICE_BLK_RSS;
+	struct ice_flow_prof *p, *t;
+	enum ice_status status = ICE_SUCCESS;
+
+	if (!ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+	LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
+				 l_entry) {
+		if (ice_is_bit_set(p->vsis, vsi_handle)) {
+			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
+			if (status)
+				break;
+
+			if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
+				status = ice_flow_rem_prof_sync(hw, blk, p);
+				if (status)
+					break;
+			}
+		}
+	}
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return status;
+}
+
+/**
+ * ice_rem_rss_cfg_vsi_ctx - remove RSS configuration from VSI context
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static void
+ice_rem_rss_cfg_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
+			struct ice_flow_prof *prof)
+{
+	struct ice_rss_cfg *r, *tmp;
+
+	/* Search for RSS hash fields associated with the VSI that match the
+	 * hash configurations associated with the flow profile. If found,
+	 * remove it from the RSS entry list of the VSI context and delete it.
+	 */
+	LIST_FOR_EACH_ENTRY_SAFE(r, tmp,
+				 &hw->vsi_ctx[vsi_handle]->rss_list_head,
+				 ice_rss_cfg, l_entry) {
+		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+			LIST_DEL(&r->l_entry);
+			ice_free(hw, r);
+			return;
+		}
+	}
+}
+
+/**
+ * ice_add_rss_vsi_ctx - add RSS configuration to VSI context
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
+		    struct ice_flow_prof *prof)
+{
+	struct ice_rss_cfg *r, *rss_cfg;
+
+	LIST_FOR_EACH_ENTRY(r, &hw->vsi_ctx[vsi_handle]->rss_list_head,
+			    ice_rss_cfg, l_entry)
+		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs)
+			return ICE_SUCCESS;
+
+	rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
+	if (!rss_cfg)
+		return ICE_ERR_NO_MEMORY;
+
+	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
+	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+	LIST_ADD(&rss_cfg->l_entry, &hw->vsi_ctx[vsi_handle]->rss_list_head);
+
+	return ICE_SUCCESS;
+}
+
+#define ICE_FLOW_PROF_HASH_S	0
+#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
+#define ICE_FLOW_PROF_HDR_S	32
+#define ICE_FLOW_PROF_HDR_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+
+#define ICE_FLOW_GEN_PROFID(hash, hdr) \
+	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M))
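+
+/* Worked example (values derived from the macros above): hashing on IPv4
+ * src/dst addresses with the IPv4 header present packs as
+ *
+ *	ICE_FLOW_GEN_PROFID(ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4)
+ *		== ((u64)ICE_FLOW_SEG_HDR_IPV4 << 32) | ICE_FLOW_HASH_IPV4
+ *
+ * i.e. the hashed fields occupy the low 32 bits and the protocol headers the
+ * high 32 bits of the 64-bit profile ID.
+ */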
+
+/**
+ * ice_add_rss_cfg_sync - add an RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		     u32 addl_hdrs)
+{
+	const enum ice_block blk = ICE_BLK_RSS;
+	struct ice_flow_prof *prof = NULL;
+	struct ice_flow_seg_info *segs;
+	enum ice_status status = ICE_SUCCESS;
+
+	segs = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*segs));
+	if (!segs)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Construct the packet segment info from the hashed fields */
+	status = ice_flow_set_rss_seg_info(segs, hashed_flds, addl_hdrs);
+	if (status)
+		goto exit;
+
+	/* Search for a flow profile that has matching headers, hash fields
+	 * and has the input VSI associated to it. If found, no further
+	 * operations are required, so exit.
+	 */
+	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+					vsi_handle,
+					ICE_FLOW_FIND_PROF_CHK_FLDS |
+					ICE_FLOW_FIND_PROF_CHK_VSI);
+	if (prof)
+		goto exit;
+
+	/* Check if a flow profile exists with the same protocol headers and
+	 * associated with the input VSI. If so, disassociate the VSI from
+	 * this profile. The VSI will be added to a new profile created with
+	 * the protocol header and new hash field configuration.
+	 */
+	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+	if (prof) {
+		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+		if (!status)
+			ice_rem_rss_cfg_vsi_ctx(hw, vsi_handle, prof);
+		else
+			goto exit;
+
+		/* Remove the profile if it has no VSIs associated */
+		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
+			status = ice_flow_rem_prof_sync(hw, blk, prof);
+			if (status)
+				goto exit;
+		}
+	}
+
+	/* Search for a profile that has the same match fields only. If one
+	 * exists, then associate the VSI to this profile.
+	 */
+	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+					vsi_handle,
+					ICE_FLOW_FIND_PROF_CHK_FLDS);
+	if (prof) {
+		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+		if (!status)
+			status = ice_add_rss_vsi_ctx(hw, vsi_handle, prof);
+		goto exit;
+	}
+
+	/* Create a new flow profile with generated profile and packet
+	 * segment information.
+	 */
+	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
+				   ICE_FLOW_GEN_PROFID(hashed_flds, segs->hdrs),
+				   segs, 1, NULL, 0, &prof);
+	if (status)
+		goto exit;
+
+	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+	/* If association to a new flow profile failed then this profile can
+	 * be removed.
+	 */
+	if (status) {
+		ice_flow_rem_prof_sync(hw, blk, prof);
+		goto exit;
+	}
+
+	status = ice_add_rss_vsi_ctx(hw, vsi_handle, prof);
+
+exit:
+	ice_free(hw, segs);
+	return status;
+}
+
+/**
+ * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ *
+ * This function will generate a flow profile based on the input fields to
+ * hash on and the flow type, and then use the VSI number to add a flow
+ * entry to the profile.
+ */
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		u32 addl_hdrs)
+{
+	enum ice_status status;
+
+	if (hashed_flds == ICE_HASH_INVALID ||
+	    !ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs);
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+
+	return status;
+}
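+
+/* Usage sketch (assumed caller context, error handling elided): enable
+ * TCP/IPv4 RSS on a VSI and later remove it again.
+ *
+ *	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ *				 ICE_FLOW_SEG_HDR_NONE);
+ *	...
+ *	status = ice_rem_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ *				 ICE_FLOW_SEG_HDR_NONE);
+ */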
+
+/**
+ * ice_rem_rss_cfg_sync - remove an existing RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
+ * @addl_hdrs: Protocol header fields within a packet segment
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		     u32 addl_hdrs)
+{
+	const enum ice_block blk = ICE_BLK_RSS;
+	struct ice_flow_seg_info *segs;
+	struct ice_flow_prof *prof;
+	enum ice_status status;
+
+	segs = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*segs));
+	if (!segs)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Construct the packet segment info from the hashed fields */
+	status = ice_flow_set_rss_seg_info(segs, hashed_flds, addl_hdrs);
+	if (status)
+		goto out;
+
+	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+					vsi_handle,
+					ICE_FLOW_FIND_PROF_CHK_FLDS);
+	if (!prof) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+		goto out;
+	}
+
+	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+	if (status)
+		goto out;
+
+	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
+		status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+	ice_rem_rss_cfg_vsi_ctx(hw, vsi_handle, prof);
+
+out:
+	ice_free(hw, segs);
+	return status;
+}
+
+/**
+ * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
+ * @addl_hdrs: Protocol header fields within a packet segment
+ *
+ * This function will look up the flow profile based on the input
+ * hash field bitmap, iterate through the profile entry list of
+ * that profile, and find the entry associated with the input VSI
+ * to be removed. Calls are made to the underlying flow APIs, which
+ * will in turn build or update buffers for the RSS XLT1 section.
+ */
+enum ice_status
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		u32 addl_hdrs)
+{
+	enum ice_status status;
+
+	if (hashed_flds == ICE_HASH_INVALID ||
+	    !ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs);
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+
+	return status;
+}
+
+/**
+ * ice_replay_rss_cfg - replay RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ */
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+	enum ice_status status = ICE_SUCCESS;
+	struct ice_rss_cfg *r;
+
+	if (!ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	LIST_FOR_EACH_ENTRY(r, &hw->vsi_ctx[vsi_handle]->rss_list_head,
+			    ice_rss_cfg, l_entry) {
+		status = ice_add_rss_cfg_sync(hw, vsi_handle, r->hashed_flds,
+					      r->packet_hdr);
+		if (status)
+			break;
+	}
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+
+	return status;
+}
+
+/**
+ * ice_get_rss_cfg - returns hashed fields for the given header types
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hdrs: protocol header type
+ *
+ * This function will return the match fields of the first instance of a flow
+ * profile having the given header types and containing the input VSI.
+ */
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+{
+	struct ice_rss_cfg *r, *rss_cfg = NULL;
+
+	/* verify that the protocol header is non-zero and the VSI is valid */
+	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_HASH_INVALID;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	LIST_FOR_EACH_ENTRY(r, &hw->vsi_ctx[vsi_handle]->rss_list_head,
+			    ice_rss_cfg, l_entry)
+		if (r->packet_hdr == hdrs) {
+			rss_cfg = r;
+			break;
+		}
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+
+	return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+}
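+
+/* Query sketch (hypothetical): retrieve the hashed fields currently
+ * configured for TCP/IPv4 traffic on a VSI; a return of ICE_HASH_INVALID
+ * means no matching configuration exists.
+ *
+ *	u64 flds = ice_get_rss_cfg(hw, vsi_handle,
+ *				   ICE_FLOW_SEG_HDR_IPV4 |
+ *				   ICE_FLOW_SEG_HDR_TCP);
+ */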
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 228a2c089..ca8cb5369 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -5,4 +5,341 @@
 #ifndef _ICE_FLOW_H_
 #define _ICE_FLOW_H_
 
+#include "ice_flex_type.h"
+#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
+#define ICE_FLOW_PROF_ID_INVAL		0xfffffffffffffffful
+#define ICE_FLOW_PROF_ID_BYPASS		0
+#define ICE_FLOW_PROF_ID_DEFAULT	1
+#define ICE_FLOW_ENTRY_HANDLE_INVAL	0
+#define ICE_FLOW_VSI_INVAL		0xffff
+#define ICE_FLOW_FLD_OFF_INVAL		0xffff
+
+/* Use any of the flow field types to generate an equivalent hash field */
+#define ICE_FLOW_HASH_FLD(t)	(1ULL << (t))
+
+#define ICE_FLOW_HASH_IPV4	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_IPV4_DA))
+#define ICE_FLOW_HASH_IPV6	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_TCP_PORT	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
+#define ICE_FLOW_HASH_UDP_PORT	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
+#define ICE_FLOW_HASH_SCTP_PORT	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
+
+#define ICE_HASH_INVALID	0
+#define ICE_HASH_TCP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_TCP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_UDP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_SCTP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
+
+/* Protocol header fields within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates, or is encapsulated by,
+ * tunneling or encapsulation protocols for network virtualization such as GRE,
+ * VxLAN, etc.
+ */
+enum ice_flow_seg_hdr {
+	ICE_FLOW_SEG_HDR_NONE	= 0x00000000,
+	ICE_FLOW_SEG_HDR_ETH	= 0x00000001,
+	ICE_FLOW_SEG_HDR_VLAN	= 0x00000002,
+	ICE_FLOW_SEG_HDR_IPV4	= 0x00000004,
+	ICE_FLOW_SEG_HDR_IPV6	= 0x00000008,
+	ICE_FLOW_SEG_HDR_ARP	= 0x00000010,
+	ICE_FLOW_SEG_HDR_ICMP	= 0x00000020,
+	ICE_FLOW_SEG_HDR_TCP	= 0x00000040,
+	ICE_FLOW_SEG_HDR_UDP	= 0x00000080,
+	ICE_FLOW_SEG_HDR_SCTP	= 0x00000100,
+	ICE_FLOW_SEG_HDR_GRE	= 0x00000200,
+};
+
+enum ice_flow_field {
+	/* L2 */
+	ICE_FLOW_FIELD_IDX_ETH_DA,
+	ICE_FLOW_FIELD_IDX_ETH_SA,
+	ICE_FLOW_FIELD_IDX_S_VLAN,
+	ICE_FLOW_FIELD_IDX_C_VLAN,
+	ICE_FLOW_FIELD_IDX_ETH_TYPE,
+	/* L3 */
+	ICE_FLOW_FIELD_IDX_IP_DSCP,
+	ICE_FLOW_FIELD_IDX_IP_TTL,
+	ICE_FLOW_FIELD_IDX_IP_PROT,
+	ICE_FLOW_FIELD_IDX_IPV4_SA,
+	ICE_FLOW_FIELD_IDX_IPV4_DA,
+	ICE_FLOW_FIELD_IDX_IPV6_SA,
+	ICE_FLOW_FIELD_IDX_IPV6_DA,
+	/* L4 */
+	ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+	ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
+	ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
+	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+	ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+	/* ARP */
+	ICE_FLOW_FIELD_IDX_ARP_SIP,
+	ICE_FLOW_FIELD_IDX_ARP_DIP,
+	ICE_FLOW_FIELD_IDX_ARP_SHA,
+	ICE_FLOW_FIELD_IDX_ARP_DHA,
+	ICE_FLOW_FIELD_IDX_ARP_OP,
+	/* ICMP */
+	ICE_FLOW_FIELD_IDX_ICMP_TYPE,
+	ICE_FLOW_FIELD_IDX_ICMP_CODE,
+	/* GRE */
+	ICE_FLOW_FIELD_IDX_GRE_KEYID,
+	 /* The total number of enums must not exceed 64 */
+	ICE_FLOW_FIELD_IDX_MAX
+};
+
+/* Flow headers and fields for AVF support */
+enum ice_flow_avf_hdr_field {
+	/* Values 0 - 28 are reserved for future use */
+	ICE_AVF_FLOW_FIELD_INVALID		= 0,
+	ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP	= 29,
+	ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
+	ICE_AVF_FLOW_FIELD_IPV4_UDP,
+	ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
+	ICE_AVF_FLOW_FIELD_IPV4_TCP,
+	ICE_AVF_FLOW_FIELD_IPV4_SCTP,
+	ICE_AVF_FLOW_FIELD_IPV4_OTHER,
+	ICE_AVF_FLOW_FIELD_FRAG_IPV4,
+	ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP	= 39,
+	ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
+	ICE_AVF_FLOW_FIELD_IPV6_UDP,
+	ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
+	ICE_AVF_FLOW_FIELD_IPV6_TCP,
+	ICE_AVF_FLOW_FIELD_IPV6_SCTP,
+	ICE_AVF_FLOW_FIELD_IPV6_OTHER,
+	ICE_AVF_FLOW_FIELD_FRAG_IPV6,
+	ICE_AVF_FLOW_FIELD_RSVD47,
+	ICE_AVF_FLOW_FIELD_FCOE_OX,
+	ICE_AVF_FLOW_FIELD_FCOE_RX,
+	ICE_AVF_FLOW_FIELD_FCOE_OTHER,
+	/* Values 51-62 are reserved */
+	ICE_AVF_FLOW_FIELD_L2_PAYLOAD		= 63,
+	ICE_AVF_FLOW_FIELD_MAX
+};
+
+/* Supported RSS offloads. This macro is defined to support the
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS op, in which the PF driver reports the RSS
+ * hardware capabilities to the caller.
+ */
+#define ICE_DEFAULT_RSS_HENA ( \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+
+enum ice_flow_dir {
+	ICE_FLOW_DIR_UNDEFINED	= 0,
+	ICE_FLOW_TX		= 0x01,
+	ICE_FLOW_RX		= 0x02,
+	ICE_FLOW_TX_RX		= ICE_FLOW_RX | ICE_FLOW_TX
+};
+
+enum ice_flow_priority {
+	ICE_FLOW_PRIO_LOW,
+	ICE_FLOW_PRIO_NORMAL,
+	ICE_FLOW_PRIO_HIGH
+};
+
+#define ICE_FLOW_SEG_MAX		2
+#define ICE_FLOW_SEG_RAW_FLD_MAX	2
+#define ICE_FLOW_PROFILE_MAX		1024
+#define ICE_FLOW_SW_FIELD_VECTOR_MAX	48
+#define ICE_FLOW_ACL_FIELD_VECTOR_MAX	32
+#define ICE_FLOW_FV_EXTRACT_SZ		2
+
+#define ICE_FLOW_SET_HDRS(seg, val)	((seg)->hdrs |= (u32)(val))
+
+struct ice_flow_seg_xtrct {
+	u8 prot_id;	/* Protocol ID of extracted header field */
+	u8 off;		/* Starting offset of the field in header in bytes */
+	u8 idx;		/* Index of FV entry used */
+	u8 disp;	/* Displacement of field in bits from FV entry start */
+};
+
+enum ice_flow_fld_match_type {
+	ICE_FLOW_FLD_TYPE_REG,		/* Value, mask */
+	ICE_FLOW_FLD_TYPE_RANGE,	/* Value, mask, last (upper bound) */
+	ICE_FLOW_FLD_TYPE_PREFIX,	/* IP address, prefix, size of prefix */
+	ICE_FLOW_FLD_TYPE_SIZE,		/* Value, mask, size of match */
+};
+
+struct ice_flow_fld_loc {
+	/* Describe offsets of field information relative to the beginning of
+	 * input buffer provided when adding flow entries.
+	 */
+	u16 val;	/* Offset where the value is located */
+	u16 mask;	/* Offset where the mask/prefix value is located */
+	u16 last;	/* Length or offset where the upper value is located */
+};
+
+struct ice_flow_fld_info {
+	enum ice_flow_fld_match_type type;
+	/* Location where to retrieve data from an input buffer */
+	struct ice_flow_fld_loc src;
+	/* Location where to put the data into the final entry buffer */
+	struct ice_flow_fld_loc entry;
+	struct ice_flow_seg_xtrct xtrct;
+};
+
+struct ice_flow_seg_fld_raw {
+	int off;	/* Offset from the start of the segment */
+	struct ice_flow_fld_info info;
+};
+
+struct ice_flow_seg_info {
+	u32 hdrs;	/* Bitmask indicating protocol headers present */
+	u64 match;	/* Bitmask indicating header fields to be matched */
+	u64 range;	/* Bitmask indicating header fields matched as ranges */
+
+	struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+
+	u8 raws_cnt;	/* Number of raw fields to be matched */
+	struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
+};
+
+/* This structure describes a flow entry, and is tracked only in this file */
+struct ice_flow_entry {
+	struct LIST_ENTRY_TYPE l_entry;
+
+	u64 id;
+	u16 vsi_handle;
+	enum ice_flow_priority priority;
+
+	struct ice_flow_prof *prof;
+
+	/* Flow entry's content */
+	u16 entry_sz;
+	void *entry;
+
+	/* Action list */
+	u8 acts_cnt;
+	struct ice_flow_action *acts;
+};
+
+#define ICE_FLOW_ENTRY_HNDL(e)	((u64)e)
+#define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(h))
+
+struct ice_flow_prof {
+	struct LIST_ENTRY_TYPE l_entry;
+
+	u64 id;
+	enum ice_flow_dir dir;
+
+	/* Keep track of flow entries associated with this flow profile */
+	struct ice_lock entries_lock;
+	struct LIST_HEAD_TYPE entries;
+
+	u8 segs_cnt;
+	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
+
+	/* software VSI handles referenced by this flow profile */
+	ice_declare_bitmap(vsis, ICE_MAX_VSI);
+
+	union {
+		/* struct sw_recipe */
+		/* struct fd */
+		u32 data;
+	} cfg;
+
+	/* Default actions */
+	u8 acts_cnt;
+	struct ice_flow_action *acts;
+};
+
+struct ice_rss_cfg {
+	struct LIST_ENTRY_TYPE l_entry;
+	u64 hashed_flds;
+	u32 packet_hdr;
+};
+
+enum ice_flow_action_type {
+	ICE_FLOW_ACT_NOP,
+	ICE_FLOW_ACT_ALLOW,
+	ICE_FLOW_ACT_DROP,
+	ICE_FLOW_ACT_COUNT,
+	ICE_FLOW_ACT_FWD_VSI,
+	ICE_FLOW_ACT_FWD_VSI_LIST,	/* Should be abstracted away */
+	ICE_FLOW_ACT_FWD_QUEUE,		/* Can Queues be abstracted away? */
+	ICE_FLOW_ACT_FWD_QUEUE_GROUP,	/* Can Queues be abstracted away? */
+	ICE_FLOW_ACTION_PUSH,
+	ICE_FLOW_ACTION_POP,
+	ICE_FLOW_ACTION_MODIFY,
+};
+
+struct ice_flow_action {
+	enum ice_flow_action_type type;
+	union {
+		u32 dummy;
+	} data;
+};
+
+/* Prototypes for the flow module APIs, shared between the flexible
+ * pipeline code and the RSS configuration code.
+ */
+u64
+ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+		   struct ice_flow_seg_info *segs, u8 segs_cnt);
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+		  struct ice_flow_action *acts, u8 acts_cnt,
+		  struct ice_flow_prof **prof);
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		     u8 *hw_prof);
+
+u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
+		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
+		   u64 *entry_h);
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
+void
+ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+			u16 val_loc, u16 prefix_loc, u8 prefix_sz);
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+		     u16 val_loc, u16 mask_loc);
+void ice_rem_all_rss_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		u32 addl_hdrs);
+enum ice_status
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		u32 addl_hdrs);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
 #endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ice/base/meson.build b/drivers/net/ice/base/meson.build
index 77a9802f4..d7f8536bc 100644
--- a/drivers/net/ice/base/meson.build
+++ b/drivers/net/ice/base/meson.build
@@ -8,6 +8,7 @@ sources = [
 	'ice_switch.c',
 	'ice_nvm.c',
 	'ice_flex_pipe.c',
+	'ice_flow.c',
 ]
 
 error_cflags = ['-Wno-unused-value',
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH 7/7] net/ice/base: free flow profile entries
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
                   ` (5 preceding siblings ...)
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 6/7] net/ice/base: add flow module Qi Zhang
@ 2019-01-15 12:56 ` Qi Zhang
  2019-01-16 12:15   ` Ferruh Yigit
  2019-01-16  5:05 ` [dpdk-dev] [PATCH 0/7] net/ice: update share code Lu, Wenzhuo
                   ` (2 subsequent siblings)
  9 siblings, 1 reply; 21+ messages in thread
From: Qi Zhang @ 2019-01-15 12:56 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson, ferruh.yigit, Qi Zhang

Free flow profile entries when freeing hw tables.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_flex_pipe.c | 38 ++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 0b8376d31..479b8bded 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -3034,6 +3034,42 @@ static void ice_fill_blk_tbls(struct ice_hw *hw, enum ice_block block_id)
 }
 
 /**
+ * ice_free_flow_profs - free flow profile entries
+ * @hw: pointer to the hardware structure
+ */
+static void ice_free_flow_profs(struct ice_hw *hw)
+{
+	u8 i;
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		struct ice_flow_prof *p, *tmp;
+
+		if (LIST_EMPTY(&hw->fl_profs[i]))
+			continue;
+
+		/* This call is being made as part of resource deallocation
+		 * during unload. Lock acquire and release will not be
+		 * necessary here.
+		 */
+		LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[i],
+					 ice_flow_prof, l_entry) {
+			struct ice_flow_entry *e, *t;
+
+			LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
+						 ice_flow_entry, l_entry)
+				ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
+
+			LIST_DEL(&p->l_entry);
+			if (p->acts)
+				ice_free(hw, p->acts);
+			ice_free(hw, p);
+		}
+
+		ice_destroy_lock(&hw->fl_profs_locks[i]);
+	}
+}
+
+/**
  * ice_free_prof_map - frees the profile map
  * @hw: pointer to the hardware structure
  * @blk: the hw block which contains the profile map to be freed
@@ -3096,6 +3132,8 @@ void ice_free_hw_tbls(struct ice_hw *hw)
 	}
 
 	ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
+
+	ice_free_flow_profs(hw);
 }
 
 /**
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [dpdk-dev] [PATCH 0/7] net/ice: update share code
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
                   ` (6 preceding siblings ...)
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 7/7] net/ice/base: free flow profile entries Qi Zhang
@ 2019-01-16  5:05 ` Lu, Wenzhuo
  2019-01-16 12:16 ` Ferruh Yigit
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
  9 siblings, 0 replies; 21+ messages in thread
From: Lu, Wenzhuo @ 2019-01-16  5:05 UTC (permalink / raw)
  To: Zhang, Qi Z, Yang, Qiming
  Cc: Stillwell Jr, Paul M, dev, Richardson, Bruce, Yigit, Ferruh

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Tuesday, January 15, 2019 8:57 PM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>
> Cc: Stillwell Jr, Paul M <paul.m.stillwell.jr@intel.com>; dev@dpdk.org;
> Richardson, Bruce <bruce.richardson@intel.com>; Yigit, Ferruh
> <ferruh.yigit@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH 0/7] net/ice: update share code
> 
> For ice family NICs, package processing pipe line can be configured in a
> package file should be downloaded into device by driver during init.
> The patch set add necessary share code APIs to support package download.
> Also some code clean is included.
> 
> Though package download will not be enabled in 19.02 as plan, but we hope
> the share code part could be merged early.
> 
> Meanwhile we will submit a seperate  RFC patch for package download for
> customer early evelutation.
> 
Series Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [dpdk-dev] [PATCH 6/7] net/ice/base: add flow module
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 6/7] net/ice/base: add flow module Qi Zhang
@ 2019-01-16 12:14   ` Ferruh Yigit
  0 siblings, 0 replies; 21+ messages in thread
From: Ferruh Yigit @ 2019-01-16 12:14 UTC (permalink / raw)
  To: Qi Zhang, wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson

On 1/15/2019 12:56 PM, Qi Zhang wrote:
> Add the module that implements flow abstraction based on the
> flexible pipeline.
> 
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>

<...>

> +/**
> + * ice_flow_find_entry - look for a flow entry using its unique ID
> + * @hw: pointer to the hw struct
> + * @blk: classification stage
> + * @entry_id: unique ID to identify this flow entry
> + *
> + * This function looks for the flow entry with the specified unique ID in all
> + * flow profiles of the specified classification stage. If the entry is found,
> + * and it returns the handle to the flow entry. Otherwise, it returns
> + * ICE_FLOW_ENTRY_ID_INVAL.
> + */
> +u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
> +{
> +	struct ice_flow_entry *found = NULL;
> +	struct ice_flow_prof *p;
> +
> +	ice_acquire_lock(&hw->fl_profs_locks[blk]);
> +
> +	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
> +		struct ice_flow_entry *e;
> +
> +		ice_acquire_lock(&p->entries_lock);
> +		LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
> +			if (e->id == entry_id) {
> +				found = e;
> +				break;
> +			}
> +		ice_release_lock(&p->entries_lock);
> +
> +		if (found)
> +			break;
> +	}
> +
> +	ice_release_lock(&hw->fl_profs_locks[blk]);
> +
> +	return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;

The ICE_FLOW_ENTRY_HNDL macro is causing a build error for 32-bit builds [1];
there are a few more usages of this macro, all causing the same issue:

[1]
Building i686-native-linuxapp-gcc ...
In file included from .../drivers/net/ice/base/ice_flow.c:6:
.../drivers/net/ice/base/ice_flow.c: In function ‘ice_flow_find_entry’:
.../drivers/net/ice/base/ice_flow.h:242:33: error: cast from pointer to integer
of different size [-Werror=pointer-to-int-cast]
 #define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
                                 ^
.../drivers/net/ice/base/ice_flow.c:1247:17: note: in expansion of macro
‘ICE_FLOW_ENTRY_HNDL’
  return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
                 ^~~~~~~~~~~~~~~~~~~

<...>

> +/**
> + * ice_flow_rem_entry - Remove a flow entry
> + * @hw: pointer to the hw struct
> + * @entry_h: handle to the flow entry to be removed
> + */
> +enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h)
> +{
> +	struct ice_flow_entry *entry;
> +	struct ice_flow_prof *prof;
> +	enum ice_status status;
> +
> +	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
> +		return ICE_ERR_PARAM;
> +
> +	entry = ICE_FLOW_ENTRY_PTR(entry_h);

This is causing a build error for 32-bit builds [2]:

[2]
.../drivers/net/ice/base/ice_flow.c: In function ‘ice_flow_rem_entry’:
.../drivers/net/ice/base/ice_flow.h:243:32: error: cast to pointer from integer
of different size [-Werror=int-to-pointer-cast]
 #define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
                                ^
.../drivers/net/ice/base/ice_flow.c:1350:10: note: in expansion of macro
‘ICE_FLOW_ENTRY_PTR’
  entry = ICE_FLOW_ENTRY_PTR(entry_h);
          ^~~~~~~~~~~~~~~~~~
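
One possible fix (untested sketch, assuming uintptr_t is available via the
osdep layer) is to cast through uintptr_t so the pointer is first converted
to an integer of matching width:

#define ICE_FLOW_ENTRY_HNDL(e)	((u64)(uintptr_t)(e))
#define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(uintptr_t)(h))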

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [dpdk-dev] [PATCH 7/7] net/ice/base: free flow profile entries
  2019-01-15 12:56 ` [dpdk-dev] [PATCH 7/7] net/ice/base: free flow profile entries Qi Zhang
@ 2019-01-16 12:15   ` Ferruh Yigit
  0 siblings, 0 replies; 21+ messages in thread
From: Ferruh Yigit @ 2019-01-16 12:15 UTC (permalink / raw)
  To: Qi Zhang, wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson

On 1/15/2019 12:56 PM, Qi Zhang wrote:
> Free flow profile entries when free hw tables.
> 
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>

<...>

>  /**
> + * ice_free_flow_profs - free flow profile entries
> + * @hw: pointer to the hardware structure
> + */
> +static void ice_free_flow_profs(struct ice_hw *hw)
> +{
> +	u8 i;
> +
> +	for (i = 0; i < ICE_BLK_COUNT; i++) {
> +		struct ice_flow_prof *p, *tmp;
> +
> +		if (!&hw->fl_profs[i])
> +			continue;
> +
> +		/* This call is being made as part of resource deallocation
> +		 * during unload. Lock acquire and release will not be
> +		 * necessary here.
> +		 */
> +		LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[i],
> +					 ice_flow_prof, l_entry) {
> +			struct ice_flow_entry *e, *t;
> +
> +			LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
> +						 ice_flow_entry, l_entry)
> +				ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));

Same 32-bit build error here for the ICE_FLOW_ENTRY_HNDL usage.

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [dpdk-dev] [PATCH 0/7] net/ice: update share code
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
                   ` (7 preceding siblings ...)
  2019-01-16  5:05 ` [dpdk-dev] [PATCH 0/7] net/ice: update share code Lu, Wenzhuo
@ 2019-01-16 12:16 ` Ferruh Yigit
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
  9 siblings, 0 replies; 21+ messages in thread
From: Ferruh Yigit @ 2019-01-16 12:16 UTC (permalink / raw)
  To: Qi Zhang, wenzhuo.lu, qiming.yang
  Cc: paul.m.stillwell.jr, dev, bruce.richardson

On 1/15/2019 12:56 PM, Qi Zhang wrote:
> For ice family NICs, package processing pipe line can be configured in
> a package file should be downloaded into device by driver during init.
> The patch set add necessary share code APIs to support package download.
> Also some code clean is included.
> 
> Though package download will not be enabled in 19.02 as plan, but we
> hope the share code part could be merged early.
> 
> Meanwhile we will submit a seperate  RFC patch for package download for
> customer early evelutation.
> 
> Qi Zhang (7):
>   net/ice/base: code clean
>   net/ice/base: add API to support resource allocate
>   net/ice/base: add package download related data structure
>   net/ice/base: add some help macros
>   net/ice/base: add flexible pipeline module
>   net/ice/base: add flow module
>   net/ice/base: free flow profile entries

Since there will already be a new version, can you also check the checkpatch
errors? There are a few reported.

Thanks,
ferruh

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH v2 0/7] net/ice: update share code
  2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
                   ` (8 preceding siblings ...)
  2019-01-16 12:16 ` Ferruh Yigit
@ 2019-01-16 14:13 ` Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 1/7] net/ice/base: code clean Qi Zhang
                     ` (7 more replies)
  9 siblings, 8 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-16 14:13 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang; +Cc: paul.m.stillwell.jr, dev, ferruh.yigit, Qi Zhang

For ice family NICs, the packet processing pipeline can be configured by
a package file that should be downloaded into the device by the driver
during init. The patch set adds the necessary shared code APIs to support
package download. Some code cleanup is also included.

Though package download will not be enabled in 19.02 as planned, we hope
the shared code part can be merged early.

Meanwhile, we will submit a separate RFC patch for package download for
early customer evaluation.

v2:
- fix 32-bit compile errors.
- fix checkpatch warnings.

Qi Zhang (7):
  net/ice/base: code clean
  net/ice/base: add API to support resource allocate
  net/ice/base: add package download related data structure
  net/ice/base: add some help macros
  net/ice/base: add flexible pipeline module
  net/ice/base: add flow module
  net/ice/base: free flow profile entries

 drivers/net/ice/Makefile              |    2 +
 drivers/net/ice/base/ice_adminq_cmd.h |   85 +
 drivers/net/ice/base/ice_common.c     |   85 +-
 drivers/net/ice/base/ice_common.h     |   32 +-
 drivers/net/ice/base/ice_controlq.c   |    2 +-
 drivers/net/ice/base/ice_flex_pipe.c  | 5056 +++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_flex_pipe.h  |  107 +
 drivers/net/ice/base/ice_flex_type.h  |  685 +++++
 drivers/net/ice/base/ice_flow.c       | 2080 ++++++++++++++
 drivers/net/ice/base/ice_flow.h       |  337 +++
 drivers/net/ice/base/ice_osdep.h      |    2 +
 drivers/net/ice/base/ice_type.h       |   37 +-
 drivers/net/ice/base/meson.build      |    2 +
 13 files changed, 8479 insertions(+), 33 deletions(-)
 create mode 100644 drivers/net/ice/base/ice_flex_pipe.c
 create mode 100644 drivers/net/ice/base/ice_flex_pipe.h
 create mode 100644 drivers/net/ice/base/ice_flow.c

-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH v2 1/7] net/ice/base: code clean
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
@ 2019-01-16 14:13   ` Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 2/7] net/ice/base: add API to support resource allocate Qi Zhang
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-16 14:13 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang; +Cc: paul.m.stillwell.jr, dev, ferruh.yigit, Qi Zhang

Remove some unnecessary code.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_common.c   | 14 --------------
 drivers/net/ice/base/ice_common.h   | 17 -----------------
 drivers/net/ice/base/ice_controlq.c |  2 +-
 3 files changed, 1 insertion(+), 32 deletions(-)

diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index d49264d14..2ccf58527 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -60,16 +60,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 	return status;
 }
 
-#if defined(FPGA_SUPPORT) || defined(CVL_A0_SUPPORT)
-void ice_dev_onetime_setup(struct ice_hw *hw)
-{
-	/* configure Rx - set non pxe mode */
-	wr32(hw, GLLAN_RCTL_0, 0x1);
-
-
-
-}
-#endif /* FPGA_SUPPORT || CVL_A0_SUPPORT */
 
 /**
  * ice_clear_pf_cfg - Clear PF configuration
@@ -830,10 +820,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	if (status)
 		goto err_unroll_sched;
 
-#if defined(FPGA_SUPPORT) || defined(CVL_A0_SUPPORT)
-	/* some of the register write workarounds to get Rx working */
-	ice_dev_onetime_setup(hw);
-#endif /* FPGA_SUPPORT || CVL_A0_SUPPORT */
 
 	/* Get MAC information */
 	/* A single port can report up to two (LAN and WoL) addresses */
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 082ae66f9..0197fbfe3 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -9,18 +9,6 @@
 
 #include "ice_switch.h"
 
-/* prototype for functions used for SW locks */
-void ice_free_list(struct LIST_HEAD_TYPE *list);
-void ice_init_lock(struct ice_lock *lock);
-void ice_acquire_lock(struct ice_lock *lock);
-void ice_release_lock(struct ice_lock *lock);
-void ice_destroy_lock(struct ice_lock *lock);
-
-void *ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m, u64 size);
-void ice_free_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m);
-
-bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-
 enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
 
 void
@@ -61,11 +49,6 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
 
 
 
-#if defined(FPGA_SUPPORT) || defined(CVL_A0_SUPPORT)
-void ice_dev_onetime_setup(struct ice_hw *hw);
-#endif /* FPGA_SUPPORT || CVL_A0_SUPPORT */
-
-
 enum ice_status
 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 		  u32 rxq_index);
diff --git a/drivers/net/ice/base/ice_controlq.c b/drivers/net/ice/base/ice_controlq.c
index fb82c23ee..cbc4cb4c2 100644
--- a/drivers/net/ice/base/ice_controlq.c
+++ b/drivers/net/ice/base/ice_controlq.c
@@ -735,7 +735,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * Returns true if the firmware has processed all descriptors on the
  * admin send queue. Returns false if there are still requests pending.
  */
-bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
 	/* AQ designers suggest use of head for better
 	 * timing reliability than DD bit
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH v2 2/7] net/ice/base: add API to support resource allocate
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 1/7] net/ice/base: code clean Qi Zhang
@ 2019-01-16 14:13   ` Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 3/7] net/ice/base: add package download related data structure Qi Zhang
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-16 14:13 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang; +Cc: paul.m.stillwell.jr, dev, ferruh.yigit, Qi Zhang

Add the ice_alloc_hw_res and ice_free_hw_res APIs.
Add resource type macros.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_adminq_cmd.h | 27 +++++++++++++
 drivers/net/ice/base/ice_common.c     | 71 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_common.h     |  4 ++
 3 files changed, 102 insertions(+)

diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 9332f84aa..3003efd55 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -238,8 +238,35 @@ struct ice_aqc_get_sw_cfg_resp {
  * Free Resources command (indirect 0x0209)
  * Get Allocated Resource Descriptors Command (indirect 0x020A)
  */
+#define ICE_AQC_RES_TYPE_VEB_COUNTER			0x00
+#define ICE_AQC_RES_TYPE_VLAN_COUNTER			0x01
+#define ICE_AQC_RES_TYPE_MIRROR_RULE			0x02
 #define ICE_AQC_RES_TYPE_VSI_LIST_REP			0x03
 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE			0x04
+#define ICE_AQC_RES_TYPE_RECIPE				0x05
+#define ICE_AQC_RES_TYPE_PROFILE			0x06
+#define ICE_AQC_RES_TYPE_SWID				0x07
+#define ICE_AQC_RES_TYPE_VSI				0x08
+#define ICE_AQC_RES_TYPE_FLU				0x09
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_1			0x0A
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_2			0x0B
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_4			0x0C
+#define ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH		0x20
+#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK		0x21
+#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES	0x22
+#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES		0x23
+#define ICE_AQC_RES_TYPE_FLEX_DESC_PROG			0x30
+#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID	0x48
+#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM		0x49
+#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID		0x50
+#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM		0x51
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID		0x58
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM		0x59
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID		0x60
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM		0x61
+/* Resource types 0x62-67 are reserved for Hash profile builder */
+#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID		0x68
+#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM		0x69
 
 #define ICE_AQC_RES_TYPE_FLAG_SHARED			BIT(7)
 #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM		BIT(12)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 2ccf58527..145f66a90 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -1712,6 +1712,77 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 }
 
+/**
+ * ice_alloc_hw_res - allocate resource
+ * @hw: pointer to the hw struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @sh: shared if true, dedicated if false
+ * @res: pointer to array that will receive the resources
+ */
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool sh, u16 *res)
+{
+	struct ice_aqc_alloc_free_res_elem *buf;
+	enum ice_status status;
+	u16 buf_len;
+
+	buf_len = sizeof(*buf) + sizeof(buf->elem) * (num - 1);
+	buf = (struct ice_aqc_alloc_free_res_elem *)
+		ice_malloc(hw, buf_len);
+	if (!buf)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Prepare buffer to allocate resource. */
+	buf->num_elems = CPU_TO_LE16(num);
+	buf->res_type = CPU_TO_LE16(type | (sh ? ICE_AQC_RES_TYPE_FLAG_SHARED :
+		ICE_AQC_RES_TYPE_FLAG_DEDICATED));
+	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+				       ice_aqc_opc_alloc_res, NULL);
+	if (status)
+		goto ice_alloc_res_exit;
+
+	ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
+		   ICE_NONDMA_TO_NONDMA);
+
+ice_alloc_res_exit:
+	ice_free(hw, buf);
+	return status;
+}
+
+/**
+ * ice_free_hw_res - free allocated hw resource
+ * @hw: pointer to the hw struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: pointer to array that contains the resources to free
+ */
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+	struct ice_aqc_alloc_free_res_elem *buf;
+	enum ice_status status;
+	u16 buf_len;
+
+	buf_len = sizeof(*buf) + sizeof(buf->elem) * (num - 1);
+	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
+	if (!buf)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Prepare buffer to free resource. */
+	buf->num_elems = CPU_TO_LE16(num);
+	buf->res_type = CPU_TO_LE16(type);
+	ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
+		   ICE_NONDMA_TO_NONDMA);
+
+	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
+				       ice_aqc_opc_free_res, NULL);
+	if (status)
+		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
+
+	ice_free(hw, buf);
+	return status;
+}
 
 /**
  * ice_get_num_per_func - determine number of resources per PF
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 0197fbfe3..45d93eb64 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -32,6 +32,10 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
 		enum ice_aq_res_access_type access, u32 timeout);
 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
 enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool sh, u16 *res);
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
+enum ice_status
 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH v2 3/7] net/ice/base: add package download related data structure
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 1/7] net/ice/base: code clean Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 2/7] net/ice/base: add API to support resource allocate Qi Zhang
@ 2019-01-16 14:13   ` Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 4/7] net/ice/base: add some help macros Qi Zhang
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-16 14:13 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang; +Cc: paul.m.stillwell.jr, dev, ferruh.yigit, Qi Zhang

Add the admin queue data structures and opcodes for the package commands:
download package, upload section, update package and get package info list.
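
For illustration, ice_aqc_get_pkg_info_resp ends in a variable-length
pkg_info[] array, so a caller sizes the response buffer for the number of
entries it expects. A sketch, assuming four entries (the count is an
arbitrary assumption):

	#define PKG_INFO_CNT	4	/* assumed entry count */

	struct ice_aqc_get_pkg_info_resp *resp;
	u16 size;

	/* one entry is part of sizeof(*resp); add room for the rest */
	size = sizeof(*resp) + sizeof(resp->pkg_info[0]) * (PKG_INFO_CNT - 1);
	resp = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);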

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_adminq_cmd.h | 58 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_type.h       |  1 -
 2 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 3003efd55..a1b9edd14 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1576,6 +1576,57 @@ struct ice_aqc_move_txqs_data {
 
 
 
+/* Download Package (indirect 0x0C40) */
+/* Also used for Update Package (indirect 0x0C42) */
+struct ice_aqc_download_pkg {
+	u8 flags;
+#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF	0x01
+	u8 reserved[3];
+	__le32 reserved1;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+struct ice_aqc_download_pkg_resp {
+	__le32 error_offset;
+	__le32 error_info;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+/* Get Package Info List (indirect 0x0C43) */
+struct ice_aqc_get_pkg_info_list {
+	__le32 reserved1;
+	__le32 reserved2;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+/* Version format for packages */
+struct ice_pkg_ver {
+	u8 major;
+	u8 minor;
+	u8 update;
+	u8 draft;
+};
+
+#define ICE_PKG_NAME_SIZE	32
+
+struct ice_aqc_get_pkg_info {
+	struct ice_pkg_ver ver;
+	char name[ICE_PKG_NAME_SIZE];
+	u8 is_in_nvm;
+	u8 is_active;
+	u8 is_active_at_boot;
+	u8 is_modified;
+};
+
+/* Get Package Info List response buffer format (0x0C43) */
+struct ice_aqc_get_pkg_info_resp {
+	__le32 count;
+	struct ice_aqc_get_pkg_info pkg_info[1];
+};
+
 
 
 
@@ -1726,6 +1777,8 @@ struct ice_aq_desc {
 		struct ice_aqc_txqs_cleanup txqs_cleanup;
 		struct ice_aqc_add_get_update_free_vsi vsi_cmd;
 		struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
+		struct ice_aqc_download_pkg download_pkg;
+		struct ice_aqc_get_pkg_info_list get_pkg_info_list;
 		struct ice_aqc_fw_logging fw_logging;
 		struct ice_aqc_get_clear_fw_log get_clear_fw_log;
 		struct ice_aqc_set_mac_lb set_mac_lb;
@@ -1903,6 +1956,11 @@ enum ice_adminq_opc {
 	ice_aqc_opc_txqs_cleanup			= 0x0C31,
 	ice_aqc_opc_move_recfg_txqs			= 0x0C32,
 
+	/* package commands */
+	ice_aqc_opc_download_pkg			= 0x0C40,
+	ice_aqc_opc_upload_section			= 0x0C41,
+	ice_aqc_opc_update_pkg				= 0x0C42,
+	ice_aqc_opc_get_pkg_info_list			= 0x0C43,
 
 
 
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 256bf3f37..d7c12d172 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -704,7 +704,6 @@ struct ice_hw {
 
 	u8 ucast_shared;	/* true if VSIs can share unicast addr */
 
-
 };
 
 /* Statistics collected by each port, VSI, VEB, and S-channel */
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH v2 4/7] net/ice/base: add some help macros
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
                     ` (2 preceding siblings ...)
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 3/7] net/ice/base: add package download related data structure Qi Zhang
@ 2019-01-16 14:13   ` Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 5/7] net/ice/base: add flexible pipeline module Qi Zhang
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-16 14:13 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang; +Cc: paul.m.stillwell.jr, dev, ferruh.yigit, Qi Zhang

Add the ICE_ALIGN helper macro for power-of-two alignment and the
LIST_NEXT_ENTRY list traversal macro.
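
A short worked example of both macros (values are illustrative; head is an
assumed struct ice_list_head pointer and my_entry a hypothetical element
type with an ice_list_entry member named list):

	u16 off = 0x1006;

	off = ICE_ALIGN(off, 4);	/* off is now 0x1008 */
	off = ICE_ALIGN(off, 4);	/* already aligned, still 0x1008 */

	struct my_entry *e;

	e = LIST_FIRST_ENTRY(head, struct my_entry, list);
	e = LIST_NEXT_ENTRY(e, struct my_entry, list);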

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_common.h | 10 ++++++++++
 drivers/net/ice/base/ice_osdep.h  |  2 ++
 2 files changed, 12 insertions(+)

diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 45d93eb64..67539ccb7 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -51,6 +51,16 @@ void ice_clear_pxe_mode(struct ice_hw *hw);
 
 enum ice_status ice_get_caps(struct ice_hw *hw);
 
+/* Define a macro that will align a pointer to point to the next memory address
+ * that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For
+ * example, given the variable pointer = 0x1006, then after the following call:
+ *
+ *      pointer = ICE_ALIGN(pointer, 4)
+ *
+ * ... the value of pointer would equal 0x1008, since 0x1008 is the next
+ * address after 0x1006 which is divisible by 4.
+ */
+#define ICE_ALIGN(ptr, align)   (((ptr) + ((align) - 1)) & ~((align) - 1))
 
 
 enum ice_status
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index a3351c034..252c8f4e7 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -457,6 +457,8 @@ LIST_HEAD(ice_list_head, ice_list_entry);
 
 /*Note parameters are swapped*/
 #define LIST_FIRST_ENTRY(head, type, field) (type *)((head)->lh_first)
+#define LIST_NEXT_ENTRY(entry, type, field) \
+	((type *)(entry)->field.next.le_next)
 #define LIST_ADD(entry, list_head)    LIST_INSERT_HEAD(list_head, entry, next)
 #define LIST_ADD_AFTER(entry, list_entry) \
 	LIST_INSERT_AFTER(list_entry, entry, next)
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH v2 5/7] net/ice/base: add flexible pipeline module
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
                     ` (3 preceding siblings ...)
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 4/7] net/ice/base: add some help macros Qi Zhang
@ 2019-01-16 14:13   ` Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 6/7] net/ice/base: add flow module Qi Zhang
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-16 14:13 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang; +Cc: paul.m.stillwell.jr, dev, ferruh.yigit, Qi Zhang

The flexible pipeline module provides the infrastructure for the ice
hardware's flexible packet processing feature.
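
All of the module's package walkers share one calling pattern: the first
call passes the ice segment, and every subsequent call passes NULL to
continue until NULL is returned. A sketch of that pattern using the boost
TCAM handler from this patch (module-internal, since ice_pkg_enum_entry()
is static; the loop body is illustrative):

	struct ice_pkg_enum state;
	struct ice_boost_tcam_entry *tcam;
	struct ice_seg *seg = hw->seg;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
	do {
		tcam = (struct ice_boost_tcam_entry *)
			ice_pkg_enum_entry(seg, &state,
					   ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					   ice_boost_tcam_handler);
		if (tcam)
			; /* inspect tcam->addr here */
		seg = NULL;	/* NULL continues the enumeration */
	} while (tcam);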

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/Makefile             |    1 +
 drivers/net/ice/base/ice_flex_pipe.c | 5018 ++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_flex_pipe.h |  107 +
 drivers/net/ice/base/ice_flex_type.h |  686 +++++
 drivers/net/ice/base/ice_type.h      |   36 +
 drivers/net/ice/base/meson.build     |    1 +
 6 files changed, 5849 insertions(+)
 create mode 100644 drivers/net/ice/base/ice_flex_pipe.c
 create mode 100644 drivers/net/ice/base/ice_flex_pipe.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 4b421cde3..1bb76efc2 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -49,6 +49,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_common.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_sched.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_nvm.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flex_pipe.c
 
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx.c
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
new file mode 100644
index 000000000..1887fc38c
--- /dev/null
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -0,0 +1,5018 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2019
+ */
+
+#include "ice_common.h"
+#include "ice_flex_pipe.h"
+#include "ice_protocol_type.h"
+#include "ice_flow.h"
+
+static const struct ice_tunnel_type_scan tnls[] = {
+	{ TNL_VXLAN,		"TNL_VXLAN" },
+	{ TNL_GTPC,		"TNL_GTPC" },
+	{ TNL_GTPC_TEID,	"TNL_GTPC_TEID" },
+	{ TNL_GTPU,		"TNL_GTPU" },
+	{ TNL_GTPU_TEID,	"TNL_GTPU_TEID" },
+	{ TNL_VXLAN_GPE,	"TNL_VXLAN_GPE" },
+	{ TNL_GENEVE,		"TNL_GENEVE" },
+	{ TNL_NAT,		"TNL_NAT" },
+	{ TNL_ROCE_V2,		"TNL_ROCE_V2" },
+	{ TNL_MPLSO_UDP,	"TNL_MPLSO_UDP" },
+	{ TNL_UDP2_END,		"TNL_UDP2_END" },
+	{ TNL_UPD_END,		"TNL_UPD_END" },
+	{ TNL_LAST,		"" }
+};
+
+static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
+	/* SWITCH */
+	{
+		ICE_SID_XLT0_SW,
+		ICE_SID_XLT_KEY_BUILDER_SW,
+		ICE_SID_XLT1_SW,
+		ICE_SID_XLT2_SW,
+		ICE_SID_PROFID_TCAM_SW,
+		ICE_SID_PROFID_REDIR_SW,
+		ICE_SID_FLD_VEC_SW,
+		ICE_SID_CDID_KEY_BUILDER_SW,
+		ICE_SID_CDID_REDIR_SW
+	},
+
+	/* ACL */
+	{
+		ICE_SID_XLT0_ACL,
+		ICE_SID_XLT_KEY_BUILDER_ACL,
+		ICE_SID_XLT1_ACL,
+		ICE_SID_XLT2_ACL,
+		ICE_SID_PROFID_TCAM_ACL,
+		ICE_SID_PROFID_REDIR_ACL,
+		ICE_SID_FLD_VEC_ACL,
+		ICE_SID_CDID_KEY_BUILDER_ACL,
+		ICE_SID_CDID_REDIR_ACL
+	},
+
+	/* FD */
+	{
+		ICE_SID_XLT0_FD,
+		ICE_SID_XLT_KEY_BUILDER_FD,
+		ICE_SID_XLT1_FD,
+		ICE_SID_XLT2_FD,
+		ICE_SID_PROFID_TCAM_FD,
+		ICE_SID_PROFID_REDIR_FD,
+		ICE_SID_FLD_VEC_FD,
+		ICE_SID_CDID_KEY_BUILDER_FD,
+		ICE_SID_CDID_REDIR_FD
+	},
+
+	/* RSS */
+	{
+		ICE_SID_XLT0_RSS,
+		ICE_SID_XLT_KEY_BUILDER_RSS,
+		ICE_SID_XLT1_RSS,
+		ICE_SID_XLT2_RSS,
+		ICE_SID_PROFID_TCAM_RSS,
+		ICE_SID_PROFID_REDIR_RSS,
+		ICE_SID_FLD_VEC_RSS,
+		ICE_SID_CDID_KEY_BUILDER_RSS,
+		ICE_SID_CDID_REDIR_RSS
+	},
+
+	/* PE */
+	{
+		ICE_SID_XLT0_PE,
+		ICE_SID_XLT_KEY_BUILDER_PE,
+		ICE_SID_XLT1_PE,
+		ICE_SID_XLT2_PE,
+		ICE_SID_PROFID_TCAM_PE,
+		ICE_SID_PROFID_REDIR_PE,
+		ICE_SID_FLD_VEC_PE,
+		ICE_SID_CDID_KEY_BUILDER_PE,
+		ICE_SID_CDID_REDIR_PE
+	}
+};
+
+/**
+ * ice_sect_id - returns section ID
+ * @blk: block type
+ * @sect: section type
+ *
+ * This helper function returns the proper section ID given a block type and a
+ * section type.
+ */
+static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
+{
+	return ice_sect_lkup[blk][sect];
+}
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+	struct ice_buf_hdr *hdr;
+	u16 section_count;
+	u16 data_end;
+
+	hdr = (struct ice_buf_hdr *)buf->buf;
+	/* verify data */
+	section_count = LE16_TO_CPU(hdr->section_count);
+	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+		return NULL;
+
+	data_end = LE16_TO_CPU(hdr->data_end);
+	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+		return NULL;
+
+	return hdr;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+	struct ice_nvm_table *nvms;
+
+	nvms = (struct ice_nvm_table *)(ice_seg->device_table +
+		LE32_TO_CPU(ice_seg->device_table_count));
+
+	return (struct ice_buf_table *)
+		(nvms->vers + LE32_TO_CPU(nvms->table_count));
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+static struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+	if (ice_seg) {
+		state->buf_table = ice_find_buf_table(ice_seg);
+		if (!state->buf_table)
+			return NULL;
+
+		state->buf_idx = 0;
+		return ice_pkg_val_buf(state->buf_table->buf_array);
+	}
+
+	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
+		return ice_pkg_val_buf(state->buf_table->buf_array +
+				       state->buf_idx);
+	else
+		return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+static bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+	if (!ice_seg && !state->buf)
+		return false;
+
+	if (!ice_seg && state->buf)
+		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
+			return true;
+
+	state->buf = ice_pkg_enum_buf(ice_seg, state);
+	if (!state->buf)
+		return false;
+
+	/* start of new buffer, reset section index */
+	state->sect_idx = 0;
+	return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+static void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+		     u32 sect_type)
+{
+	u16 offset, size;
+
+	if (ice_seg)
+		state->type = sect_type;
+
+	if (!ice_pkg_advance_sect(ice_seg, state))
+		return NULL;
+
+	/* scan for next matching section */
+	while (state->buf->section_entry[state->sect_idx].type !=
+	       CPU_TO_LE32(state->type))
+		if (!ice_pkg_advance_sect(NULL, state))
+			return NULL;
+
+	/* validate section */
+	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+		return NULL;
+
+	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
+	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+		return NULL;
+
+	/* make sure the section fits in the buffer */
+	if (offset + size > ICE_PKG_BUF_SIZE)
+		return NULL;
+
+	state->sect_type =
+		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
+
+	/* calc pointer to this section */
+	state->sect = ((u8 *)state->buf) +
+		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+
+	return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries within the section
+ *
+ * This function will enumerate all the entries in particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+		   u32 sect_type, u32 *offset,
+		   void *(*handler)(u32 sect_type, void *section,
+				    u32 index, u32 *offset))
+{
+	void *entry;
+
+	if (ice_seg) {
+		if (!handler)
+			return NULL;
+
+		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+			return NULL;
+
+		state->entry_idx = 0;
+		state->handler = handler;
+	} else {
+		state->entry_idx++;
+	}
+
+	if (!state->handler)
+		return NULL;
+
+	/* get entry */
+	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+			       offset);
+	if (!entry) {
+		/* end of a section, look for another section of this type */
+		if (!ice_pkg_enum_section(NULL, state, 0))
+			return NULL;
+
+		state->entry_idx = 0;
+		entry = state->handler(state->sect_type, state->sect,
+				       state->entry_idx, offset);
+	}
+
+	return entry;
+}
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost tcam entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost tcam sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost tcam entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+	struct ice_boost_tcam_section *boost;
+
+	if (!section)
+		return NULL;
+
+	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+		return NULL;
+
+	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+		return NULL;
+
+	if (offset)
+		*offset = 0;
+
+	boost = (struct ice_boost_tcam_section *)section;
+	if (index >= LE16_TO_CPU(boost->count))
+		return NULL;
+
+	return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+		     struct ice_boost_tcam_entry **entry)
+{
+	struct ice_boost_tcam_entry *tcam;
+	struct ice_pkg_enum state;
+
+	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+	if (!ice_seg)
+		return ICE_ERR_PARAM;
+
+	do {
+		tcam = (struct ice_boost_tcam_entry *)
+		       ice_pkg_enum_entry(ice_seg, &state,
+					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+					  ice_boost_tcam_handler);
+		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
+			*entry = tcam;
+			return ICE_SUCCESS;
+		}
+
+		ice_seg = NULL;
+	} while (tcam);
+
+	*entry = NULL;
+	return ICE_ERR_CFG;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
+		       u32 *offset)
+{
+	struct ice_label_section *labels;
+
+	if (!section)
+		return NULL;
+
+	if (index > ICE_MAX_LABELS_IN_BUF)
+		return NULL;
+
+	if (offset)
+		*offset = 0;
+
+	labels = (struct ice_label_section *)section;
+	if (index >= LE16_TO_CPU(labels->count))
+		return NULL;
+
+	return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+		u16 *value)
+{
+	struct ice_label *label;
+
+	/* Check for valid label section on first call */
+	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+		return NULL;
+
+	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
+						       NULL,
+						       ice_label_enum_handler);
+	if (!label)
+		return NULL;
+
+	*value = LE16_TO_CPU(label->value);
+	return label->name;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+	struct ice_pkg_enum state;
+	char *label_name;
+	u16 val;
+	int i;
+
+	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+
+	if (!ice_seg)
+		return;
+
+	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+				     &val);
+
+	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+		for (i = 0; tnls[i].type != TNL_LAST; i++) {
+			if (!strncmp(label_name, tnls[i].label_prefix,
+				     strlen(tnls[i].label_prefix))) {
+				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+				hw->tnl.tbl[hw->tnl.count].valid = false;
+				hw->tnl.tbl[hw->tnl.count].in_use = false;
+				hw->tnl.tbl[hw->tnl.count].marked = false;
+				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+				hw->tnl.tbl[hw->tnl.count].port = 0;
+				hw->tnl.count++;
+				break;
+			}
+		}
+
+		label_name = ice_enum_labels(NULL, 0, &state, &val);
+	}
+
+	/* Cache the appropriate boost tcam entry pointers */
+	for (i = 0; i < hw->tnl.count; i++) {
+		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+				     &hw->tnl.tbl[i].boost_entry);
+		if (hw->tnl.tbl[i].boost_entry)
+			hw->tnl.tbl[i].valid = true;
+	}
+}
+
+/* Key creation */
+
+#define ICE_DC_KEY	0x1	/* don't care */
+#define ICE_DC_KEYINV	0x1
+#define ICE_NM_KEY	0x0	/* never match */
+#define ICE_NM_KEYINV	0x0
+#define ICE_0_KEY	0x1	/* match 0 */
+#define ICE_0_KEYINV	0x0
+#define ICE_1_KEY	0x0	/* match 1 */
+#define ICE_1_KEYINV	0x1
+
+/**
+ * ice_gen_key_word - generate 16-bits of a key/mask word
+ * @val: the value
+ * @valid: valid bits mask (change only the valid bits)
+ * @dont_care: don't care mask
+ * @nvr_mtch: never match mask
+ * @key: pointer to an array where the resulting key portion will be stored
+ * @key_inv: pointer to an array where the resulting key invert portion will
+ *	     be stored
+ *
+ * This function generates 16 bits from an 8-bit value, an 8-bit don't care
+ * mask and an 8-bit never match mask. The 16 bits of output are divided into
+ * 8 bits of key and 8 bits of key invert.
+ *
+ *     '0' =    b01, always match a 0 bit
+ *     '1' =    b10, always match a 1 bit
+ *     '?' =    b11, don't care bit (always matches)
+ *     '~' =    b00, never match bit
+ *
+ * Input:
+ *          val:         b0  1  0  1  0  1
+ *          dont_care:   b0  0  1  1  0  0
+ *          never_mtch:  b0  0  0  0  1  1
+ *          ------------------------------
+ * Result:  key:        b01 10 11 11 00 00
+ */
+static enum ice_status
+ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
+		 u8 *key_inv)
+{
+	u8 in_key = *key, in_key_inv = *key_inv;
+	u8 i;
+
+	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
+	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
+		return ICE_ERR_CFG;
+
+	*key = 0;
+	*key_inv = 0;
+
+	/* encode the 8 bits into 8-bit key and 8-bit key invert */
+	for (i = 0; i < 8; i++) {
+		*key >>= 1;
+		*key_inv >>= 1;
+
+		if (!(valid & 0x1)) { /* change only valid bits */
+			*key |= (in_key & 0x1) << 7;
+			*key_inv |= (in_key_inv & 0x1) << 7;
+		} else if (dont_care & 0x1) { /* don't care bit */
+			*key |= ICE_DC_KEY << 7;
+			*key_inv |= ICE_DC_KEYINV << 7;
+		} else if (nvr_mtch & 0x1) { /* never match bit */
+			*key |= ICE_NM_KEY << 7;
+			*key_inv |= ICE_NM_KEYINV << 7;
+		} else if (val & 0x01) { /* exact 1 match */
+			*key |= ICE_1_KEY << 7;
+			*key_inv |= ICE_1_KEYINV << 7;
+		} else { /* exact 0 match */
+			*key |= ICE_0_KEY << 7;
+			*key_inv |= ICE_0_KEYINV << 7;
+		}
+
+		dont_care >>= 1;
+		nvr_mtch >>= 1;
+		valid >>= 1;
+		val >>= 1;
+		in_key >>= 1;
+		in_key_inv >>= 1;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_bits_max_set - determine if the number of bits set is within a maximum
+ * @mask: pointer to the byte array which is the mask
+ * @size: the number of bytes in the mask
+ * @max: the max number of set bits
+ *
+ * This function determines if there are at most 'max' bits set in an array.
+ * Returns true if the number of bits set is <= max, false otherwise.
+ */
+static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
+{
+	u16 count = 0;
+	u16 i, j;
+
+	/* check each byte */
+	for (i = 0; i < size; i++) {
+		/* if 0, go to next byte */
+		if (!mask[i])
+			continue;
+
+		/* We know there is at least one set bit in this byte because of
+		 * the above check; if we already have found 'max' number of
+		 * bits set, then we can return failure now.
+		 */
+		if (count == max)
+			return false;
+
+		/* count the bits in this byte, checking threshold */
+		for (j = 0; j < BITS_PER_BYTE; j++) {
+			count += (mask[i] & (0x1 << j)) ? 1 : 0;
+			if (count > max)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * ice_set_key - generate a variable sized key with multiples of 16-bits
+ * @key: pointer to where the key will be stored
+ * @size: the size of the complete key in bytes (must be even)
+ * @val: array of 8-bit values that makes up the value portion of the key
+ * @upd: array of 8-bit masks that determine what key portion to update
+ * @dc: array of 8-bit masks that make up the don't care mask
+ * @nm: array of 8-bit masks that make up the never match mask
+ * @off: the offset of the first byte in the key to update
+ * @len: the number of bytes in the key update
+ *
+ * This function generates a key from a value, a don't care mask and a never
+ * match mask.
+ * upd, dc, and nm are optional parameters, and can be NULL:
+ *	upd == NULL --> upd mask is all 1's (update all bits)
+ *	dc == NULL --> dc mask is all 0's (no don't care bits)
+ *	nm == NULL --> nm mask is all 0's (no never match bits)
+ */
+enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+	    u16 len)
+{
+	u16 half_size;
+	u16 i;
+
+	/* size must be a multiple of 2 bytes. */
+	if (size % 2)
+		return ICE_ERR_CFG;
+	half_size = size / 2;
+
+	if (off + len > half_size)
+		return ICE_ERR_CFG;
+
+	/* Make sure at most one bit is set in the never match mask. Having more
+	 * than one never match mask bit set will cause HW to consume excessive
+	 * power; this is a power management efficiency check.
+	 */
+#define ICE_NVR_MTCH_BITS_MAX	1
+	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
+		return ICE_ERR_CFG;
+
+	for (i = 0; i < len; i++)
+		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
+				     dc ? dc[i] : 0, nm ? nm[i] : 0,
+				     key + off + i, key + half_size + off + i))
+			return ICE_ERR_CFG;
+
+	return ICE_SUCCESS;
+}
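+
+/* Example (illustrative): match the 16-bit value 0x1234 at byte offset 0 of
+ * a 10-byte key (5 bytes of key plus 5 bytes of key invert), with no don't
+ * care and no never match bits:
+ *
+ *	u8 key[10] = { 0 };
+ *	u8 val[2] = { 0x12, 0x34 };
+ *
+ *	if (ice_set_key(key, sizeof(key), val, NULL, NULL, NULL, 0,
+ *			sizeof(val)))
+ *		return ICE_ERR_CFG;
+ *
+ * Passing NULL for upd, dc and nm selects "update all bits, no don't care
+ * bits, no never match bits".
+ */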
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * ICE_SUCCESS        - Means the caller has acquired the global config lock
+ *                      and can perform writing of the package.
+ * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
+ *                      package or has found that no update was necessary; in
+ *                      this case, the caller can just skip performing any
+ *                      update of the package.
+ */
+static enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+			    enum ice_aq_res_access_type access)
+{
+	enum ice_status status;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_global_cfg_lock");
+
+	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+	if (status == ICE_ERR_AQ_NO_WORK)
+		ice_debug(hw, ICE_DBG_PKG,
+			  "Global config lock: No work to do\n");
+
+	return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+static void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+static enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+	ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_change_lock");
+
+	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+			       ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+static void ice_release_change_lock(struct ice_hw *hw)
+{
+	ice_debug(hw, ICE_DBG_TRACE, "ice_release_change_lock");
+
+	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static enum ice_status
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+		    u16 buf_size, bool last_buf, u32 *error_offset,
+		    u32 *error_info, struct ice_sq_cd *cd)
+{
+	struct ice_aqc_download_pkg *cmd;
+	struct ice_aq_desc desc;
+	enum ice_status status;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_download_pkg");
+
+	if (error_offset)
+		*error_offset = 0;
+	if (error_info)
+		*error_info = 0;
+
+	cmd = &desc.params.download_pkg;
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+	if (last_buf)
+		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+	if (status == ICE_ERR_AQ_ERROR) {
+		/* Read error from buffer only when the FW returned an error */
+		struct ice_aqc_download_pkg_resp *resp;
+
+		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+		if (error_offset)
+			*error_offset = LE32_TO_CPU(resp->error_offset);
+		if (error_info)
+			*error_info = LE32_TO_CPU(resp->error_info);
+	}
+
+	return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+		      u16 buf_size, struct ice_sq_cd *cd)
+{
+	struct ice_aq_desc desc;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_upload_section");
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+		  bool last_buf, u32 *error_offset, u32 *error_info,
+		  struct ice_sq_cd *cd)
+{
+	struct ice_aqc_download_pkg *cmd;
+	struct ice_aq_desc desc;
+	enum ice_status status;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_update_pkg");
+
+	if (error_offset)
+		*error_offset = 0;
+	if (error_info)
+		*error_info = 0;
+
+	cmd = &desc.params.download_pkg;
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+	if (last_buf)
+		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+	if (status == ICE_ERR_AQ_ERROR) {
+		/* Read error from buffer only when the FW returned an error */
+		struct ice_aqc_download_pkg_resp *resp;
+
+		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+		if (error_offset)
+			*error_offset = LE32_TO_CPU(resp->error_offset);
+		if (error_info)
+			*error_info = LE32_TO_CPU(resp->error_info);
+	}
+
+	return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+		    struct ice_pkg_hdr *pkg_hdr)
+{
+	u32 i;
+
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+	ice_debug(hw, ICE_DBG_PKG, "Package version: %d.%d.%d.%d\n",
+		  pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
+		  pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+
+	/* Search all package segments for the requested segment type */
+	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+		struct ice_generic_seg_hdr *seg;
+
+		seg = (struct ice_generic_seg_hdr *)
+			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
+
+		if (LE32_TO_CPU(seg->seg_type) == seg_type)
+			return seg;
+	}
+
+	return NULL;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+	enum ice_status status;
+	u32 offset, info, i;
+
+	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+	if (status)
+		return status;
+
+	for (i = 0; i < count; i++) {
+		bool last = ((i + 1) == count);
+
+		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+
+		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
+					   last, &offset, &info, NULL);
+
+		if (status) {
+			ice_debug(hw, ICE_DBG_PKG,
+				  "Update pkg failed: err %d off %d inf %d\n",
+				  status, offset, info);
+			break;
+		}
+	}
+
+	ice_release_change_lock(hw);
+
+	return status;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware. Metadata buffers are skipped, and the first metadata buffer
+ * found indicates that the rest of the buffers are all metadata buffers.
+ */
+static enum ice_status
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+	enum ice_status status;
+	struct ice_buf_hdr *bh;
+	u32 offset, info, i;
+
+	if (!bufs || !count)
+		return ICE_ERR_PARAM;
+
+	/* If the first buffer's first section has its metadata bit set
+	 * then there are no buffers to be downloaded, and the operation is
+	 * considered a success.
+	 */
+	bh = (struct ice_buf_hdr *)bufs;
+	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
+		return ICE_SUCCESS;
+
+	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+	if (status)
+		return status;
+
+	for (i = 0; i < count; i++) {
+		bool last = ((i + 1) == count);
+
+		if (!last) {
+			/* check next buffer for metadata flag */
+			bh = (struct ice_buf_hdr *)(bufs + i + 1);
+
+			/* A set metadata flag in the next buffer will signal
+			 * that the current buffer will be the last buffer
+			 * downloaded
+			 */
+			if (LE16_TO_CPU(bh->section_count))
+				if (LE32_TO_CPU(bh->section_entry[0].type) &
+				    ICE_METADATA_BUF)
+					last = true;
+		}
+
+		bh = (struct ice_buf_hdr *)(bufs + i);
+
+		status = ice_aq_download_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
+					     last, &offset, &info, NULL);
+
+		if (status) {
+			ice_debug(hw, ICE_DBG_PKG,
+				  "Pkg download failed: err %d off %d inf %d\n",
+				  status, offset, info);
+			break;
+		}
+
+		if (last)
+			break;
+	}
+
+	ice_release_global_cfg_lock(hw);
+
+	return status;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static enum ice_status
+ice_aq_get_pkg_info_list(struct ice_hw *hw,
+			 struct ice_aqc_get_pkg_info_resp *pkg_info,
+			 u16 buf_size, struct ice_sq_cd *cd)
+{
+	struct ice_aq_desc desc;
+
+	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_pkg_info_list");
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+	struct ice_buf_table *ice_buf_tbl;
+
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+	ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
+		  ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
+		  ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+
+	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+		  LE32_TO_CPU(ice_seg->hdr.seg_type),
+		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+
+	ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+		  LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+				  LE32_TO_CPU(ice_buf_tbl->buf_count));
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the hw structure.
+ */
+enum ice_status
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+	struct ice_aqc_get_pkg_info_resp *pkg_info;
+	struct ice_global_metadata_seg *meta_seg;
+	struct ice_generic_seg_hdr *seg_hdr;
+	enum ice_status status;
+	u16 size;
+	u32 i;
+
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+	if (!pkg_hdr)
+		return ICE_ERR_PARAM;
+
+	meta_seg = (struct ice_global_metadata_seg *)
+		   ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
+	if (meta_seg) {
+		hw->pkg_ver = meta_seg->pkg_ver;
+		ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
+			   sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);
+
+		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
+			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
+			  meta_seg->pkg_name);
+	} else {
+		ice_debug(hw, ICE_DBG_INIT,
+			  "Did not find metadata segment in driver package\n");
+		return ICE_ERR_CFG;
+	}
+
+	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+	if (seg_hdr) {
+		hw->ice_pkg_ver = seg_hdr->seg_ver;
+		ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
+			   sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);
+
+		ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
+			  seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
+			  seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
+			  seg_hdr->seg_name);
+	} else {
+		ice_debug(hw, ICE_DBG_INIT,
+			  "Did not find ice segment in driver package\n");
+		return ICE_ERR_CFG;
+	}
+
+#define ICE_PKG_CNT	4
+	size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
+				    (ICE_PKG_CNT - 1));
+	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+	if (!pkg_info)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
+	if (status)
+		goto init_pkg_free_alloc;
+
+	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT	4
+		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+		u8 place = 0;
+
+		if (pkg_info->pkg_info[i].is_active) {
+			flags[place++] = 'A';
+			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+			ice_memcpy(hw->active_pkg_name,
+				   pkg_info->pkg_info[i].name,
+				   sizeof(hw->active_pkg_name),
+				   ICE_NONDMA_TO_NONDMA);
+		}
+		if (pkg_info->pkg_info[i].is_active_at_boot)
+			flags[place++] = 'B';
+		if (pkg_info->pkg_info[i].is_modified)
+			flags[place++] = 'M';
+		if (pkg_info->pkg_info[i].is_in_nvm)
+			flags[place++] = 'N';
+
+		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
+			  i, pkg_info->pkg_info[i].ver.major,
+			  pkg_info->pkg_info[i].ver.minor,
+			  pkg_info->pkg_info[i].ver.update,
+			  pkg_info->pkg_info[i].ver.draft,
+			  pkg_info->pkg_info[i].name, flags);
+	}
+
+init_pkg_free_alloc:
+	ice_free(hw, pkg_info);
+
+	return status;
+}
+
+/**
+ * ice_find_label_value
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @name: name of the label to search for
+ * @type: the section type that will contain the label
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Finds a label's value given the label name and the section type to search.
+ * The ice_seg parameter must not be NULL since the first call to
+ * ice_enum_labels requires a pointer to an actual ice_seg structure.
+ */
+enum ice_status
+ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+		     u16 *value)
+{
+	struct ice_pkg_enum state;
+	char *label_name;
+	u16 val;
+
+	if (!ice_seg)
+		return ICE_ERR_PARAM;
+
+	do {
+		label_name = ice_enum_labels(ice_seg, type, &state, &val);
+		if (label_name && !strcmp(label_name, name)) {
+			*value = val;
+			return ICE_SUCCESS;
+		}
+
+		ice_seg = NULL;
+	} while (label_name);
+
+	return ICE_ERR_CFG;
+}
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+	u32 seg_count;
+	u32 i;
+
+	if (len < sizeof(*pkg))
+		return ICE_ERR_BUF_TOO_SHORT;
+
+	if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+	    pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+	    pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
+	    pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
+		return ICE_ERR_CFG;
+
+	/* pkg must have at least one segment */
+	seg_count = LE32_TO_CPU(pkg->seg_count);
+	if (seg_count < 1)
+		return ICE_ERR_CFG;
+
+	/* make sure segment array fits in package length */
+	if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
+		return ICE_ERR_BUF_TOO_SHORT;
+
+	/* all segments must fit within length */
+	for (i = 0; i < seg_count; i++) {
+		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
+		struct ice_generic_seg_hdr *seg;
+
+		/* segment header must fit */
+		if (len < off + sizeof(*seg))
+			return ICE_ERR_BUF_TOO_SHORT;
+
+		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+		/* segment body must fit */
+		if (len < off + LE32_TO_CPU(seg->seg_size))
+			return ICE_ERR_BUF_TOO_SHORT;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on
+ * whether the segment was allocated or just the passed-in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+	if (hw->pkg_copy) {
+		ice_free(hw, hw->pkg_copy);
+		hw->pkg_copy = NULL;
+	}
+	hw->seg = NULL;
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX	0
+
+	/* setup Switch block input mask, which is 48-bits in two parts */
+	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+static enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+	struct ice_pkg_hdr *pkg;
+	enum ice_status status;
+	struct ice_seg *seg;
+
+	if (!buf || !len)
+		return ICE_ERR_PARAM;
+
+	pkg = (struct ice_pkg_hdr *)buf;
+	status = ice_verify_pkg(pkg, len);
+	if (status) {
+		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+			  status);
+		return status;
+	}
+
+	/* initialize package info */
+	status = ice_init_pkg_info(hw, pkg);
+	if (status)
+		return status;
+
+	/* find segment in given package */
+	seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
+	if (!seg) {
+		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+		return ICE_ERR_CFG;
+	}
+
+	/* initialize package hints and then download package */
+	ice_init_pkg_hints(hw, seg);
+	status = ice_download_pkg(hw, seg);
+	if (status == ICE_ERR_AQ_NO_WORK) {
+		ice_debug(hw, ICE_DBG_INIT,
+			  "package previously loaded - no work.\n");
+		status = ICE_SUCCESS;
+	}
+
+	/* Free a previous segment, if necessary */
+	ice_free_seg(hw);
+	if (!status) {
+		hw->seg = seg;
+		/* on successful package download, update other required
+		 * registers to support the package
+		 */
+		ice_init_pkg_regs(hw);
+	} else {
+		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
+			  status);
+	}
+
+	return status;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer, and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the package buffer supplied is constant, or if
+ * the memory may disappear shortly after calling this function.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * However, if the package buffer needs to be copied first, such as when being
+ * read from a file, the caller should use ice_copy_and_init_pkg().
+ *
+ * This function will first copy the package buffer, before calling
+ * ice_init_pkg(). The caller is free to immediately destroy the original
+ * package buffer, as the new copy will be managed by this function and
+ * related routines.
+ */
+enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+{
+	enum ice_status status;
+	u8 *buf_copy;
+
+	if (!buf || !len)
+		return ICE_ERR_PARAM;
+
+	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
+	if (!buf_copy)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_init_pkg(hw, buf_copy, len);
+	if (status)
+		/* Free the copy, since we failed to initialize the package */
+		ice_free(hw, buf_copy);
+	else
+		/* Track the copied pkg so we can free it later */
+		hw->pkg_copy = buf_copy;
+
+	return status;
+}
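+
+/* Typical use (illustrative; file_buf and file_len are assumed to come from
+ * the caller, e.g. a package file read into memory): the original buffer may
+ * be released immediately after this call, since a copy is kept internally:
+ *
+ *	status = ice_copy_and_init_pkg(hw, file_buf, file_len);
+ *	rte_free(file_buf);
+ */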
+
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+	struct ice_buf_build *bld;
+	struct ice_buf_hdr *buf;
+
+	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
+	if (!bld)
+		return NULL;
+
+	buf = (struct ice_buf_hdr *)bld;
+	buf->data_end = CPU_TO_LE16(sizeof(*buf) -
+				    sizeof(buf->section_entry[0]));
+	return bld;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates its offset field. "offset" is an index into the field vector
+ * table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+	struct ice_sw_fv_section *fv_section =
+		(struct ice_sw_fv_section *)section;
+
+	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+		return NULL;
+	if (index >= LE16_TO_CPU(fv_section->count))
+		return NULL;
+	if (offset)
+		/* "index" passed in to this function is relative to a given
+		 * 4k block. To get to the true index into the field vector
+		 * table, we need to add the relative index to the base_offset
+		 * field of this section
+		 */
+		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
+	return fv_section->fv + index;
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @prot_ids: array of protocol IDs to search the field vectors for
+ * @ids_cnt: lookup/protocol count
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol id and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile id information
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
+		   struct LIST_HEAD_TYPE *fv_list)
+{
+	struct ice_sw_fv_list_entry *fvl;
+	struct ice_sw_fv_list_entry *tmp;
+	struct ice_pkg_enum state;
+	struct ice_seg *ice_seg;
+	struct ice_fv *fv;
+	u32 offset;
+
+	if (!ids_cnt || !hw->seg)
+		return ICE_ERR_PARAM;
+
+	ice_seg = hw->seg;
+	do {
+		u8 i;
+
+		fv = (struct ice_fv *)
+			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+					   &offset, ice_sw_fv_handler);
+
+		for (i = 0; i < ids_cnt && fv; i++) {
+			int j;
+
+			/* This code assumes that if a switch field vector line
+			 * has a matching protocol, then this line will contain
+			 * the entries necessary to represent every field in
+			 * that protocol header.
+			 */
+			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+				if (fv->ew[j].prot_id == prot_ids[i])
+					break;
+			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+				break;
+			if (i + 1 == ids_cnt) {
+				fvl = (struct ice_sw_fv_list_entry *)
+					ice_malloc(hw, sizeof(*fvl));
+				if (!fvl)
+					goto err;
+				fvl->fv_ptr = fv;
+				fvl->profile_id = offset;
+				LIST_ADD(&fvl->list_entry, fv_list);
+				break;
+			}
+		}
+		ice_seg = NULL;
+	} while (fv);
+	if (LIST_EMPTY(fv_list))
+		return ICE_ERR_CFG;
+	return ICE_SUCCESS;
+
+err:
+	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+				 list_entry) {
+		LIST_DEL(&fvl->list_entry);
+		ice_free(hw, fvl);
+	}
+
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+				 void **section)
+{
+	struct ice_buf_build *buf;
+
+	if (!section)
+		return NULL;
+
+	buf = ice_pkg_buf_alloc(hw);
+	if (!buf)
+		return NULL;
+
+	if (ice_pkg_buf_reserve_section(buf, 1))
+		goto ice_pkg_buf_alloc_single_section_err;
+
+	*section = ice_pkg_buf_alloc_section(buf, type, size);
+	if (!*section)
+		goto ice_pkg_buf_alloc_single_section_err;
+
+	return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+	ice_pkg_buf_free(hw, buf);
+	return NULL;
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This
+ * routine can be called multiple times as long as all calls are made before
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will result
+ * in some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+	struct ice_buf_hdr *buf;
+	u16 section_count;
+	u16 data_end;
+
+	if (!bld)
+		return ICE_ERR_PARAM;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+
+	/* already an active section, can't increase table size */
+	section_count = LE16_TO_CPU(buf->section_count);
+	if (section_count > 0)
+		return ICE_ERR_CFG;
+
+	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+		return ICE_ERR_CFG;
+	bld->reserved_section_table_entries += count;
+
+	data_end = LE16_TO_CPU(buf->data_end) +
+		   (count * sizeof(buf->section_entry[0]));
+	buf->data_end = CPU_TO_LE16(data_end);
+
+	return ICE_SUCCESS;
+}
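+
+/* Ordering sketch (editorial addition, not part of this patch): section
+ * table entries must be reserved before the first section is allocated;
+ * once ice_pkg_buf_alloc_section() has run, a further call to
+ * ice_pkg_buf_reserve_section() returns ICE_ERR_CFG. "sid" and "size" are
+ * placeholders.
+ *
+ *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
+ *	void *sect;
+ *
+ *	if (bld && !ice_pkg_buf_reserve_section(bld, 2))
+ *		sect = ice_pkg_buf_alloc_section(bld, sid, size);
+ */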
+
+/**
+ * ice_pkg_buf_unreserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to unreserve
+ *
+ * Unreserves one or more section table entries in a package buffer, releasing
+ * space that can be used for section data. This routine can be called
+ * multiple times as long as all calls are made before the first call to
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will result in
+ * some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
+{
+	struct ice_buf_hdr *buf;
+	u16 section_count;
+	u16 data_end;
+
+	if (!bld)
+		return ICE_ERR_PARAM;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+
+	/* already an active section, can't decrease table size */
+	section_count = LE16_TO_CPU(buf->section_count);
+	if (section_count > 0)
+		return ICE_ERR_CFG;
+
+	if (count > bld->reserved_section_table_entries)
+		return ICE_ERR_CFG;
+	bld->reserved_section_table_entries -= count;
+
+	data_end = LE16_TO_CPU(buf->data_end) -
+		   (count * sizeof(buf->section_entry[0]));
+	buf->data_end = CPU_TO_LE16(data_end);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffers' status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+	struct ice_buf_hdr *buf;
+	u16 sect_count;
+	u16 data_end;
+
+	if (!bld || !type || !size)
+		return NULL;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+
+	/* check for enough space left in buffer */
+	data_end = LE16_TO_CPU(buf->data_end);
+
+	/* section start must align on 4 byte boundary */
+	data_end = ICE_ALIGN(data_end, 4);
+
+	if ((data_end + size) > ICE_MAX_S_DATA_END)
+		return NULL;
+
+	/* check for more available section table entries */
+	sect_count = LE16_TO_CPU(buf->section_count);
+	if (sect_count < bld->reserved_section_table_entries) {
+		void *section_ptr = ((u8 *)buf) + data_end;
+
+		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
+		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
+		buf->section_entry[sect_count].type = CPU_TO_LE32(type);
+
+		data_end += size;
+		buf->data_end = CPU_TO_LE16(data_end);
+
+		buf->section_count = CPU_TO_LE16(sect_count + 1);
+		return section_ptr;
+	}
+
+	/* no free section table entries */
+	return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_free_space
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of free bytes remaining in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
+{
+	struct ice_buf_hdr *buf;
+
+	if (!bld)
+		return 0;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+	struct ice_buf_hdr *buf;
+
+	if (!bld)
+		return 0;
+
+	buf = (struct ice_buf_hdr *)&bld->buf;
+	return LE16_TO_CPU(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf_header
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+	if (!bld)
+		return NULL;
+
+	return &bld->buf;
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+	ice_free(hw, bld);
+}
+
+/* PTG Management */
+
+/**
+ * ice_ptg_update_xlt1 - Updates packet type groups in hw via xlt1 table
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ *
+ * This function will update the xlt1 hardware table to reflect the new
+ * packet type group configuration.
+ */
+enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
+{
+	struct ice_xlt1_section *sect;
+	struct ice_buf_build *bld;
+	enum ice_status status;
+	u16 index;
+
+	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
+					       ICE_XLT1_SIZE(ICE_XLT1_CNT),
+					       (void **)&sect);
+	if (!bld)
+		return ICE_ERR_NO_MEMORY;
+
+	sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
+	sect->offset = CPU_TO_LE16(0);
+	for (index = 0; index < ICE_XLT1_CNT; index++)
+		sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
+
+	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+
+	ice_pkg_buf_free(hw, bld);
+
+	return status;
+}
+
+/**
+ * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptype: the ptype to search for
+ * @ptg: pointer to variable that receives the PTG
+ *
+ * This function will search the PTGs for a particular ptype, returning the
+ * PTG ID that contains it through the ptg parameter, with the value of
+ * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
+ */
+enum ice_status
+ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
+{
+	if (ptype >= ICE_XLT1_CNT || !ptg)
+		return ICE_ERR_PARAM;
+
+	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptg_alloc_val - Allocates a new packet type group ID by value
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptg: the ptg to allocate
+ *
+ * This function allocates a given packet type group ID specified by the ptg
+ * parameter.
+ */
+static void
+ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
+}
+
+/**
+ * ice_ptg_alloc - Finds a free entry and allocates a new packet type group ID
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ *
+ * This function allocates and returns a new packet type group ID. Note
+ * that 0 is the default packet type group, so successfully created PTGs will
+ * have a non-zero ID value, which means a 0 return value indicates an error.
+ */
+u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 i;
+
+	/* Skip the default PTG of 0 */
+	for (i = 1; i < ICE_MAX_PTGS; i++)
+		if (!hw->blk[blk].xlt1.ptg_tbl[i].in_use) {
+			/* found a free PTG ID */
+			ice_ptg_alloc_val(hw, blk, i);
+			return (u8)i;
+		}
+
+	return 0;
+}
+
+/**
+ * ice_ptg_free - Frees a packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptg: the ptg ID to free
+ *
+ * This function frees a packet type group, and returns all the current ptypes
+ * within it to the default PTG.
+ */
+void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+	struct ice_ptg_ptype *p, *temp;
+
+	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
+	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+	while (p) {
+		p->ptg = ICE_DEFAULT_PTG;
+		temp = p->next_ptype;
+		p->next_ptype = NULL;
+		p = temp;
+	}
+
+	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
+}
+
+/**
+ * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptype: the ptype to remove
+ * @ptg: the ptg to remove the ptype from
+ *
+ * This function will remove the ptype from the specific ptg, and move it to
+ * the default PTG (ICE_DEFAULT_PTG).
+ */
+static enum ice_status
+ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+	struct ice_ptg_ptype **ch;
+	struct ice_ptg_ptype *p;
+
+	if (ptype > ICE_XLT1_CNT - 1)
+		return ICE_ERR_PARAM;
+
+	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	/* Should not happen if .in_use is set, bad config */
+	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
+		return ICE_ERR_CFG;
+
+	/* find the ptype within this PTG, and bypass the link over it */
+	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+	while (p) {
+		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
+			*ch = p->next_ptype;
+			break;
+		}
+
+		ch = &p->next_ptype;
+		p = p->next_ptype;
+	}
+
+	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
+	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptype: the ptype to add or move
+ * @ptg: the ptg to add or move the ptype to
+ *
+ * This function will either add or move a ptype to a particular PTG depending
+ * on whether the ptype is already part of another group. Note that using a
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * default PTG.
+ */
+enum ice_status
+ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+	enum ice_status status;
+	u8 original_ptg;
+
+	if (ptype > ICE_XLT1_CNT - 1)
+		return ICE_ERR_PARAM;
+
+	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
+	if (status)
+		return status;
+
+	/* Is ptype already in the correct PTG? */
+	if (original_ptg == ptg)
+		return ICE_SUCCESS;
+
+	/* Remove from original PTG and move back to the default PTG */
+	if (original_ptg != ICE_DEFAULT_PTG)
+		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
+
+	/* Moving to default PTG? Then we're done with this request */
+	if (ptg == ICE_DEFAULT_PTG)
+		return ICE_SUCCESS;
+
+	/* Add ptype to PTG at beginning of list */
+	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
+		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
+		&hw->blk[blk].xlt1.ptypes[ptype];
+
+	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
+	hw->blk[blk].xlt1.t[ptype] = ptg;
+
+	return ICE_SUCCESS;
+}
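+
+/* Flow sketch (editorial addition, not part of this patch): allocate a new
+ * PTG, move a ptype into it, then push the change to hardware through the
+ * XLT1 table. The ptype value is a placeholder.
+ *
+ *	u8 ptg = ice_ptg_alloc(hw, ICE_BLK_RSS);
+ *
+ *	if (ptg != ICE_DEFAULT_PTG &&
+ *	    !ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 17, ptg))
+ *		(void)ice_ptg_update_xlt1(hw, ICE_BLK_RSS);
+ */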
+
+/* Block / table size info */
+struct ice_blk_size_details {
+	u16 xlt1;			/* # xlt1 entries */
+	u16 xlt2;			/* # xlt2 entries */
+	u16 prof_tcam;			/* # profile id tcam entries */
+	u16 prof_id;			/* # profile ids */
+	u8 prof_cdid_bits;		/* # cdid one-hot bits used in key */
+	u16 prof_redir;			/* # profile redirection entries */
+	u16 es;				/* # extraction sequence entries */
+	u16 fvw;			/* # field vector words */
+	u8 overwrite;			/* overwrite existing entries allowed */
+	u8 reverse;			/* reverse FV order */
+};
+
+static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
+	/*
+	 * Table Definitions
+	 * XLT1 - Number of entries in XLT1 table
+	 * XLT2 - Number of entries in XLT2 table
+	 * TCAM - Number of entries in the Profile ID TCAM table
+	 * CDID - Control Domain ID of the hardware block
+	 * PRED - Number of entries in the Profile Redirection Table
+	 * FV   - Number of entries in the Field Vector
+	 * FVW  - Width (in WORDs) of the Field Vector
+	 * OVR  - Overwrite existing table entries
+	 * REV  - Reverse FV
+	 */
+	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
+	/*          Overwrite   , Reverse FV */
+	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
+		    false, false },
+	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
+		    false, false },
+	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
+		    false, true  },
+	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
+		    true,  true  },
+	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
+		    false, false },
+};
+
+enum ice_sid_all {
+	ICE_SID_XLT1_OFF = 0,
+	ICE_SID_XLT2_OFF,
+	ICE_SID_PR_OFF,
+	ICE_SID_PR_REDIR_OFF,
+	ICE_SID_ES_OFF,
+	ICE_SID_OFF_COUNT,
+};
+
+/* Characteristic handling */
+
+/**
+ * ice_match_prop_lst - determine if properties of two lists match
+ * @list1: first properties list
+ * @list2: second properties list
+ *
+ * Count, cookies and the order must match in order to be considered equivalent.
+ */
+static bool
+ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
+{
+	struct ice_vsig_prof *tmp1;
+	struct ice_vsig_prof *tmp2;
+	u16 chk_count = 0;
+	u16 count = 0;
+
+	/* compare counts */
+	LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
+		count++;
+	}
+	LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
+		chk_count++;
+	}
+	if (!count || count != chk_count)
+		return false;
+
+	tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
+	tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
+
+	/* profile cookies must compare, and in the exact same order to take
+	 * into account priority
+	 */
+	while (count--) {
+		if (tmp2->profile_cookie != tmp1->profile_cookie)
+			return false;
+
+		tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
+		tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
+	}
+
+	return true;
+}
+
+/* VSIG Management */
+
+/**
+ * ice_vsig_update_xlt2_sect - update one section of xlt2 table
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsi: hw vsi number to program
+ * @vsig: vsig for the vsi
+ *
+ * This function will update the xlt2 hardware table with the input vsi
+ * group configuration.
+ */
+static enum ice_status
+ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+			  u16 vsig)
+{
+	struct ice_xlt2_section *sect;
+	struct ice_buf_build *bld;
+	enum ice_status status;
+
+	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
+					       sizeof(struct ice_xlt2_section),
+					       (void **)&sect);
+	if (!bld)
+		return ICE_ERR_NO_MEMORY;
+
+	sect->count = CPU_TO_LE16(1);
+	sect->offset = CPU_TO_LE16(vsi);
+	sect->value[0] = CPU_TO_LE16(vsig);
+
+	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+
+	ice_pkg_buf_free(hw, bld);
+
+	return status;
+}
+
+/**
+ * ice_vsig_update_xlt2 - update xlt2 table with VSIG configuration
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ *
+ * This function will update the xlt2 hardware table with the input vsi
+ * group configuration of used vsis.
+ */
+enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 vsi;
+
+	for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
+		/* update only vsis that have been changed */
+		if (hw->blk[blk].xlt2.vsis[vsi].changed) {
+			enum ice_status status;
+			u16 vsig;
+
+			vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+			status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
+			if (status)
+				return status;
+
+			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_find_vsi - find a VSIG that contains a specified vsi
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsi: vsi of interest
+ * @vsig: pointer to receive the vsi group
+ *
+ * This function will look up the vsi entry in the XLT2 list and return
+ * the vsi group it is associated with.
+ */
+enum ice_status
+ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
+{
+	if (!vsig || vsi >= ICE_MAX_VSI)
+		return ICE_ERR_PARAM;
+
+	/* As long as there's a default or valid VSIG associated with the input
+	 * vsi, the function returns success. Any handling of VSIG will be
+	 * done by the following add, update or remove functions.
+	 */
+	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_alloc_val - allocate a new VSIG by value
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsig: the vsig to allocate
+ *
+ * This function will allocate a given VSIG specified by the vsig parameter.
+ */
+static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
+		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
+	}
+
+	return ICE_VSIG_VALUE(idx, hw->pf_id);
+}
+
+/**
+ * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ *
+ * This function will iterate through the VSIG list and mark the first
+ * unused entry for the new VSIG entry as used and return that value.
+ */
+static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 i;
+
+	for (i = 1; i < ICE_MAX_VSIGS; i++)
+		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+			return ice_vsig_alloc_val(hw, blk, i);
+
+	return ICE_DEFAULT_VSIG;
+}
+
+/**
+ * ice_find_dup_props_vsig - find vsi group with a specified set of properties
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @chs: characteristic list
+ * @vsig: returns the VSIG with the matching profiles, if found
+ *
+ * Each VSIG is associated with a characteristic set; i.e. all vsis under
+ * a group have the same characteristic set. To check if there exists a VSIG
+ * which has the same characteristics as the input characteristics, this
+ * function will iterate through the xlt2 list and return the VSIG that has a
+ * matching configuration. In order to make sure that priorities are accounted
+ * for, the list must match exactly, including the order in which the
+ * characteristics are listed.
+ */
+enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+			struct LIST_HEAD_TYPE *chs, u16 *vsig)
+{
+	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
+	u16 i;
+
+	for (i = 0; i < xlt2->count; i++) {
+		if (xlt2->vsig_tbl[i].in_use &&
+		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
+			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
+			return ICE_SUCCESS;
+		}
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
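+
+/* Find-or-create sketch (editorial addition, not part of this patch): reuse
+ * a VSIG whose characteristic list matches an already-built list "chs",
+ * otherwise allocate a fresh one.
+ *
+ *	u16 vsig;
+ *
+ *	if (ice_find_dup_props_vsig(hw, blk, &chs, &vsig))
+ *		vsig = ice_vsig_alloc(hw, blk);
+ */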
+
+/**
+ * ice_vsig_free - free vsi group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsig: VSIG to remove
+ *
+ * The function will remove all vsis associated with the input VSIG, move
+ * them to the DEFAULT_VSIG, and mark the VSIG as available.
+ */
+enum ice_status
+ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+	struct ice_vsig_prof *dtmp, *del;
+	struct ice_vsig_vsi *vsi_cur;
+	u16 idx;
+
+	idx = vsig & ICE_VSIG_IDX_M;
+	if (idx >= ICE_MAX_VSIGS)
+		return ICE_ERR_PARAM;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
+
+	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	if (!vsi_cur)
+		return ICE_ERR_CFG;
+
+	/* remove all vsis associated with this VSIG XLT2 entry */
+	do {
+		struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+
+		vsi_cur->vsig = ICE_DEFAULT_VSIG;
+		vsi_cur->changed = 1;
+		vsi_cur->next_vsi = NULL;
+		vsi_cur = tmp;
+	} while (vsi_cur);
+
+	/* NULL terminate head of vsi list */
+	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+
+	/* free characteristic list */
+	LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
+				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+				 ice_vsig_prof, list) {
+		LIST_DEL(&del->list);
+		ice_free(hw, del);
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_add_mv_vsi - add or move a vsi to a vsi group
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsi: vsi to move
+ * @vsig: destination vsi group
+ *
+ * This function will move or add the input vsi to the target VSIG.
+ * The function will find the original VSIG the vsi belongs to and
+ * move the entry to the DEFAULT_VSIG, update the original VSIG and
+ * then move entry to the new VSIG.
+ */
+enum ice_status
+ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+	struct ice_vsig_vsi *tmp;
+	enum ice_status status;
+	u16 orig_vsig, idx;
+
+	idx = vsig & ICE_VSIG_IDX_M;
+
+	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+		return ICE_ERR_PARAM;
+
+	/* if the VSIG is not in use and is not the default VSIG, then this
+	 * VSIG doesn't exist.
+	 */
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
+	    vsig != ICE_DEFAULT_VSIG)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+	if (status)
+		return status;
+
+	/* no update required if vsigs match */
+	if (orig_vsig == vsig)
+		return ICE_SUCCESS;
+
+	if (orig_vsig != ICE_DEFAULT_VSIG) {
+		/* remove entry from orig_vsig and add to default VSIG */
+		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
+		if (status)
+			return status;
+	}
+
+	if (idx == ICE_DEFAULT_VSIG)
+		return ICE_SUCCESS;
+
+	/* Create vsi entry and add VSIG and prop_mask values */
+	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
+	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
+
+	/* Add new entry to the head of the VSIG list */
+	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
+		&hw->blk[blk].xlt2.vsis[vsi];
+	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
+	hw->blk[blk].xlt2.t[vsi] = vsig;
+
+	return ICE_SUCCESS;
+}
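+
+/* Flow sketch (editorial addition, not part of this patch): move a vsi into
+ * a VSIG, then flush the pending XLT2 change to hardware; the vsi and vsig
+ * values are placeholders.
+ *
+ *	if (!ice_vsig_add_mv_vsi(hw, ICE_BLK_RSS, vsi, vsig))
+ *		(void)ice_vsig_update_xlt2(hw, ICE_BLK_RSS);
+ */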
+
+/**
+ * ice_vsig_remove_vsi - remove vsi from VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsi: vsi to remove
+ * @vsig: vsi group to remove from
+ *
+ * The function will remove the input vsi from its vsi group and move it
+ * to the DEFAULT_VSIG.
+ */
+enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
+	u16 idx;
+
+	idx = vsig & ICE_VSIG_IDX_M;
+
+	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+		return ICE_ERR_PARAM;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	/* entry already in default VSIG, don't have to remove */
+	if (idx == ICE_DEFAULT_VSIG)
+		return ICE_SUCCESS;
+
+	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	if (!(*vsi_head))
+		return ICE_ERR_CFG;
+
+	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
+	vsi_cur = (*vsi_head);
+
+	/* iterate the vsi list, skip over the entry to be removed */
+	while (vsi_cur) {
+		if (vsi_tgt == vsi_cur) {
+			(*vsi_head) = vsi_cur->next_vsi;
+			break;
+		}
+		vsi_head = &vsi_cur->next_vsi;
+		vsi_cur = vsi_cur->next_vsi;
+	}
+
+	/* verify if vsi was removed from group list */
+	if (!vsi_cur)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	vsi_cur->vsig = ICE_DEFAULT_VSIG;
+	vsi_cur->changed = 1;
+	vsi_cur->next_vsi = NULL;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_find_prof_id - find profile id for a given field vector
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @fv: field vector to search for
+ * @prof_id: receives the profile id
+ */
+static enum ice_status
+ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
+		 struct ice_fv_word *fv, u8 *prof_id)
+{
+	struct ice_es *es = &hw->blk[blk].es;
+	u16 off, i;
+
+	for (i = 0; i < es->count; i++) {
+		off = i * es->fvw;
+
+		if (memcmp(&es->t[off], fv, es->fvw * 2))
+			continue;
+
+		*prof_id = i;
+		return ICE_SUCCESS;
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_prof_id_rsrc_type - get profile id resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+	switch (blk) {
+	case ICE_BLK_SW:
+		*rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
+		break;
+	case ICE_BLK_ACL:
+		*rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
+		break;
+	case ICE_BLK_FD:
+		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
+		break;
+	case ICE_BLK_RSS:
+		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
+		break;
+	case ICE_BLK_PE:
+		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+/**
+ * ice_tcam_ent_rsrc_type - get tcam entry resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+	switch (blk) {
+	case ICE_BLK_SW:
+		*rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
+		break;
+	case ICE_BLK_ACL:
+		*rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
+		break;
+	case ICE_BLK_FD:
+		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
+		break;
+	case ICE_BLK_RSS:
+		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
+		break;
+	case ICE_BLK_PE:
+		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+/**
+ * ice_workaround_get_res_blk - determine the block from a resource type
+ * @type: type of resource
+ * @blk: pointer to an enum that will receive the block type
+ * @tcam: pointer to variable that will be set to true for a TCAM resource type
+ */
+static enum ice_status
+ice_workaround_get_res_blk(u16 type, enum ice_block *blk, bool *tcam)
+{
+	/* just need to support TCAM entries and Profile IDs for now */
+	*tcam = false;
+
+	switch (type) {
+	case ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_SW;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_ACL;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_FD;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_RSS;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM:
+		*blk = ICE_BLK_PE;
+		*tcam = true;
+		break;
+	case ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_SW;
+		break;
+	case ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_ACL;
+		break;
+	case ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_FD;
+		break;
+	case ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_RSS;
+		break;
+	case ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID:
+		*blk = ICE_BLK_PE;
+		break;
+	default:
+		return ICE_ERR_PARAM;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_alloc_res_workaround
+ * @hw: pointer to the hw struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @res: pointer to array that will receive the resources
+ */
+static enum ice_status
+ice_alloc_res_workaround(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+	enum ice_block blk;
+	u16 count = 0;
+	bool tcam;
+	u16 first;
+	u16 last;
+	u16 max;
+	u16 i;
+
+/* Number of PFs we support with this workaround */
+#define ICE_WA_PF_COUNT	4
+#define ICE_WA_1ST_TCAM	4
+#define ICE_WA_1ST_FV	4
+
+	/* Only allow our supported PFs */
+	if (hw->pf_id >= ICE_WA_PF_COUNT)
+		return ICE_ERR_AQ_ERROR;
+
+	if (ice_workaround_get_res_blk(type, &blk, &tcam))
+		return ICE_ERR_AQ_ERROR;
+
+	if (tcam) {
+		/* range of entries based on PF */
+		max = hw->blk[blk].prof.count / ICE_WA_PF_COUNT;
+		first = max * hw->pf_id;
+		last = first + max;
+
+		/* TCAM entries - start at non-zero index in the PROF ID TCAM
+		 * table. The first few entries are for bypass, default and
+		 * errors (only relevant for PF 0)
+		 */
+		first += hw->pf_id ? 0 : ICE_WA_1ST_TCAM;
+
+		for (i = first; i < last && count < num; i++) {
+			if (!hw->blk[blk].prof.resource_used_hack[i]) {
+				res[count++] = i;
+				hw->blk[blk].prof.resource_used_hack[i] = true;
+			}
+		}
+
+		/* handle failure case */
+		if (count < num) {
+			for (i = 0; i < count; i++) {
+				hw->blk[blk].prof.resource_used_hack[res[i]] =
+					false;
+				res[i] = 0;
+			}
+
+			return ICE_ERR_AQ_ERROR;
+		}
+	} else {
+		/* range of entries based on PF */
+		max = hw->blk[blk].es.count / ICE_WA_PF_COUNT;
+		first = max * hw->pf_id;
+		last = first + max;
+
+		/* FV index - start at non-zero index for Field vector table
+		 * The first few entries are for bypass, default and errors
+		 * (only relevant for PF 0)
+		 */
+		first += hw->pf_id ? 0 : ICE_WA_1ST_FV;
+
+		for (i = first; i < last && count < num; i++) {
+			if (!hw->blk[blk].es.resource_used_hack[i]) {
+				res[count++] = i;
+				hw->blk[blk].es.resource_used_hack[i] = true;
+			}
+		}
+
+		/* handle failure case */
+		if (count < num) {
+			for (i = 0; i < count; i++) {
+				hw->blk[blk].es.resource_used_hack[res[i]] =
+					false;
+				res[i] = 0;
+			}
+
+			return ICE_ERR_AQ_ERROR;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
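+
+/* Worked example of the partitioning above (editorial addition, not part of
+ * this patch): for the switch block the Profile ID TCAM has 512 entries and
+ * ICE_WA_PF_COUNT is 4, so max = 128. PF 0 owns entries 4..127 (the first
+ * ICE_WA_1ST_TCAM entries are reserved for bypass, default and errors),
+ * PF 1 owns 128..255, PF 2 owns 256..383 and PF 3 owns 384..511.
+ */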
+
+/**
+ * ice_free_res_workaround
+ * @hw: pointer to the hw struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: array of resource ids to free
+ */
+static enum ice_status
+ice_free_res_workaround(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+	enum ice_block blk;
+	bool tcam = false;
+	u16 i;
+
+	if (ice_workaround_get_res_blk(type, &blk, &tcam))
+		return ICE_ERR_AQ_ERROR;
+
+	if (tcam) {
+		/* TCAM entries */
+		for (i = 0; i < num; i++) {
+			if (res[i] < hw->blk[blk].prof.count) {
+				u16 idx = res[i];
+
+				ice_free_hw_res(hw, type, 1, &idx);
+				hw->blk[blk].prof.resource_used_hack[res[i]] =
+					false;
+			}
+		}
+
+	} else {
+		/* Profile IDs */
+		for (i = 0; i < num; i++) {
+			if (res[i] < hw->blk[blk].es.count) {
+				u16 idx = res[i];
+
+				ice_free_hw_res(hw, type, 1, &idx);
+				hw->blk[blk].es.resource_used_hack[res[i]] =
+					false;
+			}
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_alloc_tcam_ent - allocate hardware tcam entry
+ * @hw: pointer to the hw struct
+ * @blk: the block to allocate the tcam for
+ * @tcam_idx: pointer to variable to receive the tcam entry
+ *
+ * This function allocates a new entry in a Profile ID TCAM for a specific
+ * block.
+ */
+static enum ice_status
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+{
+	u16 res_type;
+
+	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+		return ICE_ERR_PARAM;
+
+	return ice_alloc_res_workaround(hw, res_type, 1, tcam_idx);
+}
+
+/**
+ * ice_free_tcam_ent - free hardware tcam entry
+ * @hw: pointer to the hw struct
+ * @blk: the block from which to free the tcam entry
+ * @tcam_idx: the tcam entry to free
+ *
+ * This function frees an entry in a Profile ID TCAM for a specific block.
+ */
+static enum ice_status
+ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
+{
+	u16 res_type;
+
+	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+		return ICE_ERR_PARAM;
+
+	return ice_free_res_workaround(hw, res_type, 1, &tcam_idx);
+}
+
+/**
+ * ice_alloc_prof_id - allocate profile id
+ * @hw: pointer to the hw struct
+ * @blk: the block to allocate the profile id for
+ * @prof_id: pointer to variable to receive the profile id
+ *
+ * This function allocates a new profile id, which also corresponds to a Field
+ * Vector (Extraction Sequence) entry.
+ */
+static enum ice_status
+ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+{
+	enum ice_status status;
+	u16 res_type;
+	u16 get_prof;
+
+	if (!ice_prof_id_rsrc_type(blk, &res_type))
+		return ICE_ERR_PARAM;
+
+	status = ice_alloc_res_workaround(hw, res_type, 1, &get_prof);
+	if (!status)
+		*prof_id = (u8)get_prof;
+
+	return status;
+}
+
+/**
+ * ice_free_prof_id - free profile id
+ * @hw: pointer to the hw struct
+ * @blk: the block from which to free the profile id
+ * @prof_id: the profile id to free
+ *
+ * This function frees a profile id, which also corresponds to a Field Vector.
+ */
+static enum ice_status
+ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+	u16 tmp_prof_id = (u16)prof_id;
+	u16 res_type;
+
+	if (!ice_prof_id_rsrc_type(blk, &res_type))
+		return ICE_ERR_PARAM;
+
+	/* The use of ice_free_res_workaround() below is a WORKAROUND until
+	 * DCR 076 is available.
+	 * DCR 076 - Update to Profile ID TCAM Resource Allocation
+	 *
+	 * Once the DCR 076 changes are available in FW, this code can be
+	 * restored. Original code:
+	 *
+	 * return ice_free_res(hw, res_type, 1, &tmp_prof_id);
+	 */
+	return ice_free_res_workaround(hw, res_type, 1, &tmp_prof_id);
+}
+
+/**
+ * ice_prof_inc_ref - increment reference count for profile
+ * @hw: pointer to the hw struct
+ * @blk: the block in which the profile resides
+ * @prof_id: the profile id for which to increment the reference count
+ */
+static enum ice_status
+ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+	if (prof_id >= hw->blk[blk].es.count)
+		return ICE_ERR_PARAM;
+
+	hw->blk[blk].es.ref_count[prof_id]++;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_dec_ref - decrement reference count for profile
+ * @hw: pointer to the hw struct
+ * @blk: the block from which to free the profile id
+ * @prof_id: the profile id for which to decrement the reference count
+ */
+static enum ice_status
+ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+	if (prof_id >= hw->blk[blk].es.count)
+		return ICE_ERR_PARAM;
+
+	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
+		if (!--hw->blk[blk].es.ref_count[prof_id])
+			return ice_free_prof_id(hw, blk, prof_id);
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_es - write an extraction sequence to hardware
+ * @hw: pointer to the hw struct
+ * @blk: the block in which to write the extraction sequence
+ * @prof_id: the profile id to write
+ * @fv: pointer to the extraction sequence to write
+ */
+static void
+ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
+	     struct ice_fv_word *fv)
+{
+	u16 off;
+
+	off = prof_id * hw->blk[blk].es.fvw;
+	ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * 2,
+		   ICE_NONDMA_TO_NONDMA);
+}
+
+/* Block / table section IDs */
+static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
+	/* SWITCH */
+	{	ICE_SID_XLT1_SW,
+		ICE_SID_XLT2_SW,
+		ICE_SID_PROFID_TCAM_SW,
+		ICE_SID_PROFID_REDIR_SW,
+		ICE_SID_FLD_VEC_SW
+	},
+
+	/* ACL */
+	{	ICE_SID_XLT1_ACL,
+		ICE_SID_XLT2_ACL,
+		ICE_SID_PROFID_TCAM_ACL,
+		ICE_SID_PROFID_REDIR_ACL,
+		ICE_SID_FLD_VEC_ACL
+	},
+
+	/* FD */
+	{	ICE_SID_XLT1_FD,
+		ICE_SID_XLT2_FD,
+		ICE_SID_PROFID_TCAM_FD,
+		ICE_SID_PROFID_REDIR_FD,
+		ICE_SID_FLD_VEC_FD
+	},
+
+	/* RSS */
+	{	ICE_SID_XLT1_RSS,
+		ICE_SID_XLT2_RSS,
+		ICE_SID_PROFID_TCAM_RSS,
+		ICE_SID_PROFID_REDIR_RSS,
+		ICE_SID_FLD_VEC_RSS
+	},
+
+	/* PE */
+	{	ICE_SID_XLT1_PE,
+		ICE_SID_XLT2_PE,
+		ICE_SID_PROFID_TCAM_PE,
+		ICE_SID_PROFID_REDIR_PE,
+		ICE_SID_FLD_VEC_PE
+	}
+};
+
+/**
+ * ice_fill_tbl - Reads content of a single table type into database
+ * @hw: pointer to the hardware structure
+ * @block_id: Block ID of the table to copy
+ * @sid: Section ID of the table to copy
+ *
+ * Will attempt to read the entire content of a given table of a single block
+ * into the driver database. We assume that the destination buffer will always
+ * be as large as or larger than the data contained in the package. If
+ * this condition is not met, there is most likely an error in the package
+ * contents.
+ */
+static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
+{
+	u32 dst_len, sect_len, offset = 0;
+	struct ice_prof_redir_section *pr;
+	struct ice_prof_id_section *pid;
+	struct ice_xlt1_section *xlt1;
+	struct ice_xlt2_section *xlt2;
+	struct ice_sw_fv_section *es;
+	struct ice_pkg_enum state;
+	u8 *src, *dst;
+	void *sect;
+
+	/* if the HW segment pointer is null then the first iteration of
+	 * ice_pkg_enum_section() will fail. In this case the HW tables will
+	 * not be filled and the function returns without error.
+	 */
+	if (!hw->seg) {
+		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
+		return;
+	}
+
+	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+	sect = ice_pkg_enum_section(hw->seg, &state, sid);
+
+	while (sect) {
+		switch (sid) {
+		case ICE_SID_XLT1_FD:
+		case ICE_SID_XLT1_RSS:
+		case ICE_SID_XLT1_ACL:
+			xlt1 = (struct ice_xlt1_section *)sect;
+			src = xlt1->value;
+			sect_len = LE16_TO_CPU(xlt1->count) *
+				sizeof(*hw->blk[block_id].xlt1.t);
+			dst = hw->blk[block_id].xlt1.t;
+			dst_len = hw->blk[block_id].xlt1.count *
+				sizeof(*hw->blk[block_id].xlt1.t);
+			break;
+		case ICE_SID_XLT2_FD:
+		case ICE_SID_XLT2_RSS:
+		case ICE_SID_XLT2_ACL:
+			xlt2 = (struct ice_xlt2_section *)sect;
+			src = (u8 *)xlt2->value;
+			sect_len = LE16_TO_CPU(xlt2->count) *
+				sizeof(*hw->blk[block_id].xlt2.t);
+			dst = (u8 *)hw->blk[block_id].xlt2.t;
+			dst_len = hw->blk[block_id].xlt2.count *
+				sizeof(*hw->blk[block_id].xlt2.t);
+			break;
+		case ICE_SID_PROFID_TCAM_FD:
+		case ICE_SID_PROFID_TCAM_RSS:
+		case ICE_SID_PROFID_TCAM_ACL:
+			pid = (struct ice_prof_id_section *)sect;
+			src = (u8 *)pid->entry;
+			sect_len = LE16_TO_CPU(pid->count) *
+				sizeof(*hw->blk[block_id].prof.t);
+			dst = (u8 *)hw->blk[block_id].prof.t;
+			dst_len = hw->blk[block_id].prof.count *
+				sizeof(*hw->blk[block_id].prof.t);
+			break;
+		case ICE_SID_PROFID_REDIR_FD:
+		case ICE_SID_PROFID_REDIR_RSS:
+		case ICE_SID_PROFID_REDIR_ACL:
+			pr = (struct ice_prof_redir_section *)sect;
+			src = pr->redir_value;
+			sect_len = LE16_TO_CPU(pr->count) *
+				sizeof(*hw->blk[block_id].prof_redir.t);
+			dst = hw->blk[block_id].prof_redir.t;
+			dst_len = hw->blk[block_id].prof_redir.count *
+				sizeof(*hw->blk[block_id].prof_redir.t);
+			break;
+		case ICE_SID_FLD_VEC_FD:
+		case ICE_SID_FLD_VEC_RSS:
+		case ICE_SID_FLD_VEC_ACL:
+			es = (struct ice_sw_fv_section *)sect;
+			src = (u8 *)es->fv;
+			sect_len = LE16_TO_CPU(es->count) *
+				sizeof(*hw->blk[block_id].es.t);
+			dst = (u8 *)hw->blk[block_id].es.t;
+			dst_len = hw->blk[block_id].es.count *
+				sizeof(*hw->blk[block_id].es.t);
+			break;
+		default:
+			return;
+		}
+
+		/* if the section offset exceeds destination length, terminate
+		 * table fill.
+		 */
+		if (offset > dst_len)
+			return;
+
+		/* if the sum of section size and offset exceeds the destination
+		 * size then we are out of bounds of the HW table size for that
+		 * PF. Change the section length to fill only the remaining
+		 * table space of that PF.
+		 */
+		if ((offset + sect_len) > dst_len)
+			sect_len = dst_len - offset;
+
+		ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
+		offset += sect_len;
+		sect = ice_pkg_enum_section(NULL, &state, sid);
+	}
+}
+
+/**
+ * ice_fill_blk_tbls - Read package content for tables of a block
+ * @hw: pointer to the hardware structure
+ * @block_id: The block ID which contains the tables to be copied
+ *
+ * Reads the current package contents and populates the driver
+ * database with the data it contains to allow for advanced driver
+ * features.
+ */
+static void ice_fill_blk_tbls(struct ice_hw *hw, enum ice_block block_id)
+{
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].xlt1.sid);
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].xlt2.sid);
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].prof.sid);
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].prof_redir.sid);
+	ice_fill_tbl(hw, block_id, hw->blk[block_id].es.sid);
+}
+
+/**
+ * ice_free_prof_map - frees the profile map
+ * @hw: pointer to the hardware structure
+ * @blk: the hw block which contains the profile map to be freed
+ */
+static void ice_free_prof_map(struct ice_hw *hw, enum ice_block blk)
+{
+	struct ice_prof_map *del, *tmp;
+
+	if (LIST_EMPTY(&hw->blk[blk].es.prof_map))
+		return;
+
+	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &hw->blk[blk].es.prof_map,
+				 ice_prof_map, list) {
+		LIST_DEL(&del->list);
+		ice_free(hw, del);
+	}
+}
+
+/**
+ * ice_free_vsig_tbl - free complete VSIG table entries
+ * @hw: pointer to the hardware structure
+ * @blk: the hw block on which to free the VSIG table entries
+ */
+static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 i;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl)
+		return;
+
+	for (i = 1; i < ICE_MAX_VSIGS; i++)
+		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+			ice_vsig_free(hw, blk, i);
+}
+
+/**
+ * ice_free_hw_tbls - free hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+void ice_free_hw_tbls(struct ice_hw *hw)
+{
+	u8 i;
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		ice_free_prof_map(hw, (enum ice_block)i);
+		ice_free_vsig_tbl(hw, (enum ice_block)i);
+		ice_free(hw, hw->blk[i].xlt1.ptypes);
+		ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
+		ice_free(hw, hw->blk[i].xlt1.t);
+		ice_free(hw, hw->blk[i].xlt2.t);
+		ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
+		ice_free(hw, hw->blk[i].xlt2.vsis);
+		ice_free(hw, hw->blk[i].prof.t);
+		ice_free(hw, hw->blk[i].prof_redir.t);
+		ice_free(hw, hw->blk[i].es.t);
+		ice_free(hw, hw->blk[i].es.ref_count);
+
+		ice_free(hw, hw->blk[i].es.resource_used_hack);
+		ice_free(hw, hw->blk[i].prof.resource_used_hack);
+	}
+
+	ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
+}
+
+/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_flow_profs(struct ice_hw *hw)
+{
+	u8 i;
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		ice_init_lock(&hw->fl_profs_locks[i]);
+		INIT_LIST_HEAD(&hw->fl_profs[i]);
+	}
+}
+
+/**
+ * ice_init_sw_xlt1_db - init software xlt1 database from hw tables
+ * @hw: pointer to the hardware structure
+ * @blk: the hw block to initialize
+ */
+static void
+ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 pt;
+
+	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
+		u8 ptg;
+
+		ptg = hw->blk[blk].xlt1.t[pt];
+		if (ptg != ICE_DEFAULT_PTG) {
+			ice_ptg_alloc_val(hw, blk, ptg);
+			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
+		}
+	}
+}
+
+/**
+ * ice_init_sw_xlt2_db - init software xlt2 database from hw tables
+ * @hw: pointer to the hardware structure
+ * @blk: the hw block to initialize
+ */
+static void
+ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
+{
+	u16 vsi;
+
+	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
+		u16 vsig;
+
+		vsig = hw->blk[blk].xlt2.t[vsi];
+		if (vsig) {
+			ice_vsig_alloc_val(hw, blk, vsig);
+			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+			/* no changes at this time, since this has been
+			 * initialized from the original package
+			 */
+			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+		}
+	}
+}
+
+/**
+ * ice_init_sw_db - init software database from hw tables
+ * @hw: pointer to the hardware structure
+ */
+static void
+ice_init_sw_db(struct ice_hw *hw)
+{
+	u16 i;
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
+		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
+	}
+}
+
+/**
+ * ice_init_hw_tbls - init hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+{
+	u8 i;
+
+	ice_init_flow_profs(hw);
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+		struct ice_prof_tcam *prof = &hw->blk[i].prof;
+		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+		struct ice_es *es = &hw->blk[i].es;
+
+		hw->blk[i].overwrite = blk_sizes[i].overwrite;
+		es->reverse = blk_sizes[i].reverse;
+
+		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
+		xlt1->count = blk_sizes[i].xlt1;
+
+		xlt1->ptypes = (struct ice_ptg_ptype *)
+			ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
+
+		if (!xlt1->ptypes)
+			goto err;
+
+		xlt1->ptg_tbl = (struct ice_ptg_entry *)
+			ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
+
+		if (!xlt1->ptg_tbl)
+			goto err;
+
+		xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
+		if (!xlt1->t)
+			goto err;
+
+		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
+		xlt2->count = blk_sizes[i].xlt2;
+
+		xlt2->vsis = (struct ice_vsig_vsi *)
+			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
+
+		if (!xlt2->vsis)
+			goto err;
+
+		xlt2->vsig_tbl = (struct ice_vsig_entry *)
+			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
+		if (!xlt2->vsig_tbl)
+			goto err;
+
+		xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
+		if (!xlt2->t)
+			goto err;
+
+		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
+		prof->count = blk_sizes[i].prof_tcam;
+		prof->max_prof_id = blk_sizes[i].prof_id;
+		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
+		prof->t = (struct ice_prof_tcam_entry *)
+			ice_calloc(hw, prof->count, sizeof(*prof->t));
+
+		if (!prof->t)
+			goto err;
+
+		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
+		prof_redir->count = blk_sizes[i].prof_redir;
+		prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
+						 sizeof(*prof_redir->t));
+
+		if (!prof_redir->t)
+			goto err;
+
+		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
+		es->count = blk_sizes[i].es;
+		es->fvw = blk_sizes[i].fvw;
+		es->t = (struct ice_fv_word *)
+			ice_calloc(hw, es->count * es->fvw, sizeof(*es->t));
+
+		if (!es->t)
+			goto err;
+
+		es->ref_count = (u16 *)
+			ice_calloc(hw, es->count, sizeof(*es->ref_count));
+
+		if (!es->ref_count)
+			goto err;
+
+		es->resource_used_hack = (u8 *)
+			ice_calloc(hw, es->count, sizeof(u8));
+
+		if (!es->resource_used_hack)
+			goto err;
+
+		prof->resource_used_hack = (u8 *)ice_calloc(hw, prof->count,
+							    sizeof(u8));
+
+		if (!prof->resource_used_hack)
+			goto err;
+
+		INIT_LIST_HEAD(&es->prof_map);
+
+		/* Now that tables are allocated, read in package data */
+		ice_fill_blk_tbls(hw, (enum ice_block)i);
+	}
+
+	ice_init_sw_db(hw);
+
+	return ICE_SUCCESS;
+
+err:
+	ice_free_hw_tbls(hw);
+	return ICE_ERR_NO_MEMORY;
+}
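+
+/* Pairing sketch (editorial addition, not part of this patch): table memory
+ * is set up once at init time and released at teardown; on allocation
+ * failure ice_init_hw_tbls() frees everything it already allocated.
+ *
+ *	if (ice_init_hw_tbls(hw))
+ *		return ICE_ERR_NO_MEMORY;
+ *	(use the flexible pipeline)
+ *	ice_free_hw_tbls(hw);
+ */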
+
+/**
+ * ice_prof_gen_key - generate profile id key
+ * @hw: pointer to the hw struct
+ * @blk: the block in which to write the profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: cdid portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ * @key: output of profile id key
+ */
+static enum ice_status
+ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
+		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
+		 u8 key[ICE_TCAM_KEY_SZ])
+{
+	struct ice_prof_id_key inkey;
+
+	inkey.xlt1 = ptg;
+	inkey.xlt2_cdid = CPU_TO_LE16(vsig);
+	inkey.flags = CPU_TO_LE16(flags);
+
+	switch (hw->blk[blk].prof.cdid_bits) {
+	case 0:
+		break;
+	case 2:
+#define ICE_CD_2_M 0xC000U
+#define ICE_CD_2_S 14
+		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
+		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
+		break;
+	case 4:
+#define ICE_CD_4_M 0xF000U
+#define ICE_CD_4_S 12
+		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
+		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
+		break;
+	case 8:
+#define ICE_CD_8_M 0xFF00U
+#define ICE_CD_8_S 8
+		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
+		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
+		break;
+	default:
+		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
+		break;
+	}
+
+	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
+			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
+}
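+
+/* Worked example of the CDID placement above (editorial addition, not part
+ * of this patch): with cdid_bits == 2, cdid == 1 and vsig == 0x0123,
+ * BIT(1) << ICE_CD_2_S is 0x8000, so xlt2_cdid becomes
+ * (0x0123 & ~0xC000) | 0x8000 = 0x8123: the VSIG keeps the low 14 bits and
+ * the one-hot CDID occupies the top two.
+ */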
+
+/**
+ * ice_tcam_write_entry - write tcam entry
+ * @hw: pointer to the hw struct
+ * @blk: the block in which to write the profile ID
+ * @idx: the entry index to write to
+ * @prof_id: profile id
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: cdid portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ */
+static enum ice_status
+ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
+		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
+		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
+		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
+{
+	enum ice_status status;
+
+	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
+				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
+	if (!status) {
+		hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
+		hw->blk[blk].prof.t[idx].prof_id = prof_id;
+	}
+
+	return status;
+}
+
+/**
+ * ice_vsig_get_ref - returns number of vsis that belong to a VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsig: VSIG to query
+ * @refs: pointer to variable to receive the reference count
+ */
+static enum ice_status
+ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+	struct ice_vsig_vsi *ptr;
+
+	*refs = 0;
+
+	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	while (ptr) {
+		(*refs)++;
+		ptr = ptr->next_vsi;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_ptg - get or allocate a ptg for a ptype
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @ptype: the ptype to retrieve the PTG for
+ * @ptg: receives the PTG of the ptype
+ * @add: receives a boolean indicating whether the PTG was added or not
+ */
+static enum ice_status
+ice_get_ptg(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg,
+	    bool *add)
+{
+	enum ice_status status;
+
+	*ptg = ICE_DEFAULT_PTG;
+	*add = false;
+
+	status = ice_ptg_find_ptype(hw, blk, ptype, ptg);
+	if (status)
+		return status;
+
+	if (*ptg == ICE_DEFAULT_PTG) {
+		/* need to allocate a PTG, and add ptype to it */
+		*ptg = ice_ptg_alloc(hw, blk);
+		if (*ptg == ICE_DEFAULT_PTG)
+			return ICE_ERR_HW_TABLE;
+
+		status = ice_ptg_add_mv_ptype(hw, blk, ptype, *ptg);
+		if (status)
+			return ICE_ERR_HW_TABLE;
+
+		*add = true;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_has_prof_vsig - check to see if VSIG has a specific profile
+ * @hw: pointer to the hardware structure
+ * @blk: hw block
+ * @vsig: VSIG to check against
+ * @hdl: profile handle
+ */
+static bool
+ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+	struct ice_vsig_prof *ent;
+
+	LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+			    ice_vsig_prof, list) {
+		if (ent->profile_cookie == hdl)
+			return true;
+	}
+
+	ice_debug(hw, ICE_DBG_INIT,
+		  "Characteristic list for vsi group %d not found.\n",
+		  vsig);
+	return false;
+}
+
+/**
+ * ice_prof_bld_es - build profile id extraction sequence changes
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
+		struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
+{
+	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
+	struct ice_chs_chg *tmp;
+
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
+			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
+			struct ice_pkg_es *p;
+			u32 id;
+
+			id = ice_sect_id(blk, ICE_VEC_TBL);
+			p = (struct ice_pkg_es *)
+				ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+							  vec_size -
+							  sizeof(p->es[0]));
+
+			if (!p)
+				return ICE_ERR_MAX_LIMIT;
+
+			p->count = CPU_TO_LE16(1);
+			p->offset = CPU_TO_LE16(tmp->prof_id);
+
+			ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
+				   ICE_NONDMA_TO_NONDMA);
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_tcam - build profile id tcam changes
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
+		  struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
+{
+	struct ice_chs_chg *tmp;
+
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		if ((tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) ||
+		    tmp->type == ICE_TCAM_REM) {
+			struct ice_prof_id_section *p;
+			u32 id;
+
+			id = ice_sect_id(blk, ICE_PROF_TCAM);
+			p = (struct ice_prof_id_section *)
+				ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+			if (!p)
+				return ICE_ERR_MAX_LIMIT;
+
+			p->count = CPU_TO_LE16(1);
+			p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
+			p->entry[0].prof_id = tmp->prof_id;
+
+			ice_memcpy(p->entry[0].key,
+				   &hw->blk[blk].prof.t[tmp->tcam_idx].key,
+				   sizeof(hw->blk[blk].prof.t->key),
+				   ICE_NONDMA_TO_NONDMA);
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_xlt1 - build xlt1 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
+		  struct LIST_HEAD_TYPE *chgs)
+{
+	struct ice_chs_chg *tmp;
+
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
+			struct ice_xlt1_section *p;
+			u32 id;
+
+			id = ice_sect_id(blk, ICE_XLT1);
+			p = (struct ice_xlt1_section *)
+				ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+			if (!p)
+				return ICE_ERR_MAX_LIMIT;
+
+			p->count = CPU_TO_LE16(1);
+			p->offset = CPU_TO_LE16(tmp->ptype);
+			p->value[0] = tmp->ptg;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_xlt2 - build xlt2 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
+		  struct LIST_HEAD_TYPE *chgs)
+{
+	struct ice_chs_chg *tmp;
+
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		if (tmp->type == ICE_VSIG_ADD || tmp->type == ICE_VSI_MOVE ||
+		    tmp->type == ICE_VSIG_REM) {
+			struct ice_xlt2_section *p;
+			u32 id;
+
+			id = ice_sect_id(blk, ICE_XLT2);
+			p = (struct ice_xlt2_section *)
+				ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+			if (!p)
+				return ICE_ERR_MAX_LIMIT;
+
+			p->count = CPU_TO_LE16(1);
+			p->offset = CPU_TO_LE16(tmp->vsi);
+			p->value[0] = CPU_TO_LE16(tmp->vsig);
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_upd_prof_hw - update hardware using the change list
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
+		struct LIST_HEAD_TYPE *chgs)
+{
+	struct ice_buf_build *b;
+	struct ice_chs_chg *tmp;
+	enum ice_status status;
+	u16 pkg_sects = 0;
+	u16 sects = 0;
+	u16 xlt1 = 0;
+	u16 xlt2 = 0;
+	u16 tcam = 0;
+	u16 es = 0;
+
+	/* count number of sections we need */
+	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+		switch (tmp->type) {
+		case ICE_PTG_ES_ADD:
+			if (tmp->add_ptg)
+				xlt1++;
+			if (tmp->add_prof)
+				es++;
+			break;
+		case ICE_TCAM_ADD:
+		case ICE_TCAM_REM:
+			tcam++;
+			break;
+		case ICE_VSIG_ADD:
+		case ICE_VSI_MOVE:
+		case ICE_VSIG_REM:
+			xlt2++;
+			break;
+		default:
+			break;
+		}
+	}
+	sects = xlt1 + xlt2 + tcam + es;
+
+	if (!sects)
+		return ICE_SUCCESS;
+
+	/* Build update package buffer */
+	b = ice_pkg_buf_alloc(hw);
+	if (!b)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_pkg_buf_reserve_section(b, sects);
+	if (status)
+		goto error_tmp;
+
+	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
+	if (es) {
+		status = ice_prof_bld_es(hw, blk, b, chgs);
+		if (status)
+			goto error_tmp;
+	}
+
+	if (tcam) {
+		status = ice_prof_bld_tcam(hw, blk, b, chgs);
+		if (status)
+			goto error_tmp;
+	}
+
+	if (xlt1) {
+		status = ice_prof_bld_xlt1(blk, b, chgs);
+		if (status)
+			goto error_tmp;
+	}
+
+	if (xlt2) {
+		status = ice_prof_bld_xlt2(blk, b, chgs);
+		if (status)
+			goto error_tmp;
+	}
+
+	/* After the package buffer build, check that the section count in
+	 * the buffer is non-zero and matches the number of sections detected
+	 * for the package update.
+	 */
+	pkg_sects = ice_pkg_buf_get_active_sections(b);
+	if (!pkg_sects || pkg_sects != sects) {
+		status = ICE_ERR_INVAL_SIZE;
+		goto error_tmp;
+	}
+
+	/* update package */
+	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
+	if (status == ICE_ERR_AQ_ERROR)
+		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile.");
+
+error_tmp:
+	ice_pkg_buf_free(hw, b);
+	return status;
+}
+
+/**
+ * ice_add_prof - add profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking id
+ * @ptypes: byte array used as a bitmap of ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @es: extraction sequence (length of array is determined by the block)
+ *
+ * This function registers a profile, which matches a set of PTYPEs with a
+ * particular extraction sequence. While the hardware profile is allocated
+ * here, it will not be written until the first call to ice_add_flow that
+ * specifies the id value used here.
+ */
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+	     struct ice_fv_word *es)
+{
+	u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+	struct ice_prof_map *prof;
+	enum ice_status status;
+	u32 byte = 0;
+	u8 prof_id;
+
+	/* search for existing profile */
+	status = ice_find_prof_id(hw, blk, es, &prof_id);
+	if (status) {
+		/* allocate profile id */
+		status = ice_alloc_prof_id(hw, blk, &prof_id);
+		if (status)
+			goto err_ice_add_prof;
+
+		/* and write new es */
+		ice_write_es(hw, blk, prof_id, es);
+	}
+
+	/* add profile info */
+
+	prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
+	if (!prof) {
+		status = ICE_ERR_NO_MEMORY;
+		goto err_ice_add_prof;
+	}
+
+	prof->profile_cookie = id;
+	prof->prof_id = prof_id;
+	prof->ptype_count = 0;
+	prof->context = 0;
+
+	/* build list of ptgs */
+	while (bytes && prof->ptype_count < ICE_MAX_PTYPE_PER_PROFILE) {
+		u32 bit;
+
+		if (!ptypes[byte]) {
+			bytes--;
+			byte++;
+			continue;
+		}
+		/* Examine 8 bits per byte */
+		for (bit = 0; bit < 8; bit++) {
+			if (ptypes[byte] & 1 << bit) {
+				u16 ptype;
+				u8 m;
+
+				ptype = byte * 8 + bit;
+				if (ptype < ICE_FLOW_PTYPE_MAX) {
+					prof->ptype[prof->ptype_count] = ptype;
+
+					if (++prof->ptype_count >=
+						ICE_MAX_PTYPE_PER_PROFILE)
+						break;
+				}
+
+				/* if no more bits are set in this byte,
+				 * move on to the next byte
+				 */
+				m = ~((1 << (bit + 1)) - 1);
+				if (!(ptypes[byte] & m))
+					break;
+			}
+		}
+
+		bytes--;
+		byte++;
+	}
+	LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
+
+	return ICE_SUCCESS;
+
+err_ice_add_prof:
+	return status;
+}
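+
+/* Hypothetical usage sketch for ice_add_prof(); the tracking id 0x1234,
+ * the RSS block and the 48-word extraction sequence width are made-up
+ * values for illustration, not requirements of this API:
+ *
+ *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
+ *	struct ice_fv_word es[48] = { { 0 } };
+ *	enum ice_status status;
+ *
+ *	ptypes[ICE_PTYPE_IPV4_TCP_PAY / 8] |= 1 << (ICE_PTYPE_IPV4_TCP_PAY % 8);
+ *	status = ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes, es);
+ */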
+
+/**
+ * ice_search_prof_id - Search for a profile tracking ID
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added.
+ */
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+	struct ice_prof_map *entry = NULL;
+	struct ice_prof_map *map;
+
+	LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
+			    list) {
+		if (map->profile_cookie == id) {
+			entry = map;
+			break;
+		}
+	}
+
+	return entry;
+}
+
+/**
+ * ice_set_prof_context - Set context for a given profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @cntxt: context
+ */
+struct ice_prof_map *
+ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
+{
+	struct ice_prof_map *entry;
+
+	entry = ice_search_prof_id(hw, blk, id);
+	if (entry)
+		entry->context = cntxt;
+
+	return entry;
+}
+
+/**
+ * ice_get_prof_context - Get context for a given profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @cntxt: pointer to variable to receive the context
+ */
+struct ice_prof_map *
+ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
+{
+	struct ice_prof_map *entry;
+
+	entry = ice_search_prof_id(hw, blk, id);
+	if (entry)
+		*cntxt = entry->context;
+
+	return entry;
+}
+
+/**
+ * ice_vsig_prof_id_count - count profiles in a VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ */
+static u16
+ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
+	struct ice_vsig_prof *p;
+
+	LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+			    ice_vsig_prof, list) {
+		count++;
+	}
+
+	return count;
+}
+
+/**
+ * ice_rel_tcam_idx - release a tcam index
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @idx: the index to release
+ */
+static enum ice_status
+ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+{
+	/* Masks to invoke a never match entry */
+	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
+	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+	enum ice_status status;
+
+	/* write the tcam entry */
+	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
+				      dc_msk, nm_msk);
+	if (status)
+		return status;
+
+	/* release the tcam entry */
+	status = ice_free_tcam_ent(hw, blk, idx);
+
+	return status;
+}
+
+/**
+ * ice_rem_prof_id - remove one profile from a VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @prof: pointer to profile structure to remove
+ * @chg: pointer to list to record changes
+ */
+static enum ice_status
+ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+		struct ice_vsig_prof *prof, struct LIST_HEAD_TYPE *chg)
+{
+	enum ice_status status;
+	struct ice_chs_chg *p;
+	u16 i;
+
+	for (i = 0; i < prof->tcam_count; i++) {
+		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+		if (!p)
+			goto err_ice_rem_prof_id;
+
+		p->type = ICE_TCAM_REM;
+		p->vsig = vsig;
+		p->prof_id = prof->tcam[i].prof_id;
+		p->tcam_idx = prof->tcam[i].tcam_idx;
+
+		p->ptg = prof->tcam[i].ptg;
+		prof->tcam[i].in_use = false;
+		p->orig_ent = hw->blk[blk].prof.t[p->tcam_idx];
+		status = ice_rel_tcam_idx(hw, blk, p->tcam_idx);
+		if (!status)
+			status = ice_prof_dec_ref(hw, blk, p->prof_id);
+
+		LIST_ADD(&p->list_entry, chg);
+
+		if (status)
+			goto err_ice_rem_prof_id;
+	}
+
+	return ICE_SUCCESS;
+
+err_ice_rem_prof_id:
+	/* caller will clean up the change list */
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_rem_vsig - remove VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: the VSIG to remove
+ * @chg: the change list
+ */
+static enum ice_status
+ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+	     struct LIST_HEAD_TYPE *chg)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+	struct ice_vsig_vsi *vsi_cur;
+	struct ice_vsig_prof *d, *t;
+	enum ice_status status;
+
+	/* remove TCAM entries */
+	LIST_FOR_EACH_ENTRY_SAFE(d, t,
+				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+				 ice_vsig_prof, list) {
+		status = ice_rem_prof_id(hw, blk, vsig, d, chg);
+		if (status)
+			goto err_ice_rem_vsig;
+
+		LIST_DEL(&d->list);
+		ice_free(hw, d);
+	}
+
+	/* Move all VSIs associated with this VSIG to the default VSIG */
+	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+	if (!vsi_cur)
+		return ICE_ERR_CFG;
+
+	do {
+		struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+		struct ice_chs_chg *p;
+
+		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+		if (!p)
+			goto err_ice_rem_vsig;
+
+		p->type = ICE_VSIG_REM;
+		p->orig_vsig = vsig;
+		p->vsig = ICE_DEFAULT_VSIG;
+		p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+
+		LIST_ADD(&p->list_entry, chg);
+
+		status = ice_vsig_free(hw, blk, vsig);
+		if (status)
+			return status;
+
+		vsi_cur = tmp;
+	} while (vsi_cur);
+
+	return ICE_SUCCESS;
+
+err_ice_rem_vsig:
+	/* the caller will free up the change list */
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @hdl: profile handle indicating which profile to remove
+ * @chg: list to receive a record of changes
+ */
+static enum ice_status
+ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+		     struct LIST_HEAD_TYPE *chg)
+{
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+	struct ice_vsig_prof *p, *t;
+	enum ice_status status;
+
+	LIST_FOR_EACH_ENTRY_SAFE(p, t,
+				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+				 ice_vsig_prof, list) {
+		if (p->profile_cookie == hdl) {
+			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
+				/* this is the last profile, remove the VSIG */
+				return ice_rem_vsig(hw, blk, vsig, chg);
+
+			status = ice_rem_prof_id(hw, blk, vsig, p, chg);
+			if (!status) {
+				LIST_DEL(&p->list);
+				ice_free(hw, p);
+			}
+			return status;
+		}
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_flow_all - remove all flows with a particular profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ */
+static enum ice_status
+ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+	struct ice_chs_chg *del, *tmp;
+	struct LIST_HEAD_TYPE chg;
+	enum ice_status status;
+	u16 i;
+
+	INIT_LIST_HEAD(&chg);
+
+	for (i = 1; i < ICE_MAX_VSIGS; i++) {
+		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
+			if (ice_has_prof_vsig(hw, blk, i, id)) {
+				status = ice_rem_prof_id_vsig(hw, blk, i, id,
+							      &chg);
+				if (status)
+					goto err_ice_rem_flow_all;
+			}
+		}
+	}
+
+	status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_flow_all:
+	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+		LIST_DEL(&del->list_entry);
+		ice_free(hw, del);
+	}
+
+	return status;
+}
+
+/**
+ * ice_rem_prof - remove profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will remove the profile specified by the id parameter, which was
+ * previously created through ice_add_prof. If any existing entries
+ * are associated with this profile, they will be removed as well.
+ */
+enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+	enum ice_status status;
+	struct ice_prof_map *pmap;
+
+	pmap = ice_search_prof_id(hw, blk, id);
+	if (!pmap)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	status = ice_free_prof_id(hw, blk, pmap->prof_id);
+
+	if (status)
+		return status;
+
+	/* remove all flows with this profile */
+	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
+	if (status)
+		return status;
+	LIST_DEL(&pmap->list);
+	ice_free(hw, pmap);
+
+	return ICE_SUCCESS;
+}
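+
+/* Lifecycle sketch (assumed call order, reusing the made-up tracking id
+ * from the ice_add_prof() sketch above): a profile registered with
+ * ice_add_prof() and enabled via ice_add_flow() can be torn down in one
+ * call, since ice_rem_prof() also removes any flows still using it:
+ *
+ *	status = ice_rem_prof(hw, ICE_BLK_RSS, 0x1234ULL);
+ */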
+
+/**
+ * ice_get_prof_ptgs - get ptgs for profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @hdl: profile handle
+ * @chg: change list
+ */
+static enum ice_status
+ice_get_prof_ptgs(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+		  struct LIST_HEAD_TYPE *chg)
+{
+	struct ice_prof_map *map;
+	struct ice_chs_chg *p;
+	u16 i;
+
+	/* Get the details on the profile specified by the handle id */
+	map = ice_search_prof_id(hw, blk, hdl);
+	if (!map)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	for (i = 0; i < map->ptype_count; i++) {
+		enum ice_status status;
+		bool add;
+		u8 ptg;
+
+		status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
+		if (status)
+			goto err_ice_get_prof_ptgs;
+
+		if (add || !hw->blk[blk].es.ref_count[map->prof_id]) {
+			/* add PTG to change list */
+			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+			if (!p)
+				goto err_ice_get_prof_ptgs;
+
+			p->type = ICE_PTG_ES_ADD;
+			p->ptype = map->ptype[i];
+			p->ptg = ptg;
+			p->add_ptg = add;
+
+			p->add_prof = !hw->blk[blk].es.ref_count[map->prof_id];
+			p->prof_id = map->prof_id;
+
+			LIST_ADD(&p->list_entry, chg);
+		}
+	}
+
+	return ICE_SUCCESS;
+
+err_ice_get_prof_ptgs:
+	/* let caller clean up the change list */
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: VSIG from which to copy the list
+ * @lst: output list
+ *
+ * This routine makes a copy of the list of profiles in the specified VSIG.
+ */
+static enum ice_status
+ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+		   struct LIST_HEAD_TYPE *lst)
+{
+	struct ice_vsig_prof *ent1, *ent2;
+	u16 idx = vsig & ICE_VSIG_IDX_M;
+
+	LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+			    ice_vsig_prof, list) {
+		struct ice_vsig_prof *p;
+
+		/* copy to the input list */
+		p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
+		if (!p)
+			goto err_ice_get_profs_vsig;
+
+		ice_memcpy(p, ent1, sizeof(*p), ICE_NONDMA_TO_NONDMA);
+
+		LIST_ADD(&p->list, lst);
+	}
+
+	return ICE_SUCCESS;
+
+err_ice_get_profs_vsig:
+	LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
+		LIST_DEL(&ent1->list);
+		ice_free(hw, ent1);
+	}
+
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_add_prof_to_lst - add profile entry to a list
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @lst: the list to be added to
+ * @hdl: profile handle of entry to add
+ */
+static enum ice_status
+ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
+		    struct LIST_HEAD_TYPE *lst, u64 hdl)
+{
+	struct ice_vsig_prof *p;
+	struct ice_prof_map *map;
+	u16 i;
+
+	map = ice_search_prof_id(hw, blk, hdl);
+	if (!map)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
+	if (!p)
+		return ICE_ERR_NO_MEMORY;
+
+	p->profile_cookie = map->profile_cookie;
+	p->prof_id = map->prof_id;
+	p->tcam_count = map->ptype_count;
+
+	for (i = 0; i < map->ptype_count; i++) {
+		enum ice_status status;
+		u8 ptg;
+
+		p->tcam[i].prof_id = map->prof_id;
+		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
+
+		status = ice_ptg_find_ptype(hw, blk, map->ptype[i], &ptg);
+		if (status) {
+			ice_free(hw, p);
+			return status;
+		}
+
+		p->tcam[i].ptg = ptg;
+	}
+
+	LIST_ADD(&p->list, lst);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_move_vsi - move VSI to another VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the VSI to move
+ * @vsig: the VSIG to move the VSI to
+ * @chg: the change list
+ */
+static enum ice_status
+ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
+	     struct LIST_HEAD_TYPE *chg)
+{
+	enum ice_status status;
+	struct ice_chs_chg *p;
+	u16 orig_vsig;
+
+	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+	if (!p)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+	if (!status)
+		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+	if (status) {
+		ice_free(hw, p);
+		return status;
+	}
+
+	p->type = ICE_VSI_MOVE;
+	p->vsi = vsi;
+	p->orig_vsig = orig_vsig;
+	p->vsig = vsig;
+
+	LIST_ADD(&p->list_entry, chg);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_tcam_ena_dis - add enable or disable tcam change
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @enable: true to enable, false to disable
+ * @vsig: the vsig of the tcam entry
+ * @tcam: pointer to the tcam info structure of the tcam to enable or disable
+ * @chg: the change list
+ *
+ * This function appends an enable or disable tcam entry in the change log
+ */
+static enum ice_status
+ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
+		      u16 vsig, struct ice_tcam_inf *tcam,
+		      struct LIST_HEAD_TYPE *chg)
+{
+	enum ice_status status;
+	struct ice_chs_chg *p;
+
+	/* Default: enable means change the low flag bit to don't care */
+	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+
+	/* If disabled, change the low flag bit to never match */
+	if (!enable) {
+		dc_msk[0] = 0x00;
+		nm_msk[0] = 0x01;
+	}
+
+	/* add TCAM to change list */
+	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+	if (!p)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
+				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
+				      nm_msk);
+	if (status)
+		goto err_ice_prof_tcam_ena_dis;
+
+	tcam->in_use = enable;
+
+	p->type = ICE_TCAM_ADD;
+	p->add_tcam_idx = true;
+	p->prof_id = tcam->prof_id;
+	p->ptg = tcam->ptg;
+	p->vsig = 0;
+	p->tcam_idx = tcam->tcam_idx;
+
+	/* log change */
+	LIST_ADD(&p->list_entry, chg);
+
+	return ICE_SUCCESS;
+
+err_ice_prof_tcam_ena_dis:
+	ice_free(hw, p);
+	return status;
+}
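+
+/* Key-bit sketch of the masks above (semantics inferred from this file):
+ * enabling leaves the low flag bit don't-care (dc_msk[0] = 0x01), so the
+ * entry matches whether or not the flag is set; disabling moves that bit
+ * into the never-match set (nm_msk[0] = 0x01), which no packet can
+ * satisfy, effectively hiding the entry without freeing its index.
+ */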
+
+/**
+ * ice_adj_prof_priorities - adjust profile based on priorities
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: the VSIG for which to adjust profile priorities
+ * @chg: the change list
+ */
+static enum ice_status
+ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+			struct LIST_HEAD_TYPE *chg)
+{
+	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
+	struct ice_vsig_prof *t;
+	enum ice_status status;
+	u16 idx;
+
+	ice_memset(ptgs_used, 0, sizeof(ptgs_used), ICE_NONDMA_MEM);
+	idx = vsig & ICE_VSIG_IDX_M;
+
+	/* Priority is based on the order in which the profiles are added. The
+	 * newest added profile has highest priority and the oldest added
+	 * profile has the lowest priority. Since the profile property list for
+	 * a VSIG is sorted from newest to oldest, this code traverses the list
+	 * in order and enables the first of each PTG that it finds (that is not
+	 * already enabled); it also disables any duplicate PTGs that it finds
+	 * in the older profiles (that are currently enabled).
+	 */
+
+	LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+			    ice_vsig_prof, list) {
+		u16 i;
+
+		for (i = 0; i < t->tcam_count; i++) {
+			/* Scan the priorities from newest to oldest.
+			 * Make sure that the newest profiles take priority.
+			 */
+			if (ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
+			    t->tcam[i].in_use) {
+				/* need to mark this PTG as never match, as it
+				 * was already in use and therefore duplicate
+				 * (and lower priority)
+				 */
+				status = ice_prof_tcam_ena_dis(hw, blk, false,
+							       vsig,
+							       &t->tcam[i],
+							       chg);
+				if (status)
+					return status;
+			} else if (!ice_is_bit_set(ptgs_used, t->tcam[i].ptg) &&
+				   !t->tcam[i].in_use) {
+				/* need to enable this PTG, as it is not in use
+				 * and not enabled (highest priority)
+				 */
+				status = ice_prof_tcam_ena_dis(hw, blk, true,
+							       vsig,
+							       &t->tcam[i],
+							       chg);
+				if (status)
+					return status;
+			}
+
+			/* keep track of used ptgs */
+			ice_set_bit(t->tcam[i].ptg, ptgs_used);
+		}
+	}
+
+	return ICE_SUCCESS;
+}
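+
+/* Priority sketch (hypothetical VSIG contents): if profile A was added
+ * first and profile B second, the property list holds B before A. Should
+ * both carry a TCAM entry for the same PTG (say PTG 17, a made-up value),
+ * the walk above enables B's entry and marks A's entry never-match, so
+ * the newest profile wins on the shared PTG.
+ */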
+
+/**
+ * ice_add_prof_id_vsig - add profile to VSIG
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsig: the VSIG to which this profile is to be added
+ * @hdl: the profile handle indicating the profile to add
+ * @chg: the change list
+ */
+static enum ice_status
+ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+		     struct LIST_HEAD_TYPE *chg)
+{
+	/* Masks that ignore flags */
+	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+	struct ice_prof_map *map;
+	struct ice_vsig_prof *t;
+	struct ice_chs_chg *p;
+	u16 i;
+
+	/* Get the details on the profile specified by the handle id */
+	map = ice_search_prof_id(hw, blk, hdl);
+	if (!map)
+		return ICE_ERR_DOES_NOT_EXIST;
+
+	/* Error, if this VSIG already has this profile */
+	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
+		return ICE_ERR_ALREADY_EXISTS;
+
+	/* new VSIG profile structure */
+	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
+	if (!t)
+		goto err_ice_add_prof_id_vsig;
+
+	t->profile_cookie = map->profile_cookie;
+	t->prof_id = map->prof_id;
+	t->tcam_count = map->ptype_count;
+
+	/* create tcam entries */
+	for (i = 0; i < map->ptype_count; i++) {
+		enum ice_status status;
+		u16 tcam_idx;
+		bool add;
+		u8 ptg;
+
+		/* If properly sequenced, we should never have to allocate new
+		 * PTGs
+		 */
+		status = ice_get_ptg(hw, blk, map->ptype[i], &ptg, &add);
+		if (status)
+			goto err_ice_add_prof_id_vsig;
+
+		/* add TCAM to change list */
+		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+		if (!p)
+			goto err_ice_add_prof_id_vsig;
+
+		/* allocate the tcam entry index */
+		status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+		if (status)
+			goto err_ice_add_prof_id_vsig;
+
+		t->tcam[i].ptg = ptg;
+		t->tcam[i].prof_id = map->prof_id;
+		t->tcam[i].tcam_idx = tcam_idx;
+		t->tcam[i].in_use = true;
+
+		p->type = ICE_TCAM_ADD;
+		p->add_tcam_idx = true;
+		p->prof_id = t->tcam[i].prof_id;
+		p->ptg = t->tcam[i].ptg;
+		p->vsig = vsig;
+		p->tcam_idx = t->tcam[i].tcam_idx;
+
+		/* write the tcam entry */
+		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
+					      t->tcam[i].prof_id,
+					      t->tcam[i].ptg, vsig, 0, 0,
+					      vl_msk, dc_msk, nm_msk);
+		if (status)
+			goto err_ice_add_prof_id_vsig;
+
+		/* this increments the reference count of how many tcam entries
+		 * are using this hw profile id
+		 */
+		status = ice_prof_inc_ref(hw, blk, t->tcam[i].prof_id);
+
+		/* log change */
+		LIST_ADD(&p->list_entry, chg);
+	}
+
+	/* add profile to VSIG */
+	LIST_ADD(&t->list,
+		 &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+
+	return ICE_SUCCESS;
+
+err_ice_add_prof_id_vsig:
+	/* let caller clean up the change list */
+	return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_create_prof_id_vsig - add a new VSIG with a single profile
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @hdl: the profile handle of the profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
+			struct LIST_HEAD_TYPE *chg)
+{
+	enum ice_status status;
+	struct ice_chs_chg *p;
+	u16 new_vsig;
+
+	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+	if (!p)
+		return ICE_ERR_NO_MEMORY;
+
+	new_vsig = ice_vsig_alloc(hw, blk);
+	if (!new_vsig) {
+		status = ICE_ERR_HW_TABLE;
+		goto err_ice_create_prof_id_vsig;
+	}
+
+	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
+	if (status)
+		goto err_ice_create_prof_id_vsig;
+
+	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+	if (status)
+		goto err_ice_create_prof_id_vsig;
+
+	p->type = ICE_VSIG_ADD;
+	p->vsi = vsi;
+	p->orig_vsig = ICE_DEFAULT_VSIG;
+	p->vsig = new_vsig;
+
+	LIST_ADD(&p->list_entry, chg);
+
+	return ICE_SUCCESS;
+
+err_ice_create_prof_id_vsig:
+	/* free the unused change-list entry; the caller cleans up @chg */
+	ice_free(hw, p);
+	return status;
+}
+
+/**
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @lst: the list of profiles that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+			 struct LIST_HEAD_TYPE *lst, struct LIST_HEAD_TYPE *chg)
+{
+	struct ice_vsig_prof *t;
+	enum ice_status status;
+	u16 vsig;
+
+	vsig = ice_vsig_alloc(hw, blk);
+	if (!vsig)
+		return ICE_ERR_HW_TABLE;
+
+	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
+	if (status)
+		return status;
+
+	LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
+		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
+					      chg);
+		if (status)
+			return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_find_prof_vsig - find a VSIG with a specific profile handle
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @hdl: the profile handle of the profile to search for
+ * @vsig: returns the VSIG with the matching profile
+ */
+static bool
+ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
+{
+	struct ice_vsig_prof *t;
+	struct LIST_HEAD_TYPE lst;
+	enum ice_status status;
+
+	INIT_LIST_HEAD(&lst);
+
+	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
+	if (!t)
+		return false;
+
+	t->profile_cookie = hdl;
+	LIST_ADD(&t->list, &lst);
+
+	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
+
+	LIST_DEL(&t->list);
+	ice_free(hw, t);
+
+	return status == ICE_SUCCESS;
+}
+
+/**
+ * ice_add_prof_id_flow - add profile flow
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the vsi to enable with the profile specified by hdl
+ * @hdl: profile handle
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the hdl parameter for the VSI specified by the vsi
+ * parameter. Once successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+	struct ice_vsig_prof *tmp1, *del1;
+	struct LIST_HEAD_TYPE union_lst;
+	struct ice_chs_chg *tmp, *del;
+	struct LIST_HEAD_TYPE chrs;
+	struct LIST_HEAD_TYPE chg;
+	enum ice_status status;
+	u16 vsig, or_vsig = 0;
+
+	INIT_LIST_HEAD(&union_lst);
+	INIT_LIST_HEAD(&chrs);
+	INIT_LIST_HEAD(&chg);
+
+	status = ice_get_prof_ptgs(hw, blk, hdl, &chg);
+	if (status)
+		return status;
+
+	/* determine if vsi is already part of a VSIG */
+	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+	if (!status && vsig) {
+		bool only_vsi;
+		u16 ref;
+
+		/* found in vsig */
+		or_vsig = vsig;
+
+		/* make sure that there is no overlap/conflict between the new
+		 * characteristics and the existing ones; we don't support that
+		 * scenario
+		 */
+		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
+			status = ICE_ERR_ALREADY_EXISTS;
+			goto err_ice_add_prof_id_flow;
+		}
+
+		/* last VSI in the VSIG? */
+		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+		if (status)
+			goto err_ice_add_prof_id_flow;
+		only_vsi = (ref == 1);
+
+		/* create a union of the current profiles and the one being
+		 * added
+		 */
+		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
+		if (status)
+			goto err_ice_add_prof_id_flow;
+
+		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
+		if (status)
+			goto err_ice_add_prof_id_flow;
+
+		/* search for an existing VSIG with an exact characteristic
+		 * match
+		 */
+		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
+		if (!status) {
+			/* found an exact match */
+			/* move vsi to the VSIG that matches */
+			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+
+			/* remove original VSIG if we just moved the only VSI
+			 * from it
+			 */
+			if (only_vsi) {
+				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
+				if (status)
+					goto err_ice_add_prof_id_flow;
+			}
+		} else if (only_vsi) {
+			/* If the original VSIG only contains one VSI, then it
+			 * will be the requesting VSI. In this case the VSI is
+			 * not sharing entries and we can simply add the new
+			 * profile to the VSIG.
+			 */
+			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+
+			/* Adjust priorities */
+			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+		} else {
+			/* No match, so we need a new VSIG */
+			status = ice_create_vsig_from_lst(hw, blk, vsi,
+							  &union_lst, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+
+			/* Adjust priorities */
+			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+		}
+	} else {
+		/* need to find or add a VSIG */
+		/* search for an existing VSIG with an exact characteristic
+		 * match
+		 */
+		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
+			/* found an exact match */
+			/* add or move vsi to the VSIG that matches */
+			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+		} else {
+			/* we did not find an exact match */
+			/* we need to add a VSIG */
+			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
+							 &chg);
+			if (status)
+				goto err_ice_add_prof_id_flow;
+		}
+	}
+
+	/* update hardware */
+	if (!status)
+		status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_add_prof_id_flow:
+	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+		LIST_DEL(&del->list_entry);
+		ice_free(hw, del);
+	}
+
+	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
+		LIST_DEL(&del1->list);
+		ice_free(hw, del1);
+	}
+
+	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &chrs, ice_vsig_prof, list) {
+		LIST_DEL(&del1->list);
+		ice_free(hw, del1);
+	}
+
+	return status;
+}
+
+/**
+ * ice_add_flow - add flow
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: array of VSIs to enable with the profile specified by id
+ * @count: number of elements in the vsi array
+ * @id: profile tracking id
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the id parameter for the VSIs specified in the vsi
+ * array. Once successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+	     u64 id)
+{
+	enum ice_status status;
+	u16 i;
+
+	for (i = 0; i < count; i++) {
+		status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
+		if (status)
+			return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_rem_prof_from_list - remove a profile from list
+ * @hw: pointer to the hw struct
+ * @lst: list to remove the profile from
+ * @hdl: the profile handle indicating the profile to remove
+ */
+static enum ice_status
+ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
+{
+	struct ice_vsig_prof *ent, *tmp;
+
+	LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) {
+		if (ent->profile_cookie == hdl) {
+			LIST_DEL(&ent->list);
+			ice_free(hw, ent);
+			return ICE_SUCCESS;
+		}
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_prof_id_flow - remove flow
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: the vsi from which to remove the profile specified by hdl
+ * @hdl: profile tracking handle
+ *
+ * Calling this function will update the hardware tables to remove the
+ * profile indicated by the hdl parameter for the VSI specified by the vsi
+ * parameter. Once successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+	struct ice_vsig_prof *tmp1, *del1;
+	struct LIST_HEAD_TYPE chg, copy;
+	struct ice_chs_chg *tmp, *del;
+	enum ice_status status;
+	u16 vsig;
+
+	INIT_LIST_HEAD(&copy);
+	INIT_LIST_HEAD(&chg);
+
+	/* determine if vsi is already part of a VSIG */
+	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+	if (!status && vsig) {
+		bool last_profile;
+		bool only_vsi;
+		u16 ref;
+
+		/* found in VSIG */
+		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
+		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+		if (status)
+			goto err_ice_rem_prof_id_flow;
+		only_vsi = (ref == 1);
+
+		if (only_vsi) {
+			/* If the original VSIG only contains one reference,
+			 * which will be the requesting VSI, then the VSI is not
+			 * sharing entries and we can simply remove the specific
+			 * characteristics from the VSIG.
+			 */
+
+			if (last_profile) {
+				/* If there are no profiles left for this VSIG,
+				 * then simply remove the VSIG.
+				 */
+				status = ice_rem_vsig(hw, blk, vsig, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+			} else {
+				status = ice_rem_prof_id_vsig(hw, blk, vsig,
+							      hdl, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+
+				/* Adjust priorities */
+				status = ice_adj_prof_priorities(hw, blk, vsig,
+								 &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+			}
+
+		} else {
+			/* Make a copy of the VSIG's list of Profiles */
+			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
+			if (status)
+				goto err_ice_rem_prof_id_flow;
+
+			/* Remove specified profile entry from the list */
+			status = ice_rem_prof_from_list(hw, &copy, hdl);
+			if (status)
+				goto err_ice_rem_prof_id_flow;
+
+			if (LIST_EMPTY(&copy)) {
+				status = ice_move_vsi(hw, blk, vsi,
+						      ICE_DEFAULT_VSIG, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+
+			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
+							    &vsig)) {
+				/* found a VSIG with a matching profile
+				 * list; move the VSI to that VSIG
+				 */
+				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+			} else {
+				/* since no existing VSIG supports this
+				 * characteristic pattern, we need to create a
+				 * new VSIG and tcam entries
+				 */
+				status = ice_create_vsig_from_lst(hw, blk, vsi,
+								  &copy, &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+
+				/* Adjust priorities */
+				status = ice_adj_prof_priorities(hw, blk, vsig,
+								 &chg);
+				if (status)
+					goto err_ice_rem_prof_id_flow;
+			}
+		}
+	} else {
+		status = ICE_ERR_DOES_NOT_EXIST;
+	}
+
+	/* update hardware tables */
+	if (!status)
+		status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_prof_id_flow:
+	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+		LIST_DEL(&del->list_entry);
+		ice_free(hw, del);
+	}
+
+	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
+		LIST_DEL(&del1->list);
+		ice_free(hw, del1);
+	}
+
+	return status;
+}
+
+/**
+ * ice_rem_flow - remove flow
+ * @hw: pointer to the hw struct
+ * @blk: hardware block
+ * @vsi: array of VSIs from which to remove the profile specified by id
+ * @count: number of elements in the vsi array
+ * @id: profile tracking id
+ *
+ * The function will remove flows from the specified VSIs that were enabled
+ * using ice_add_flow. The id value indicates which profile will be
+ * removed. Once successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+	     u64 id)
+{
+	enum ice_status status;
+	u16 i;
+
+	for (i = 0; i < count; i++) {
+		status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
+		if (status)
+			return status;
+	}
+
+	return ICE_SUCCESS;
+}
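+
+/* Pairing sketch for ice_add_flow()/ice_rem_flow(); the VSI numbers and
+ * tracking id below are made-up values for illustration:
+ *
+ *	u16 vsi[] = { 3, 5 };
+ *
+ *	if (!ice_add_flow(hw, ICE_BLK_RSS, vsi, 2, 0x1234ULL)) {
+ *		// ... the profile is now enabled on both VSIs ...
+ *		ice_rem_flow(hw, ICE_BLK_RSS, vsi, 2, 0x1234ULL);
+ *	}
+ */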
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
new file mode 100644
index 000000000..81e89aeeb
--- /dev/null
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2019
+ */
+
+#ifndef _ICE_FLEX_PIPE_H_
+#define _ICE_FLEX_PIPE_H_
+
+#include "ice_type.h"
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ	1
+#define ICE_PKG_FMT_VER_MNR	0
+#define ICE_PKG_FMT_VER_UPD	0
+#define ICE_PKG_FMT_VER_DFT	0
+
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+
+struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+		    struct ice_pkg_hdr *pkg_hdr);
+enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg);
+
+enum ice_status
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_header);
+
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
+
+enum ice_status
+ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+		     u16 *value);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
+		   struct LIST_HEAD_TYPE *fv_list);
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+		      u16 buf_size, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+
+/* package buffer building routines */
+
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
+enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
+void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
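+
+/* Build-sequence sketch using the routines above (mirrors what
+ * ice_upd_prof_hw() does in ice_flex_pipe.c; "id" and "size" are
+ * placeholders for a real section type and length):
+ *
+ *	struct ice_buf_build *b = ice_pkg_buf_alloc(hw);
+ *
+ *	if (b && !ice_pkg_buf_reserve_section(b, 1)) {
+ *		void *sect = ice_pkg_buf_alloc_section(b, id, size);
+ *
+ *		// fill *sect, then push the buffer to hardware:
+ *		ice_update_pkg(hw, ice_pkg_buf(b), 1);
+ *	}
+ *	ice_pkg_buf_free(hw, b);
+ */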
+
+/* XLT1/PType group functions */
+enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
+enum ice_status
+ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg);
+u8 ice_ptg_alloc(struct ice_hw *hw, enum ice_block blk);
+void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg);
+enum ice_status
+ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg);
+
+/* XLT2/Vsi group functions */
+enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
+enum ice_status
+ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
+enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+			struct LIST_HEAD_TYPE *chs, u16 *vsig);
+
+enum ice_status
+ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
+enum ice_status ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig);
+enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+	     struct ice_fv_word *es);
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+struct ice_prof_map *
+ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
+struct ice_prof_map *
+ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
+enum ice_status
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+void ice_free_seg(struct ice_hw *hw);
+void ice_free_hw_tbls(struct ice_hw *hw);
+enum ice_status
+ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+	     u64 id);
+enum ice_status
+ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+	     u64 id);
+enum ice_status
+ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+
+enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+	    u16 len);
+#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 84a38cb70..9f98f4f2a 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -16,4 +16,690 @@ struct ice_fv {
 	struct ice_fv_word ew[ICE_MAX_FV_WORDS];
 };
 
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+	struct ice_pkg_ver format_ver;
+	__le32 seg_count;
+	__le32 seg_offset[1];
+};
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_METADATA	0x00000001
+#define SEGMENT_TYPE_ICE	0x00000010
+	__le32 seg_type;
+	struct ice_pkg_ver seg_ver;
+	__le32 seg_size;
+	char seg_name[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+	struct {
+		__le16 device_id;
+		__le16 vendor_id;
+	} dev_vend_id;
+	__le32 id;
+};
+
+struct ice_device_id_entry {
+	union ice_device_id device;
+	union ice_device_id sub_device;
+};
+
+struct ice_seg {
+	struct ice_generic_seg_hdr hdr;
+	__le32 device_table_count;
+	struct ice_device_id_entry device_table[1];
+};
+
+struct ice_nvm_table {
+	__le32 table_count;
+	__le32 vers[1];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE	4096
+	u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+	__le32 buf_count;
+	struct ice_buf buf_array[1];
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+	struct ice_generic_seg_hdr hdr;
+	struct ice_pkg_ver pkg_ver;
+	__le32 track_id;
+	char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF		12
+#define ICE_MAX_S_OFF		4095
+#define ICE_MIN_S_SZ		1
+#define ICE_MAX_S_SZ		4084
+
+/* section information */
+struct ice_section_entry {
+	__le32 type;
+	__le16 offset;
+	__le16 size;
+};
+
+#define ICE_MIN_S_COUNT		1
+#define ICE_MAX_S_COUNT		511
+#define ICE_MIN_S_DATA_END	12
+#define ICE_MAX_S_DATA_END	4096
+
+#define ICE_METADATA_BUF	0x80000000
+
+struct ice_buf_hdr {
+	__le16 section_count;
+	__le16 data_end;
+	struct ice_section_entry section_entry[1];
+};
+
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
+	sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
+
+/* ice package section IDs */
+#define ICE_SID_XLT0_SW			10
+#define ICE_SID_XLT_KEY_BUILDER_SW	11
+#define ICE_SID_XLT1_SW			12
+#define ICE_SID_XLT2_SW			13
+#define ICE_SID_PROFID_TCAM_SW		14
+#define ICE_SID_PROFID_REDIR_SW		15
+#define ICE_SID_FLD_VEC_SW		16
+#define ICE_SID_CDID_KEY_BUILDER_SW	17
+#define ICE_SID_CDID_REDIR_SW		18
+
+#define ICE_SID_XLT0_ACL		20
+#define ICE_SID_XLT_KEY_BUILDER_ACL	21
+#define ICE_SID_XLT1_ACL		22
+#define ICE_SID_XLT2_ACL		23
+#define ICE_SID_PROFID_TCAM_ACL		24
+#define ICE_SID_PROFID_REDIR_ACL	25
+#define ICE_SID_FLD_VEC_ACL		26
+#define ICE_SID_CDID_KEY_BUILDER_ACL	27
+#define ICE_SID_CDID_REDIR_ACL		28
+
+#define ICE_SID_XLT0_FD			30
+#define ICE_SID_XLT_KEY_BUILDER_FD	31
+#define ICE_SID_XLT1_FD			32
+#define ICE_SID_XLT2_FD			33
+#define ICE_SID_PROFID_TCAM_FD		34
+#define ICE_SID_PROFID_REDIR_FD		35
+#define ICE_SID_FLD_VEC_FD		36
+#define ICE_SID_CDID_KEY_BUILDER_FD	37
+#define ICE_SID_CDID_REDIR_FD		38
+
+#define ICE_SID_XLT0_RSS		40
+#define ICE_SID_XLT_KEY_BUILDER_RSS	41
+#define ICE_SID_XLT1_RSS		42
+#define ICE_SID_XLT2_RSS		43
+#define ICE_SID_PROFID_TCAM_RSS		44
+#define ICE_SID_PROFID_REDIR_RSS	45
+#define ICE_SID_FLD_VEC_RSS		46
+#define ICE_SID_CDID_KEY_BUILDER_RSS	47
+#define ICE_SID_CDID_REDIR_RSS		48
+
+#define ICE_SID_RXPARSER_CAM		50
+#define ICE_SID_RXPARSER_NOMATCH_CAM	51
+#define ICE_SID_RXPARSER_IMEM		52
+#define ICE_SID_RXPARSER_XLT0_BUILDER	53
+#define ICE_SID_RXPARSER_NODE_PTYPE	54
+#define ICE_SID_RXPARSER_MARKER_PTYPE	55
+#define ICE_SID_RXPARSER_BOOST_TCAM	56
+#define ICE_SID_RXPARSER_PROTO_GRP	57
+#define ICE_SID_RXPARSER_METADATA_INIT	58
+#define ICE_SID_RXPARSER_XLT0		59
+
+#define ICE_SID_TXPARSER_CAM		60
+#define ICE_SID_TXPARSER_NOMATCH_CAM	61
+#define ICE_SID_TXPARSER_IMEM		62
+#define ICE_SID_TXPARSER_XLT0_BUILDER	63
+#define ICE_SID_TXPARSER_NODE_PTYPE	64
+#define ICE_SID_TXPARSER_MARKER_PTYPE	65
+#define ICE_SID_TXPARSER_BOOST_TCAM	66
+#define ICE_SID_TXPARSER_PROTO_GRP	67
+#define ICE_SID_TXPARSER_METADATA_INIT	68
+#define ICE_SID_TXPARSER_XLT0		69
+
+#define ICE_SID_RXPARSER_INIT_REDIR	70
+#define ICE_SID_TXPARSER_INIT_REDIR	71
+#define ICE_SID_RXPARSER_MARKER_GRP	72
+#define ICE_SID_TXPARSER_MARKER_GRP	73
+#define ICE_SID_RXPARSER_LAST_PROTO	74
+#define ICE_SID_TXPARSER_LAST_PROTO	75
+#define ICE_SID_RXPARSER_PG_SPILL	76
+#define ICE_SID_TXPARSER_PG_SPILL	77
+#define ICE_SID_RXPARSER_NOMATCH_SPILL	78
+#define ICE_SID_TXPARSER_NOMATCH_SPILL	79
+
+#define ICE_SID_XLT0_PE			80
+#define ICE_SID_XLT_KEY_BUILDER_PE	81
+#define ICE_SID_XLT1_PE			82
+#define ICE_SID_XLT2_PE			83
+#define ICE_SID_PROFID_TCAM_PE		84
+#define ICE_SID_PROFID_REDIR_PE		85
+#define ICE_SID_FLD_VEC_PE		86
+#define ICE_SID_CDID_KEY_BUILDER_PE	87
+#define ICE_SID_CDID_REDIR_PE		88
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST		0x80000010
+#define ICE_SID_LBL_RXPARSER_IMEM	0x80000010
+#define ICE_SID_LBL_TXPARSER_IMEM	0x80000011
+#define ICE_SID_LBL_RESERVED_12		0x80000012
+#define ICE_SID_LBL_RESERVED_13		0x80000013
+#define ICE_SID_LBL_RXPARSER_MARKER	0x80000014
+#define ICE_SID_LBL_TXPARSER_MARKER	0x80000015
+#define ICE_SID_LBL_PTYPE		0x80000016
+#define ICE_SID_LBL_PROTOCOL_ID		0x80000017
+#define ICE_SID_LBL_RXPARSER_TMEM	0x80000018
+#define ICE_SID_LBL_TXPARSER_TMEM	0x80000019
+#define ICE_SID_LBL_RXPARSER_PG		0x8000001A
+#define ICE_SID_LBL_TXPARSER_PG		0x8000001B
+#define ICE_SID_LBL_RXPARSER_M_TCAM	0x8000001C
+#define ICE_SID_LBL_TXPARSER_M_TCAM	0x8000001D
+#define ICE_SID_LBL_SW_PROFID_TCAM	0x8000001E
+#define ICE_SID_LBL_ACL_PROFID_TCAM	0x8000001F
+#define ICE_SID_LBL_PE_PROFID_TCAM	0x80000020
+#define ICE_SID_LBL_RSS_PROFID_TCAM	0x80000021
+#define ICE_SID_LBL_FD_PROFID_TCAM	0x80000022
+#define ICE_SID_LBL_FLAG		0x80000023
+#define ICE_SID_LBL_REG			0x80000024
+#define ICE_SID_LBL_SW_PTG		0x80000025
+#define ICE_SID_LBL_ACL_PTG		0x80000026
+#define ICE_SID_LBL_PE_PTG		0x80000027
+#define ICE_SID_LBL_RSS_PTG		0x80000028
+#define ICE_SID_LBL_FD_PTG		0x80000029
+#define ICE_SID_LBL_SW_VSIG		0x8000002A
+#define ICE_SID_LBL_ACL_VSIG		0x8000002B
+#define ICE_SID_LBL_PE_VSIG		0x8000002C
+#define ICE_SID_LBL_RSS_VSIG		0x8000002D
+#define ICE_SID_LBL_FD_VSIG		0x8000002E
+#define ICE_SID_LBL_PTYPE_META		0x8000002F
+#define ICE_SID_LBL_SW_PROFID		0x80000030
+#define ICE_SID_LBL_ACL_PROFID		0x80000031
+#define ICE_SID_LBL_PE_PROFID		0x80000032
+#define ICE_SID_LBL_RSS_PROFID		0x80000033
+#define ICE_SID_LBL_FD_PROFID		0x80000034
+#define ICE_SID_LBL_RXPARSER_MARKER_GRP	0x80000035
+#define ICE_SID_LBL_TXPARSER_MARKER_GRP	0x80000036
+#define ICE_SID_LBL_RXPARSER_PROTO	0x80000037
+#define ICE_SID_LBL_TXPARSER_PROTO	0x80000038
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST		0x80000038
+
+enum ice_block {
+	ICE_BLK_SW = 0,
+	ICE_BLK_ACL,
+	ICE_BLK_FD,
+	ICE_BLK_RSS,
+	ICE_BLK_PE,
+	ICE_BLK_COUNT
+};
+
+enum ice_sect {
+	ICE_XLT0 = 0,
+	ICE_XLT_KB,
+	ICE_XLT1,
+	ICE_XLT2,
+	ICE_PROF_TCAM,
+	ICE_PROF_REDIR,
+	ICE_VEC_TBL,
+	ICE_CDID_KB,
+	ICE_CDID_REDIR,
+	ICE_SECT_COUNT
+};
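+
+/* Indexing sketch: ice_sect_id() (used by the section builders in
+ * ice_flex_pipe.c) is assumed to map an (ice_block, ice_sect) pair onto
+ * the per-block section IDs above, e.g. (ICE_BLK_RSS, ICE_XLT1) would
+ * resolve to ICE_SID_XLT1_RSS (42).
+ */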
+
+/* Packet Type (PTYPE) values */
+#define ICE_PTYPE_MAC_PAY		1
+#define ICE_PTYPE_IPV4FRAG_PAY		22
+#define ICE_PTYPE_IPV4_PAY		23
+#define ICE_PTYPE_IPV4_UDP_PAY		24
+#define ICE_PTYPE_IPV4_TCP_PAY		26
+#define ICE_PTYPE_IPV4_SCTP_PAY		27
+#define ICE_PTYPE_IPV4_ICMP_PAY		28
+#define ICE_PTYPE_IPV6FRAG_PAY		88
+#define ICE_PTYPE_IPV6_PAY		89
+#define ICE_PTYPE_IPV6_UDP_PAY		90
+#define ICE_PTYPE_IPV6_TCP_PAY		92
+#define ICE_PTYPE_IPV6_SCTP_PAY		93
+#define ICE_PTYPE_IPV6_ICMP_PAY		94
+
+/* Packet Type Groups (PTG) - Inner Most fields (IM) */
+#define ICE_PTG_IM_IPV4_TCP		16
+#define ICE_PTG_IM_IPV4_UDP		17
+#define ICE_PTG_IM_IPV4_SCTP		18
+#define ICE_PTG_IM_IPV4_PAY		20
+#define ICE_PTG_IM_IPV4_OTHER		21
+#define ICE_PTG_IM_IPV6_TCP		32
+#define ICE_PTG_IM_IPV6_UDP		33
+#define ICE_PTG_IM_IPV6_SCTP		34
+#define ICE_PTG_IM_IPV6_OTHER		37
+#define ICE_PTG_IM_L2_OTHER		67
+
+struct ice_flex_fields {
+	union {
+		struct {
+			u8 src_ip;
+			u8 dst_ip;
+			u8 flow_label;	/* valid for IPv6 only */
+		} ip_fields;
+
+		struct {
+			u8 src_prt;
+			u8 dst_prt;
+		} tcp_udp_fields;
+
+		struct {
+			u8 src_ip;
+			u8 dst_ip;
+			u8 src_prt;
+			u8 dst_prt;
+		} ip_tcp_udp_fields;
+
+		struct {
+			u8 src_prt;
+			u8 dst_prt;
+			u8 flow_label;	/* valid for IPv6 only */
+			u8 spi;
+		} ip_esp_fields;
+
+		struct {
+			u32 offset;
+			u32 length;
+		} off_len;
+	} fields;
+};
+
+#define ICE_XLT1_DFLT_GRP	0
+#define ICE_XLT1_TABLE_SIZE	1024
+
+/* package labels */
+struct ice_label {
+	__le16 value;
+#define ICE_PKG_LABEL_SIZE	64
+	char name[ICE_PKG_LABEL_SIZE];
+};
+
+struct ice_label_section {
+	__le16 count;
+	struct ice_label label[1];
+};
+
+#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+	sizeof(struct ice_label_section) - sizeof(struct ice_label), \
+	sizeof(struct ice_label))
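+
+/* Expansion sketch: substituting into ICE_MAX_ENTRIES_IN_BUF() above,
+ *
+ *	ICE_MAX_LABELS_IN_BUF
+ *		= (ICE_PKG_BUF_SIZE - sizeof(struct ice_buf_hdr)
+ *		   - (sizeof(struct ice_label_section) -
+ *		      sizeof(struct ice_label)))
+ *		  / sizeof(struct ice_label)
+ *
+ * i.e. the label capacity of one 4 KB package buffer.
+ */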
+
+struct ice_sw_fv_section {
+	__le16 count;
+	__le16 base_offset;
+	struct ice_fv fv[1];
+};
+
+struct ice_sw_fv_list_entry {
+	struct LIST_ENTRY_TYPE list_entry;
+	u32 profile_id;
+	struct ice_fv *fv_ptr;
+};
+
+#pragma pack(1)
+/* The BOOST tcam stores the match packet header in reverse order, so its
+ * fields appear reversed; as a consequence, fields that are normally big
+ * endian in the packet are little endian here.
+ */
+struct ice_boost_key_value {
+#define ICE_BOOST_REMAINING_HV_KEY	15
+	u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
+	__le16 hv_dst_port_key;
+	__le16 hv_src_port_key;
+	u8 tcam_search_key;
+};
+
+#pragma pack()
+
+struct ice_boost_key {
+	struct ice_boost_key_value key;
+	struct ice_boost_key_value key2;
+};
+
+/* package Boost TCAM entry */
+struct ice_boost_tcam_entry {
+	__le16 addr;
+	__le16 reserved;
+	/* break up the 40 bytes of key into different fields */
+	struct ice_boost_key key;
+	u8 boost_hit_index_group;
+	/* The following contains bitfields which are not on byte boundaries.
+	 * These fields are currently unused by driver software.
+	 */
+#define ICE_BOOST_BIT_FIELDS		43
+	u8 bit_fields[ICE_BOOST_BIT_FIELDS];
+};
+
+struct ice_boost_tcam_section {
+	__le16 count;
+	__le16 reserved;
+	struct ice_boost_tcam_entry tcam[1];
+};
+
+#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+	sizeof(struct ice_boost_tcam_section) - \
+	sizeof(struct ice_boost_tcam_entry), \
+	sizeof(struct ice_boost_tcam_entry))
+
+#pragma pack(1)
+struct ice_xlt1_section {
+	__le16 count;
+	__le16 offset;
+	u8 value[1];
+};
+
+#pragma pack()
+
+#define ICE_XLT1_SIZE(n)	(sizeof(struct ice_xlt1_section) + \
+				 (sizeof(u8) * ((n) - 1)))
+
+struct ice_xlt2_section {
+	__le16 count;
+	__le16 offset;
+	__le16 value[1];
+};
+
+#define ICE_XLT2_SIZE(n)	(sizeof(struct ice_xlt2_section) + \
+				 (sizeof(u16) * ((n) - 1)))
+
+struct ice_prof_redir_section {
+	__le16 count;
+	__le16 offset;
+	u8 redir_value[1];
+};
+
+#define ICE_PROF_REDIR_SIZE(n)	(sizeof(struct ice_prof_redir_section) + \
+				 (sizeof(u8) * ((n) - 1)))
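+
+/* Sizing sketch: the [1]-element arrays above are the one-entry
+ * flexible-array idiom, which these macros account for; e.g. a four-entry
+ * XLT2 section would be carved out of a package buffer as:
+ *
+ *	struct ice_xlt2_section *p;
+ *
+ *	p = (struct ice_xlt2_section *)
+ *		ice_pkg_buf_alloc_section(bld, id, ICE_XLT2_SIZE(4));
+ */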
+
+/* package buffer building */
+
+struct ice_buf_build {
+	struct ice_buf buf;
+	u16 reserved_section_table_entries;
+};
+
+struct ice_pkg_enum {
+	struct ice_buf_table *buf_table;
+	u32 buf_idx;
+
+	u32 type;
+	struct ice_buf_hdr *buf;
+	u32 sect_idx;
+	void *sect;
+	u32 sect_type;
+
+	u32 entry_idx;
+	void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
+
+/* Tunnel enabling */
+
+enum ice_tunnel_type {
+	TNL_VXLAN = 0,
+	TNL_GTPC,
+	TNL_GTPC_TEID,
+	TNL_GTPU,
+	TNL_GTPU_TEID,
+	TNL_VXLAN_GPE,
+	TNL_GENEVE,
+	TNL_NAT,
+	TNL_ROCE_V2,
+	TNL_MPLSO_UDP,
+	TNL_UDP2_END,
+	TNL_UPD_END,
+	TNL_LAST = 0xFF,
+	TNL_ALL = 0xFF,
+};
+
+struct ice_tunnel_type_scan {
+	enum ice_tunnel_type type;
+	const char *label_prefix;
+};
+
+struct ice_tunnel_entry {
+	enum ice_tunnel_type type;
+	u8 valid;
+	u8 in_use;
+	u8 marked;
+	u16 boost_addr;
+	u16 port;
+	struct ice_boost_tcam_entry *boost_entry;
+};
+
+#define ICE_TUNNEL_MAX_ENTRIES	16
+
+struct ice_tunnel_table {
+	u16 count;
+	struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
+};
+
+struct ice_pkg_es {
+	__le16 count;
+	__le16 offset;
+	struct ice_fv_word es[1];
+};
+
+struct ice_es {
+	u32 sid;
+	u16 count;
+	u16 fvw;
+	u16 *ref_count;
+	u8 reverse; /* set to true to reverse FV order */
+	struct LIST_HEAD_TYPE prof_map;
+	struct ice_fv_word *t;
+	u8 *resource_used_hack; /* hack for testing */
+};
+
+/* PTYPE Group management */
+
+/* Note: The XLT1 table takes a 13-bit ptype as input and produces an 8-bit
+ * packet type group (PTG) ID as output.
+ *
+ * Note: PTG 0 is the default packet type group; all PTYPEs are assumed to be
+ * part of this group until moved to a new PTG.
+ */
+#define ICE_DEFAULT_PTG	0
+
+struct ice_ptg_entry {
+	u8 in_use;
+	struct ice_ptg_ptype *first_ptype;
+};
+
+struct ice_ptg_ptype {
+	u8 ptg;
+	struct ice_ptg_ptype *next_ptype;
+};
+
+#define ICE_MAX_TCAM_PER_PROFILE	8
+#define ICE_MAX_PTYPE_PER_PROFILE	8
+
+struct ice_prof_map {
+	struct LIST_ENTRY_TYPE list;
+	u64 profile_cookie;
+	u64 context;
+	u8 prof_id;
+	u8 ptype_count;
+	u8 ptype[ICE_MAX_PTYPE_PER_PROFILE];
+};
+
+#define ICE_INVALID_TCAM	0xFFFF
+
+struct ice_tcam_inf {
+	u8 ptg;
+	u8 prof_id;
+	u16 tcam_idx;
+	u8 in_use;
+};
+
+struct ice_vsig_prof {
+	struct LIST_ENTRY_TYPE list;
+	u64 profile_cookie;
+	u8 prof_id;
+	u8 tcam_count;
+	struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
+};
+
+struct ice_vsig_entry {
+	u8 in_use;
+	struct LIST_HEAD_TYPE prop_lst;
+	struct ice_vsig_vsi *first_vsi;
+};
+
+struct ice_vsig_vsi {
+	u16 changed;
+	u16 vsig;
+	u32 prop_mask;
+	struct ice_vsig_vsi *next_vsi;
+};
+
+#define ICE_XLT1_CNT	1024
+#define ICE_MAX_PTGS	256
+
+/* XLT1 Table */
+struct ice_xlt1 {
+	u32 sid;
+	u16 count;
+	struct ice_ptg_entry *ptg_tbl;
+	struct ice_ptg_ptype *ptypes;
+	u8 *t;
+};
+
+#define ICE_XLT2_CNT	768
+#define ICE_MAX_VSIGS	768
+
+/* VSIG bit layout:
+ * [0:12]: incremental VSIG index, 1 to ICE_MAX_VSIGS
+ * [13:15]: PF number of the device
+ */
+#define ICE_VSIG_IDX_M	(0x1FFF)
+#define ICE_PF_NUM_S	13
+#define ICE_PF_NUM_M	(0x07 << ICE_PF_NUM_S)
+#define ICE_VSIG_VALUE(vsig, pf_id) \
+	(u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
+	      (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
+#define ICE_DEFAULT_VSIG	0
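+
+/* Worked example (made-up values): for VSIG index 5 owned by PF 2,
+ * ICE_VSIG_VALUE(5, 2) = (5 & 0x1FFF) | ((2 << 13) & 0xE000) = 0x4005.
+ */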
+
+/* XLT2 Table */
+struct ice_xlt2 {
+	u32 sid;
+	u16 count;
+	struct ice_vsig_entry *vsig_tbl;
+	struct ice_vsig_vsi *vsis;
+	u16 *t;
+};
+
+/* Extraction sequence - list of match fields:
+ * protocol ID, offset, length
+ */
+union ice_match_fld {
+	struct {
+		u8 prot_id;
+		u8 offset;
+		u8 length;
+		u8 reserved; /* must be zero */
+	} fld;
+	u32 val;
+};
+
+#define ICE_MATCH_LIST_SZ	20
+#pragma pack(1)
+struct ice_match {
+	u8 count;
+	union ice_match_fld list[ICE_MATCH_LIST_SZ];
+};
+
+/* Profile ID Management */
+struct ice_prof_id_key {
+	__le16 flags;
+	u8 xlt1;
+	__le16 xlt2_cdid;
+};
+
+/* Keys are made up of two values, each one-half the size of the key.
+ * For TCAM, the entire key is 80 bits wide (i.e. two 40-bit values).
+ */
+#define ICE_TCAM_KEY_VAL_SZ	5
+#define ICE_TCAM_KEY_SZ		(2 * ICE_TCAM_KEY_VAL_SZ)
+
+struct ice_prof_tcam_entry {
+	__le16 addr;
+	u8 key[ICE_TCAM_KEY_SZ];
+	u8 prof_id;
+};
+
+struct ice_prof_id_section {
+	__le16 count;
+	struct ice_prof_tcam_entry entry[1];
+};
+
+#pragma pack()
+
+struct ice_prof_tcam {
+	u32 sid;
+	u16 count;
+	u16 max_prof_id;
+	u8 cdid_bits; /* # cdid bits to use in key, 0, 2, 4, or 8 */
+	struct ice_prof_tcam_entry *t;
+	u8 *resource_used_hack;
+};
+
+struct ice_prof_redir {
+	u32 sid;
+	u16 count;
+	u8 *t;
+};
+
+/* Tables per block */
+struct ice_blk_info {
+	struct ice_xlt1 xlt1;
+	struct ice_xlt2 xlt2;
+	struct ice_prof_tcam prof;
+	struct ice_prof_redir prof_redir;
+	struct ice_es es;
+	u8 overwrite; /* set to true to allow overwrite of table entries */
+};
+
+enum ice_chg_type {
+	ICE_TCAM_NONE = 0,
+	ICE_PTG_ES_ADD,
+	ICE_TCAM_ADD,
+	ICE_TCAM_REM,
+	ICE_VSIG_ADD,
+	ICE_VSIG_REM,
+	ICE_VSI_MOVE,
+};
+
+struct ice_chs_chg {
+	struct LIST_ENTRY_TYPE list_entry;
+	enum ice_chg_type type;
+
+	u8 add_ptg;
+	u8 add_vsig;
+	u8 add_tcam_idx;
+	u8 add_prof;
+	u16 ptype;
+	u8 ptg;
+	u8 prof_id;
+	u16 vsi;
+	u16 vsig;
+	u16 orig_vsig;
+	u16 tcam_idx;
+	struct ice_prof_tcam_entry orig_ent;
+};
+
+#define ICE_FLOW_PTYPE_MAX		ICE_XLT1_CNT
+
 #endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index d7c12d172..17d79ba21 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -704,6 +704,42 @@ struct ice_hw {
 
 	u8 ucast_shared;	/* true if VSIs can share unicast addr */
 
+	/* Active package version (currently active) */
+	struct ice_pkg_ver active_pkg_ver;
+	u8 active_pkg_name[ICE_PKG_NAME_SIZE];
+
+	/* Driver's package version (from the Metadata seg) */
+	struct ice_pkg_ver pkg_ver;
+	u8 pkg_name[ICE_PKG_NAME_SIZE];
+
+	/* Driver's Ice package version (from the Ice seg) */
+	struct ice_pkg_ver ice_pkg_ver;
+	u8 ice_pkg_name[ICE_PKG_NAME_SIZE];
+
+	/* Pointer to the ice segment */
+	struct ice_seg *seg;
+
+	/* Pointer to allocated copy of pkg memory */
+	u8 *pkg_copy;
+
+	/* tunneling info */
+	struct ice_tunnel_table tnl;
+
+	/* PTYPE group and XLT1 management */
+#define ICE_MAX_PTGS	256
+	struct ice_ptg_entry ptg_tbl[ICE_BLK_COUNT][ICE_MAX_PTGS];
+
+#define ICE_XLT1_CNT	1024
+	struct ice_ptg_ptype xlt1_tbl[ICE_BLK_COUNT][ICE_XLT1_CNT];
+#define ICE_PKG_FILENAME	"package_file"
+#define ICE_PKG_FILENAME_EXT	"pkg"
+#define ICE_PKG_FILE_MAJ_VER	1
+#define ICE_PKG_FILE_MIN_VER	0
+
+	/* HW block tables */
+	struct ice_blk_info blk[ICE_BLK_COUNT];
+	struct ice_lock fl_profs_locks[ICE_BLK_COUNT];	/* lock fltr profiles */
+	struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT];
 };
 
 /* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ice/base/meson.build b/drivers/net/ice/base/meson.build
index 0cfc8cd67..77a9802f4 100644
--- a/drivers/net/ice/base/meson.build
+++ b/drivers/net/ice/base/meson.build
@@ -7,6 +7,7 @@ sources = [
 	'ice_sched.c',
 	'ice_switch.c',
 	'ice_nvm.c',
+	'ice_flex_pipe.c',
 ]
 
 error_cflags = ['-Wno-unused-value',
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH v2 6/7] net/ice/base: add flow module
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
                     ` (4 preceding siblings ...)
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 5/7] net/ice/base: add flexible pipeline module Qi Zhang
@ 2019-01-16 14:13   ` Qi Zhang
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 7/7] net/ice/base: free flow profile entries Qi Zhang
  2019-01-17 13:31   ` [dpdk-dev] [PATCH v2 0/7] net/ice: update share code Zhang, Qi Z
  7 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-16 14:13 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang; +Cc: paul.m.stillwell.jr, dev, ferruh.yigit, Qi Zhang

Add the module that implements the flow abstraction on top of the
flexible pipeline.
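
As a rough usage sketch (illustrative only, simplified from the APIs
added below), a caller describes a flow as one or more packet segments
and then registers a profile for a classification block:

	struct ice_flow_seg_info seg = { 0 };
	struct ice_flow_prof *prof;

	ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 |
			  ICE_FLOW_SEG_HDR_TCP);
	ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
			  &seg, 1, NULL, 0, &prof);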

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/Makefile          |    1 +
 drivers/net/ice/base/ice_common.h |    1 +
 drivers/net/ice/base/ice_flow.c   | 2080 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_flow.h   |  337 ++++++
 drivers/net/ice/base/meson.build  |    1 +
 5 files changed, 2420 insertions(+)
 create mode 100644 drivers/net/ice/base/ice_flow.c

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 1bb76efc2..61846cae2 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -50,6 +50,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_sched.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_nvm.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flex_pipe.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flow.c
 
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx.c
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 67539ccb7..5ac991ef2 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -7,6 +7,7 @@
 
 #include "ice_type.h"
 
+#include "ice_flex_pipe.h"
 #include "ice_switch.h"
 
 enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
new file mode 100644
index 000000000..cc3f69b51
--- /dev/null
+++ b/drivers/net/ice/base/ice_flow.c
@@ -0,0 +1,2080 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2019
+ */
+
+#include "ice_common.h"
+#include "ice_flow.h"
+
+/* Size of known protocol header fields */
+#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
+#define ICE_FLOW_FLD_SZ_VLAN		2
+#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
+#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
+#define ICE_FLOW_FLD_SZ_IP_DSCP		1
+#define ICE_FLOW_FLD_SZ_IP_TTL		1
+#define ICE_FLOW_FLD_SZ_IP_PROT		1
+#define ICE_FLOW_FLD_SZ_PORT		2
+#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
+#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
+#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
+#define ICE_FLOW_FLD_SZ_ARP_OPER	2
+#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
+
+/* Protocol header fields are extracted at the word boundaries as word-sized
+ * values. Specify the displacement value of some non-word-aligned fields needed
+ * to compute the offset of words containing the fields in the corresponding
+ * protocol headers. Displacement values are expressed in number of bits.
+ */
+#define ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP	(-4)
+#define ICE_FLOW_FLD_IPV6_TTL_PROT_DISP	((-2) * 8)
+#define ICE_FLOW_FLD_IPV6_TTL_TTL_DISP	((-1) * 8)
+
+/* Describe properties of a protocol header field */
+struct ice_flow_field_info {
+	enum ice_flow_seg_hdr hdr;
+	s16 off;	/* Offset from start of a protocol header, in bits */
+	u16 size;	/* Size of fields in bits */
+};
+
+/* Table containing properties of supported protocol header fields */
+static const
+struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
+	/* Ether */
+	/* ICE_FLOW_FIELD_IDX_ETH_DA */
+	{ ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN * 8 },
+	/* ICE_FLOW_FIELD_IDX_ETH_SA */
+	{ ICE_FLOW_SEG_HDR_ETH, ETH_ALEN * 8, ETH_ALEN * 8 },
+	/* ICE_FLOW_FIELD_IDX_S_VLAN */
+	{ ICE_FLOW_SEG_HDR_VLAN, 12 * 8, ICE_FLOW_FLD_SZ_VLAN * 8 },
+	/* ICE_FLOW_FIELD_IDX_C_VLAN */
+	{ ICE_FLOW_SEG_HDR_VLAN, 14 * 8, ICE_FLOW_FLD_SZ_VLAN * 8 },
+	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
+	{ ICE_FLOW_SEG_HDR_ETH, 12 * 8, ICE_FLOW_FLD_SZ_ETH_TYPE * 8 },
+	/* IPv4 */
+	/* ICE_FLOW_FIELD_IDX_IP_DSCP */
+	{ ICE_FLOW_SEG_HDR_IPV4, 1 * 8, 1 * 8 },
+	/* ICE_FLOW_FIELD_IDX_IP_TTL */
+	{ ICE_FLOW_SEG_HDR_NONE, 8 * 8, 1 * 8 },
+	/* ICE_FLOW_FIELD_IDX_IP_PROT */
+	{ ICE_FLOW_SEG_HDR_NONE, 9 * 8, ICE_FLOW_FLD_SZ_IP_PROT * 8 },
+	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
+	{ ICE_FLOW_SEG_HDR_IPV4, 12 * 8, ICE_FLOW_FLD_SZ_IPV4_ADDR * 8 },
+	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
+	{ ICE_FLOW_SEG_HDR_IPV4, 16 * 8, ICE_FLOW_FLD_SZ_IPV4_ADDR * 8 },
+	/* IPv6 */
+	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
+	{ ICE_FLOW_SEG_HDR_IPV6, 8 * 8, ICE_FLOW_FLD_SZ_IPV6_ADDR * 8 },
+	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
+	{ ICE_FLOW_SEG_HDR_IPV6, 24 * 8, ICE_FLOW_FLD_SZ_IPV6_ADDR * 8 },
+	/* Transport */
+	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
+	{ ICE_FLOW_SEG_HDR_TCP, 0 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
+	{ ICE_FLOW_SEG_HDR_TCP, 2 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
+	{ ICE_FLOW_SEG_HDR_UDP, 0 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
+	{ ICE_FLOW_SEG_HDR_UDP, 2 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
+	{ ICE_FLOW_SEG_HDR_SCTP, 0 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
+	{ ICE_FLOW_SEG_HDR_SCTP, 2 * 8, ICE_FLOW_FLD_SZ_PORT * 8 },
+	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
+	{ ICE_FLOW_SEG_HDR_TCP, 13 * 8, ICE_FLOW_FLD_SZ_TCP_FLAGS * 8 },
+	/* ARP */
+	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
+	{ ICE_FLOW_SEG_HDR_ARP, 14 * 8, ICE_FLOW_FLD_SZ_IPV4_ADDR * 8 },
+	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
+	{ ICE_FLOW_SEG_HDR_ARP, 24 * 8, ICE_FLOW_FLD_SZ_IPV4_ADDR * 8 },
+	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
+	{ ICE_FLOW_SEG_HDR_ARP, 8 * 8, ETH_ALEN * 8 },
+	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
+	{ ICE_FLOW_SEG_HDR_ARP, 18 * 8, ETH_ALEN * 8 },
+	/* ICE_FLOW_FIELD_IDX_ARP_OP */
+	{ ICE_FLOW_SEG_HDR_ARP, 6 * 8, ICE_FLOW_FLD_SZ_ARP_OPER * 8 },
+	/* ICMP */
+	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
+	{ ICE_FLOW_SEG_HDR_ICMP, 0 * 8, ICE_FLOW_FLD_SZ_ICMP_TYPE * 8 },
+	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
+	{ ICE_FLOW_SEG_HDR_ICMP, 1 * 8, ICE_FLOW_FLD_SZ_ICMP_CODE * 8 },
+	/* GRE */
+	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
+	{ ICE_FLOW_SEG_HDR_GRE, 12 * 8, ICE_FLOW_FLD_SZ_GRE_KEYID * 8 },
+};
+
+/* Bitmaps indicating relevant packet types for a particular protocol header
+ *
+ * Packet types for packets with an Outer/First/Single MAC header
+ */
+static const u32 ice_ptypes_mac_ofos[] = {
+	0xFDC00CC6, 0xBFBF7F7E, 0xF7EFDFDF, 0xFEFDFDFB,
+	0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
+	0x000B0F0F, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last MAC VLAN header */
+static const u32 ice_ptypes_macvlan_il[] = {
+	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
+	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header */
+static const u32 ice_ptypes_ipv4_ofos[] = {
+	0xFDC00000, 0xBFBF7F7E, 0x00EFDFDF, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x0003000F, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header */
+static const u32 ice_ptypes_ipv4_il[] = {
+	0xE0000000, 0xB807700E, 0x8001DC03, 0xE01DC03B,
+	0x0007700E, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header */
+static const u32 ice_ptypes_ipv6_ofos[] = {
+	0x00000000, 0x00000000, 0xF7000000, 0xFEFDFDFB,
+	0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
+	0x00080F00, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header */
+static const u32 ice_ptypes_ipv6_il[] = {
+	0x00000000, 0x03B80770, 0x00EE01DC, 0x0EE00000,
+	0x03B80770, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First ARP header */
+static const u32 ice_ptypes_arp_of[] = {
+	0x00000800, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First UDP header */
+static const u32 ice_ptypes_udp_of[] = {
+	0x81000000, 0x00000000, 0x04000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last UDP header */
+static const u32 ice_ptypes_udp_il[] = {
+	0x80000000, 0x20204040, 0x00081010, 0x80810102,
+	0x00204040, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last TCP header */
+static const u32 ice_ptypes_tcp_il[] = {
+	0x04000000, 0x80810102, 0x10204040, 0x42040408,
+	0x00810002, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last SCTP header */
+static const u32 ice_ptypes_sctp_il[] = {
+	0x08000000, 0x01020204, 0x20408081, 0x04080810,
+	0x01020204, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First ICMP header */
+static const u32 ice_ptypes_icmp_of[] = {
+	0x10000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last ICMP header */
+static const u32 ice_ptypes_icmp_il[] = {
+	0x00000000, 0x02040408, 0x40810102, 0x08101020,
+	0x02040408, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First GRE header */
+static const u32 ice_ptypes_gre_of[] = {
+	0x00000000, 0xBFBF7800, 0x00EFDFDF, 0xFEFDE000,
+	0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last MAC header */
+static const u32 ice_ptypes_mac_il[] = {
+	0x00000000, 0x00000000, 0x00EFDE00, 0x00000000,
+	0x03BF7800, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Parameters and info used during the creation of a flow profile */
+struct ice_flow_prof_params {
+	enum ice_block blk;
+	struct ice_flow_prof *prof;
+
+	u16 entry_length; /* # of bytes formatted entry will require */
+	u8 es_cnt;
+	struct ice_fv_word es[ICE_MAX_FV_WORDS];
+
+	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
+};
+
+/**
+ * ice_is_pow2 - check if integer value is a power of 2
+ * @val: unsigned integer to be validated
+ */
+static bool ice_is_pow2(u64 val)
+{
+	return (val && !(val & (val - 1)));
+}
+
+#define ICE_FLOW_SEG_HDRS_L2_MASK	\
+	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
+#define ICE_FLOW_SEG_HDRS_L3_MASK	\
+	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
+#define ICE_FLOW_SEG_HDRS_L4_MASK	\
+	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
+	 ICE_FLOW_SEG_HDR_SCTP)
+
+/**
+ * ice_flow_val_hdrs - validates packet segments for valid protocol headers
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+static enum ice_status
+ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+	const u32 masks = (ICE_FLOW_SEG_HDRS_L2_MASK |
+			   ICE_FLOW_SEG_HDRS_L3_MASK |
+			   ICE_FLOW_SEG_HDRS_L4_MASK);
+	u8 i;
+
+	for (i = 0; i < segs_cnt; i++) {
+		/* No header specified */
+		if (!(segs[i].hdrs & masks) || (segs[i].hdrs & ~masks))
+			return ICE_ERR_PARAM;
+
+		/* Multiple L3 headers */
+		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
+		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
+			return ICE_ERR_PARAM;
+
+		/* Multiple L4 headers */
+		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
+		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
+			return ICE_ERR_PARAM;
+	}
+
+	return ICE_SUCCESS;
+}
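+
+/* Illustrative examples (not part of the API) of the rules enforced above:
+ *
+ *   ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP
+ *	valid: at most one L3 and one L4 header per segment
+ *   ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6
+ *	rejected with ICE_ERR_PARAM: two L3 headers in one segment
+ *   ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP
+ *	rejected with ICE_ERR_PARAM: two L4 headers in one segment
+ */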
+
+/* Sizes of fixed known protocol headers without header options */
+#define ICE_FLOW_PROT_HDR_SZ_MAC	14
+#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
+#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
+#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
+#define ICE_FLOW_PROT_HDR_SZ_ARP	28
+#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
+#define ICE_FLOW_PROT_HDR_SZ_TCP	20
+#define ICE_FLOW_PROT_HDR_SZ_UDP	8
+#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
+
+/**
+ * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose header size is to be determined
+ */
+static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
+{
+	u16 sz;
+
+	/* L2 headers */
+	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
+		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
+
+	/* L3 headers */
+	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
+		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
+		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
+		/* An L3 header is required if an L4 header is specified */
+		return 0;
+
+	/* L4 headers */
+	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
+		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
+		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
+		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
+	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
+		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
+
+	return sz;
+}
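+
+/* Worked example (illustrative only): for a segment specifying
+ * ICE_FLOW_SEG_HDR_VLAN | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
+ * the fixed header size is 16 (MAC + VLAN) + 20 (IPv4) + 20 (TCP) = 56
+ * bytes, so raw-field matching may only start at offset 56 of the segment.
+ */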
+
+/**
+ * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
+ * @params: information about the flow to be processed
+ *
+ * This function identifies the packet types associated with the protocol
+ * headers present in the packet segments of the specified flow profile.
+ */
+static enum ice_status
+ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+{
+	struct ice_flow_prof *prof;
+	u8 i;
+
+	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
+		   ICE_NONDMA_MEM);
+
+	prof = params->prof;
+
+	for (i = 0; i < params->prof->segs_cnt; i++) {
+		const ice_bitmap_t *src;
+		u32 hdrs;
+
+		if (i > 0 && (i + 1) < prof->segs_cnt)
+			continue;
+
+		hdrs = prof->segs[i].hdrs;
+
+		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
+				(const ice_bitmap_t *)ice_ptypes_mac_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
+		}
+
+		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
+			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_VLAN;
+		}
+
+		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
+			ice_and_bitmap(params->ptypes, params->ptypes,
+				       (const ice_bitmap_t *)ice_ptypes_arp_of,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_ARP;
+		}
+
+		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
+				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_IPV4;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
+				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_IPV6;
+		}
+
+		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
+				(const ice_bitmap_t *)ice_ptypes_icmp_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_ICMP;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+			src = !i ? (const ice_bitmap_t *)ice_ptypes_udp_of :
+				(const ice_bitmap_t *)ice_ptypes_udp_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_UDP;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
+			ice_and_bitmap(params->ptypes, params->ptypes,
+				       (const ice_bitmap_t *)ice_ptypes_tcp_il,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_TCP;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
+			src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
+			ice_and_bitmap(params->ptypes, params->ptypes, src,
+				       ICE_FLOW_PTYPE_MAX);
+			hdrs &= ~ICE_FLOW_SEG_HDR_SCTP;
+		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
+			if (!i) {
+				src = (const ice_bitmap_t *)ice_ptypes_gre_of;
+				ice_and_bitmap(params->ptypes, params->ptypes,
+					       src, ICE_FLOW_PTYPE_MAX);
+			}
+			hdrs &= ~ICE_FLOW_SEG_HDR_GRE;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
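+
+/* Example (illustrative only): a profile with an outer IPv4 segment and an
+ * inner TCP segment starts from an all-ones PTYPE bitmap, then ANDs it with
+ * ice_ptypes_ipv4_ofos and ice_ptypes_tcp_il, leaving only the packet types
+ * that carry both headers in those positions.
+ */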
+
+/**
+ * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
+ * @hw: pointer to the hw struct
+ * @params: information about the flow to be processed
+ * @seg: packet segment index of the field to be extracted
+ * @fld: ID of field to be extracted
+ *
+ * This function determines the protocol ID, offset, and size of the given
+ * field. It then allocates one or more extraction sequence entries for the
+ * given field, and fills the entries with protocol ID and offset information.
+ */
+static enum ice_status
+ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
+		    u8 seg, enum ice_flow_field fld)
+{
+	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
+	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
+	u8 fv_words = hw->blk[params->blk].es.fvw;
+	struct ice_flow_fld_info *flds;
+	u16 cnt, ese_bits, i;
+	s16 adj = 0;
+	u8 off;
+
+	flds = params->prof->segs[seg].fields;
+
+	switch (fld) {
+	case ICE_FLOW_FIELD_IDX_ETH_DA:
+	case ICE_FLOW_FIELD_IDX_ETH_SA:
+	case ICE_FLOW_FIELD_IDX_S_VLAN:
+	case ICE_FLOW_FIELD_IDX_C_VLAN:
+		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
+		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_IP_DSCP:
+		if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+			adj = ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP;
+		/* Fall through */
+	case ICE_FLOW_FIELD_IDX_IP_TTL:
+	case ICE_FLOW_FIELD_IDX_IP_PROT:
+		/* Some fields are located at different offsets in IPv4 and
+		 * IPv6
+		 */
+		if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+			prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S :
+				ICE_PROT_IPV4_IL;
+			/* TTL and PROT share the same extraction seq. entry.
+			 * Each is considered a sibling to the other in terms
+			 * of sharing the same extraction sequence entry.
+			 */
+			if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
+				sib = ICE_FLOW_FIELD_IDX_IP_PROT;
+			else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
+				sib = ICE_FLOW_FIELD_IDX_IP_TTL;
+		} else if (params->prof->segs[seg].hdrs &
+			   ICE_FLOW_SEG_HDR_IPV6) {
+			prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S :
+				ICE_PROT_IPV6_IL;
+			if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
+				adj = ICE_FLOW_FLD_IPV6_TTL_TTL_DISP;
+			else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
+				adj = ICE_FLOW_FLD_IPV6_TTL_PROT_DISP;
+		}
+		break;
+	case ICE_FLOW_FIELD_IDX_IPV4_SA:
+	case ICE_FLOW_FIELD_IDX_IPV4_DA:
+		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_IPV6_SA:
+	case ICE_FLOW_FIELD_IDX_IPV6_DA:
+		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
+	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
+		prot_id = ICE_PROT_TCP_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
+	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
+		prot_id = seg == 0 ? ICE_PROT_UDP_IL_OR_S : ICE_PROT_UDP_OF;
+		break;
+	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
+	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
+		prot_id = ICE_PROT_SCTP_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_ARP_SIP:
+	case ICE_FLOW_FIELD_IDX_ARP_DIP:
+	case ICE_FLOW_FIELD_IDX_ARP_SHA:
+	case ICE_FLOW_FIELD_IDX_ARP_DHA:
+	case ICE_FLOW_FIELD_IDX_ARP_OP:
+		prot_id = ICE_PROT_ARP_OF;
+		break;
+	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
+	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
+		/* ICMP type and code share the same extraction seq. entry */
+		prot_id = (params->prof->segs[seg].hdrs &
+			   ICE_FLOW_SEG_HDR_IPV4) ?
+			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
+		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
+			ICE_FLOW_FIELD_IDX_ICMP_CODE :
+			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
+		break;
+	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
+		prot_id = ICE_PROT_GRE_OF;
+		break;
+	default:
+		return ICE_ERR_NOT_IMPL;
+	}
+
+	/* Each extraction sequence entry is a word in size, and extracts a
+	 * word-aligned offset from a protocol header.
+	 */
+	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * 8;
+
+	flds[fld].xtrct.prot_id = prot_id;
+	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
+		ICE_FLOW_FV_EXTRACT_SZ;
+	flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
+	flds[fld].xtrct.idx = params->es_cnt;
+
+	/* Adjust the next field-entry index after accommodating the number of
+	 * entries this field consumes
+	 */
+	cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
+				  ice_flds_info[fld].size, ese_bits);
+
+	/* Fill in the extraction sequence entries needed for this field */
+	off = flds[fld].xtrct.off;
+	for (i = 0; i < cnt; i++) {
+		/* Only consume an extraction sequence entry if there is no
+		 * sibling field associated with this field, or the sibling
+		 * entry does not already extract the word shared with this
+		 * field.
+		 */
+		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
+		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
+		    flds[sib].xtrct.off != off) {
+			u8 idx;
+
+			/* Make sure the number of extraction sequence
+			 * entries required does not exceed the block's
+			 * capability
+			 */
+			if (params->es_cnt >= fv_words)
+				return ICE_ERR_MAX_LIMIT;
+
+			/* some blocks require a reversed field vector layout */
+			if (hw->blk[params->blk].es.reverse)
+				idx = fv_words - params->es_cnt - 1;
+			else
+				idx = params->es_cnt;
+
+			params->es[idx].prot_id = prot_id;
+			params->es[idx].off = off;
+			params->es_cnt++;
+		}
+
+		off += ICE_FLOW_FV_EXTRACT_SZ;
+	}
+
+	return ICE_SUCCESS;
+}
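+
+/* Worked example (illustrative only), assuming the 2-byte extraction word
+ * size of ICE_FLOW_FV_EXTRACT_SZ so that ese_bits = 16: for the IPv4 DSCP
+ * field at bit offset 8 with size 8 bits,
+ *	xtrct.off  = (8 / 16) * 2 = 0	(word containing the field)
+ *	xtrct.disp = 8 % 16	  = 8	(bit displacement within the word)
+ *	cnt = DIVIDE_AND_ROUND_UP(8 + 8, 16) = 1 entry consumed
+ */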
+
+/**
+ * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
+ * @hw: pointer to the hw struct
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose raw fields are to be extracted
+ */
+static enum ice_status
+ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
+		     u8 seg)
+{
+	u16 hdrs_sz;
+	u8 i;
+
+	if (!params->prof->segs[seg].raws_cnt)
+		return ICE_SUCCESS;
+
+	if (params->prof->segs[seg].raws_cnt >
+	    ARRAY_SIZE(params->prof->segs[seg].raws))
+		return ICE_ERR_MAX_LIMIT;
+
+	/* Offsets within the segment headers are not supported */
+	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
+	if (!hdrs_sz)
+		return ICE_ERR_PARAM;
+
+	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
+		struct ice_flow_seg_fld_raw *raw;
+		u16 off, cnt, j;
+
+		raw = &params->prof->segs[seg].raws[i];
+
+		/* Only support matching raw fields in the payload */
+		if (raw->off < hdrs_sz)
+			return ICE_ERR_PARAM;
+
+		/* Convert the segment-relative offset into payload-relative
+		 * offset.
+		 */
+		off = raw->off - hdrs_sz;
+
+		/* Storing extraction information */
+		raw->info.xtrct.prot_id = ICE_PROT_PAY;
+		raw->info.xtrct.off = (off / ICE_FLOW_FV_EXTRACT_SZ) *
+			ICE_FLOW_FV_EXTRACT_SZ;
+		raw->info.xtrct.disp = (off % ICE_FLOW_FV_EXTRACT_SZ) * 8;
+		raw->info.xtrct.idx = params->es_cnt;
+
+		/* Determine the number of field vector entries this raw field
+		 * consumes.
+		 */
+		cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
+					  (raw->info.src.last * 8),
+					  ICE_FLOW_FV_EXTRACT_SZ * 8);
+		off = raw->info.xtrct.off;
+		for (j = 0; j < cnt; j++) {
+			/* Make sure the number of extraction sequence
+			 * entries required does not exceed the block's
+			 * capability
+			 */
+			if (params->es_cnt >= hw->blk[params->blk].es.count ||
+			    params->es_cnt >= ICE_MAX_FV_WORDS)
+				return ICE_ERR_MAX_LIMIT;
+
+			params->es[params->es_cnt].prot_id = ICE_PROT_PAY;
+			params->es[params->es_cnt].off = off;
+			params->es_cnt++;
+			off += ICE_FLOW_FV_EXTRACT_SZ;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
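+
+/* Worked example (illustrative only): for an ETH/IPv4/UDP segment the fixed
+ * header size is 14 + 20 + 8 = 42 bytes, so a 4-byte raw field at segment
+ * offset 50 becomes payload offset 8 and is extracted from ICE_PROT_PAY
+ * starting at word-aligned offset 8, consuming two 2-byte entries.
+ */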
+
+/**
+ * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
+ * @hw: pointer to the hw struct
+ * @params: information about the flow to be processed
+ *
+ * This function iterates through all matched fields in the given segments, and
+ * creates an extraction sequence for the fields.
+ */
+static enum ice_status
+ice_flow_create_xtrct_seq(struct ice_hw *hw,
+			  struct ice_flow_prof_params *params)
+{
+	enum ice_status status = ICE_SUCCESS;
+	u8 i;
+
+	for (i = 0; i < params->prof->segs_cnt; i++) {
+		u64 match = params->prof->segs[i].match;
+		u16 j;
+
+		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+			const u64 bit = BIT_ULL(j);
+
+			if (match & bit) {
+				status = ice_flow_xtract_fld
+					(hw, params, i, (enum ice_flow_field)j);
+				if (status)
+					return status;
+				match &= ~bit;
+			}
+		}
+
+		/* Process raw matching bytes */
+		status = ice_flow_xtract_raws(hw, params, i);
+		if (status)
+			return status;
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_proc_segs - process all packet segments associated with a profile
+ * @hw: pointer to the hw struct
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+	enum ice_status status;
+
+	status = ice_flow_proc_seg_hdrs(params);
+	if (status)
+		return status;
+
+	status = ice_flow_create_xtrct_seq(hw, params);
+	if (status)
+		return status;
+
+	switch (params->blk) {
+	case ICE_BLK_RSS:
+		/* Only header information is provided for RSS configuration.
+		 * No further processing is needed.
+		 */
+		status = ICE_SUCCESS;
+		break;
+	case ICE_BLK_FD:
+		status = ICE_SUCCESS;
+		break;
+	case ICE_BLK_SW:
+	default:
+		return ICE_ERR_NOT_IMPL;
+	}
+
+	return status;
+}
+
+#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
+#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
+
+/**
+ * ice_flow_find_prof_conds - Find a profile matching headers and conditions
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
+ * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
+			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
+			 u8 segs_cnt, u16 vsi_handle, u32 conds)
+{
+	struct ice_flow_prof *p;
+
+	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+		if (p->dir == dir && segs_cnt && segs_cnt == p->segs_cnt) {
+			u8 i;
+
+			/* Check for profile-VSI association if specified */
+			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
+			    ice_is_vsi_valid(hw, vsi_handle) &&
+			    !ice_is_bit_set(p->vsis, vsi_handle))
+				continue;
+
+			/* Protocol headers must be checked. Matched fields are
+			 * checked if specified.
+			 */
+			for (i = 0; i < segs_cnt; i++)
+				if (segs[i].hdrs != p->segs[i].hdrs ||
+				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
+				     segs[i].match != p->segs[i].match))
+					break;
+
+			/* A match is found if all segments are matched */
+			if (i == segs_cnt)
+				return p;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * ice_flow_find_prof - Look up a profile matching headers and matched fields
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+u64
+ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+		   struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+	struct ice_flow_prof *p;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+	p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
+				     ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
+}
+
+/**
+ * ice_flow_find_prof_id - Look up a profile with given profile ID
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @prof_id: unique ID to identify this flow profile
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+	struct ice_flow_prof *p;
+
+	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+		if (p->id == prof_id)
+			return p;
+	}
+
+	return NULL;
+}
+
+/**
+ * ice_flow_rem_entry_sync - Remove a flow entry
+ * @hw: pointer to the hw struct
+ * @entry: flow entry to be removed
+ */
+static enum ice_status
+ice_flow_rem_entry_sync(struct ice_hw *hw, struct ice_flow_entry *entry)
+{
+	if (!entry)
+		return ICE_ERR_BAD_PTR;
+
+	LIST_DEL(&entry->l_entry);
+
+	if (entry->entry)
+		ice_free(hw, entry->entry);
+
+	if (entry->acts)
+		ice_free(hw, entry->acts);
+
+	ice_free(hw, entry);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @acts: array of default actions
+ * @acts_cnt: number of default actions
+ * @prof: stores the returned flow profile added
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
+		       enum ice_flow_dir dir, u64 prof_id,
+		       struct ice_flow_seg_info *segs, u8 segs_cnt,
+		       struct ice_flow_action *acts, u8 acts_cnt,
+		       struct ice_flow_prof **prof)
+{
+	struct ice_flow_prof_params params;
+	enum ice_status status = ICE_SUCCESS;
+	u8 i;
+
+	if (!prof || (acts_cnt && !acts))
+		return ICE_ERR_BAD_PTR;
+
+	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
+	params.prof = (struct ice_flow_prof *)
+		ice_malloc(hw, sizeof(*params.prof));
+	if (!params.prof)
+		return ICE_ERR_NO_MEMORY;
+
+	/* initialize extraction sequence to all invalid (0xff) */
+	ice_memset(params.es, 0xff, sizeof(params.es), ICE_NONDMA_MEM);
+
+	params.blk = blk;
+	params.prof->id = prof_id;
+	params.prof->dir = dir;
+	params.prof->segs_cnt = segs_cnt;
+
+	/* Make a copy of the segments that need to be persistent in the flow
+	 * profile instance
+	 */
+	for (i = 0; i < segs_cnt; i++)
+		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
+			   ICE_NONDMA_TO_NONDMA);
+
+	/* Make a copy of the actions that need to be persistent in the flow
+	 * profile instance.
+	 */
+	if (acts_cnt) {
+		params.prof->acts = (struct ice_flow_action *)
+			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
+				   ICE_NONDMA_TO_NONDMA);
+
+		if (!params.prof->acts) {
+			status = ICE_ERR_NO_MEMORY;
+			goto out;
+		}
+	}
+
+	status = ice_flow_proc_segs(hw, &params);
+	if (status) {
+		ice_debug(hw, ICE_DBG_FLOW,
+			  "Error processing a flow's packet segments\n");
+		goto out;
+	}
+
+	/* Add a HW profile for this flow profile */
+	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+	if (status) {
+		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&params.prof->entries);
+	ice_init_lock(&params.prof->entries_lock);
+	*prof = params.prof;
+
+out:
+	if (status) {
+		if (params.prof->acts)
+			ice_free(hw, params.prof->acts);
+		ice_free(hw, params.prof);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_rem_prof_sync - remove a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile to remove
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
+		       struct ice_flow_prof *prof)
+{
+	enum ice_status status = ICE_SUCCESS;
+
+	/* Remove all remaining flow entries before removing the flow profile */
+	if (!LIST_EMPTY(&prof->entries)) {
+		struct ice_flow_entry *e, *t;
+
+		ice_acquire_lock(&prof->entries_lock);
+
+		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
+					 l_entry) {
+			status = ice_flow_rem_entry_sync(hw, e);
+			if (status)
+				break;
+		}
+
+		ice_release_lock(&prof->entries_lock);
+	}
+
+	/* Remove all hardware profiles associated with this flow profile */
+	status = ice_rem_prof(hw, blk, prof->id);
+	if (!status) {
+		LIST_DEL(&prof->l_entry);
+		ice_destroy_lock(&prof->entries_lock);
+		if (prof->acts)
+			ice_free(hw, prof->acts);
+		ice_free(hw, prof);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_assoc_prof - associate a VSI with a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software vsi handle has been validated
+ */
+static enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+		    struct ice_flow_prof *prof, u16 vsi_handle)
+{
+	enum ice_status status = ICE_SUCCESS;
+
+	if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
+		status = ice_add_prof_id_flow(hw, blk,
+					      ice_get_hw_vsi_num(hw,
+								 vsi_handle),
+					      prof->id);
+		if (!status)
+			ice_set_bit(vsi_handle, prof->vsis);
+		else
+			ice_debug(hw, ICE_DBG_FLOW,
+				  "HW profile add failed, %d\n",
+				  status);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software vsi handle has been validated
+ */
+static enum ice_status
+ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
+		       struct ice_flow_prof *prof, u16 vsi_handle)
+{
+	enum ice_status status = ICE_SUCCESS;
+
+	if (ice_is_bit_set(prof->vsis, vsi_handle)) {
+		status = ice_rem_prof_id_flow(hw, blk,
+					      ice_get_hw_vsi_num(hw,
+								 vsi_handle),
+					      prof->id);
+		if (!status)
+			ice_clear_bit(vsi_handle, prof->vsis);
+		else
+			ice_debug(hw, ICE_DBG_FLOW,
+				  "HW profile remove failed, %d\n",
+				  status);
+	}
+
+	return status;
+}
+
+/**
+ * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @acts: array of default actions
+ * @acts_cnt: number of default actions
+ * @prof: stores the returned flow profile added
+ */
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+		  struct ice_flow_action *acts, u8 acts_cnt,
+		  struct ice_flow_prof **prof)
+{
+	enum ice_status status;
+
+	if (segs_cnt > ICE_FLOW_SEG_MAX)
+		return ICE_ERR_MAX_LIMIT;
+
+	if (!segs_cnt)
+		return ICE_ERR_PARAM;
+
+	if (!segs)
+		return ICE_ERR_BAD_PTR;
+
+	status = ice_flow_val_hdrs(segs, segs_cnt);
+	if (status)
+		return status;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
+					acts, acts_cnt, prof);
+	if (!status)
+		LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
+
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return status;
+}
+
+/**
+ * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
+ * @hw: pointer to the hw struct
+ * @blk: the block for which the flow profile is to be removed
+ * @prof_id: unique ID of the flow profile to be removed
+ */
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+	struct ice_flow_prof *prof;
+	enum ice_status status;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+	prof = ice_flow_find_prof_id(hw, blk, prof_id);
+	if (!prof) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+		goto out;
+	}
+
+	/* prof becomes invalid after the call */
+	status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return status;
+}
+
+/**
+ * ice_flow_get_hw_prof - return the hw profile for a specific profile id handle
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @prof_id: the profile id handle
+ * @hw_prof_id: pointer to variable to receive the hw profile id
+ */
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		     u8 *hw_prof_id)
+{
+	struct ice_prof_map *map;
+
+	map = ice_search_prof_id(hw, blk, prof_id);
+	if (map) {
+		*hw_prof_id = map->prof_id;
+		return ICE_SUCCESS;
+	}
+
+	return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_flow_find_entry - look for a flow entry using its unique ID
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @entry_id: unique ID to identify this flow entry
+ *
+ * This function looks for the flow entry with the specified unique ID in all
+ * flow profiles of the specified classification stage. If the entry is found,
+ * it returns the handle to the flow entry. Otherwise, it returns
+ * ICE_FLOW_ENTRY_HANDLE_INVAL.
+ */
+u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
+{
+	struct ice_flow_entry *found = NULL;
+	struct ice_flow_prof *p;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+		struct ice_flow_entry *e;
+
+		ice_acquire_lock(&p->entries_lock);
+		LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
+			if (e->id == entry_id) {
+				found = e;
+				break;
+			}
+		ice_release_lock(&p->entries_lock);
+
+		if (found)
+			break;
+	}
+
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
+}
+
+/**
+ * ice_flow_add_entry - Add a flow entry
+ * @hw: pointer to the hw struct
+ * @blk: classification stage
+ * @prof_id: ID of the profile to add a new flow entry to
+ * @entry_id: unique ID to identify this flow entry
+ * @vsi_handle: software VSI handle for the flow entry
+ * @prio: priority of the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @acts: arrays of actions to be performed on a match
+ * @acts_cnt: number of actions
+ * @entry_h: pointer to buffer that receives the new flow entry's handle
+ */
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
+		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
+		   u64 *entry_h)
+{
+	struct ice_flow_prof *prof = NULL;
+	struct ice_flow_entry *e = NULL;
+	enum ice_status status = ICE_SUCCESS;
+
+	if (acts_cnt && !acts)
+		return ICE_ERR_PARAM;
+
+	/* No flow entry data is expected for RSS */
+	if (!entry_h || (!data && blk != ICE_BLK_RSS))
+		return ICE_ERR_BAD_PTR;
+
+	if (!ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+	prof = ice_flow_find_prof_id(hw, blk, prof_id);
+	if (!prof) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+	} else {
+		/* Allocate memory for the entry being added and associate
+		 * the VSI to the found flow profile
+		 */
+		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
+		if (!e)
+			status = ICE_ERR_NO_MEMORY;
+		else
+			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+	}
+
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+	if (status)
+		goto out;
+
+	e->id = entry_id;
+	e->vsi_handle = vsi_handle;
+	e->prof = prof;
+
+	switch (blk) {
+	case ICE_BLK_RSS:
+		/* RSS will add only one entry per VSI per profile */
+		break;
+	case ICE_BLK_FD:
+		break;
+	case ICE_BLK_SW:
+	case ICE_BLK_PE:
+	default:
+		status = ICE_ERR_NOT_IMPL;
+		goto out;
+	}
+
+	ice_acquire_lock(&prof->entries_lock);
+	LIST_ADD(&e->l_entry, &prof->entries);
+	ice_release_lock(&prof->entries_lock);
+
+	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
+
+out:
+	if (status && e) {
+		if (e->entry)
+			ice_free(hw, e->entry);
+		ice_free(hw, e);
+	}
+
+	return status;
+}
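+
+/* Usage sketch (illustrative only; ICE_FLOW_PRIO_NORMAL is an assumed value
+ * of the ice_flow_priority enum): once a profile exists, an entry can be
+ * added for a VSI and the returned 64-bit handle later passed to
+ * ice_flow_rem_entry(). For RSS, no match data buffer is required:
+ *
+ *	u64 entry_h;
+ *	enum ice_status st;
+ *
+ *	st = ice_flow_add_entry(hw, ICE_BLK_RSS, prof_id, entry_id,
+ *				vsi_handle, ICE_FLOW_PRIO_NORMAL,
+ *				NULL, NULL, 0, &entry_h);
+ */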
+
+/**
+ * ice_flow_rem_entry - Remove a flow entry
+ * @hw: pointer to the hw struct
+ * @entry_h: handle to the flow entry to be removed
+ */
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h)
+{
+	struct ice_flow_entry *entry;
+	struct ice_flow_prof *prof;
+	enum ice_status status;
+
+	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
+		return ICE_ERR_PARAM;
+
+	entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
+
+	/* Retain the pointer to the flow profile as the entry will be freed */
+	prof = entry->prof;
+
+	ice_acquire_lock(&prof->entries_lock);
+	status = ice_flow_rem_entry_sync(hw, entry);
+	ice_release_lock(&prof->entries_lock);
+
+	return status;
+}
+
+/**
+ * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @type: type of the field
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ *           entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ *            input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ *            entry's input buffer
+ *
+ * This helper function stores information of a field being matched, including
+ * the type of the field and the locations of the value to match, the mask,
+ * and the upper-bound value in the start of the input buffer for a flow entry.
+ * This function should only be used for fixed-size data structures.
+ *
+ * This function also opportunistically determines the protocol headers to be
+ * present based on the fields being set. Some fields cannot be used alone to
+ * determine the protocol headers present. Sometimes, fields for particular
+ * protocol headers are not matched. In those cases, the protocol headers
+ * must be explicitly set.
+ */
+static void
+ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+		     enum ice_flow_fld_match_type type, u16 val_loc,
+		     u16 mask_loc, u16 last_loc)
+{
+	u64 bit = BIT_ULL(fld);
+
+	seg->match |= bit;
+	if (type == ICE_FLOW_FLD_TYPE_RANGE)
+		seg->range |= bit;
+
+	seg->fields[fld].type = type;
+	seg->fields[fld].src.val = val_loc;
+	seg->fields[fld].src.mask = mask_loc;
+	seg->fields[fld].src.last = last_loc;
+
+	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
+}
+
+/**
+ * ice_flow_set_fld - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ *           entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ *            input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ *            entry's input buffer
+ * @range: indicate if field being matched is to be in a range
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match,
+ * the mask value, and upper value can be extracted. These locations are then
+ * stored in the flow profile. When adding a flow entry associated with the
+ * flow profile, these locations will be used to quickly extract the values and
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
+{
+	enum ice_flow_fld_match_type t = range ?
+		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
+
+	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
+}
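+
+/* Usage sketch (illustrative only), matching IPv4 source and destination
+ * addresses whose values sit at byte offsets 0 and 4 of a caller-defined
+ * input buffer, with no mask or upper-bound locations:
+ *
+ *	struct ice_flow_seg_info seg = { 0 };
+ *
+ *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0,
+ *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ *			 false);
+ *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 4,
+ *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ *			 false);
+ */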
+
+/**
+ * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ *           entry's input buffer
+ * @pref_loc: location of prefix value from entry's input buffer
+ * @pref_sz: size of the location holding the prefix value
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match
+ * and the IPv4 prefix value can be extracted. These locations are then stored
+ * in the flow profile. When adding flow entries to the associated flow profile,
+ * these locations can be used to quickly extract the values to create the
+ * content of a match entry. This function should only be used for fixed-size
+ * data structures.
+ */
+void
+ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+			u16 val_loc, u16 pref_loc, u8 pref_sz)
+{
+	/* For this type of field, the "mask" location is for the prefix value's
+	 * location and the "last" location is for the size of the location of
+	 * the prefix value.
+	 */
+	ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
+			     pref_loc, (u16)pref_sz);
+}
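+
+/* Example (illustrative only): to match an IPv4 destination under a prefix,
+ * place the 4-byte address at offset 0 and the 1-byte prefix length at
+ * offset 4 of the entry's input buffer:
+ *
+ *	ice_flow_set_fld_prefix(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 4, 1);
+ */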
+
+/**
+ * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
+ * @seg: packet segment the field being set belongs to
+ * @off: offset of the raw field from the beginning of the segment in bytes
+ * @len: length of the raw pattern to be matched
+ * @val_loc: location of the value to match from entry's input buffer
+ * @mask_loc: location of mask value from entry's input buffer
+ *
+ * This function specifies the offset of the raw field to be matched from the
+ * beginning of the specified packet segment, and the locations, in the form of
+ * byte offsets from the start of the input buffer for a flow entry, from where
+ * the value to match and the mask value are to be extracted. These locations are
+ * then stored in the flow profile. When adding flow entries to the associated
+ * flow profile, these locations can be used to quickly extract the values to
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+		     u16 val_loc, u16 mask_loc)
+{
+	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
+		seg->raws[seg->raws_cnt].off = off;
+		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
+		seg->raws[seg->raws_cnt].info.src.val = val_loc;
+		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
+		/* The "last" field is used to store the length of the field */
+		seg->raws[seg->raws_cnt].info.src.last = len;
+	}
+
+	/* Overflows of "raws" will be handled as an error condition later in
+	 * the flow when this information is processed.
+	 */
+	seg->raws_cnt++;
+}
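+
+/* Example (illustrative only): match 4 raw payload bytes at segment offset
+ * 56 (just past a VLAN/IPv4/TCP header stack), taking the match value from
+ * buffer offset 0 and its mask from buffer offset 4:
+ *
+ *	ice_flow_add_fld_raw(&seg, 56, 4, 0, 4);
+ */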
+
+#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
+	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+
+#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
+	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
+	 ICE_FLOW_SEG_HDR_SCTP)
+
+#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
+	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
+
+/**
+ * ice_flow_set_rss_seg_info - setup packet segments for RSS
+ * @segs: pointer to the flow field segment(s)
+ * @hash_fields: fields to be hashed on for the segment(s)
+ * @flow_hdr: protocol header fields within a packet segment
+ *
+ * Helper function to extract fields from the hash bitmap and use the flow
+ * header value to set the flow field segment, for further use in flow
+ * profile entry addition or removal.
+ */
+static enum ice_status
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
+			  u32 flow_hdr)
+{
+	u64 val = hash_fields;
+	u8 i;
+
+	for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
+		u64 bit = BIT_ULL(i);
+
+		if (val & bit) {
+			ice_flow_set_fld(segs, (enum ice_flow_field)i,
+					 ICE_FLOW_FLD_OFF_INVAL,
+					 ICE_FLOW_FLD_OFF_INVAL,
+					 ICE_FLOW_FLD_OFF_INVAL, false);
+			val &= ~bit;
+		}
+	}
+	ICE_FLOW_SET_HDRS(segs, flow_hdr);
+
+	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+		return ICE_ERR_PARAM;
+
+	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+	if (!ice_is_pow2(val))
+		return ICE_ERR_CFG;
+
+	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+	if (val && !ice_is_pow2(val))
+		return ICE_ERR_CFG;
+
+	return ICE_SUCCESS;
+}
+
+/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
+ * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
+ * hash, convert them to their appropriate flow L3, L4 values.
+ */
+#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
+	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
+	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+
+#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
+	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
+	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
+	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+
+#define ICE_FLOW_MAX_CFG	10
+
+/**
+ * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ *
+ * This function will take the hash bitmap provided by the AVF driver via a
+ * message, convert it to ICE-compatible values, and configure RSS flow
+ * profiles.
+ */
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+{
+	u64 added_cfg[ICE_FLOW_MAX_CFG], hash_flds;
+	enum ice_status status = ICE_SUCCESS;
+	u8 i, idx = 0;
+
+	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
+	    !ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	/* Make sure no unsupported bits are specified */
+	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
+			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
+		return ICE_ERR_CFG;
+
+	hash_flds = avf_hash;
+
+	/* Always create an L3 RSS configuration for any L4 RSS configuration */
+	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
+		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
+
+	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
+		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
+
+	/* Create the corresponding RSS configuration for each valid hash bit */
+	while (hash_flds) {
+		u64 rss_hash = ICE_HASH_INVALID;
+
+		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
+			if (hash_flds & ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV4 |
+					ICE_FLOW_HASH_TCP_PORT;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
+			} else if (hash_flds &
+				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV4 |
+					ICE_FLOW_HASH_UDP_PORT;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
+			} else if (hash_flds &
+				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+				rss_hash = ICE_FLOW_HASH_IPV4 |
+					ICE_FLOW_HASH_SCTP_PORT;
+				hash_flds &=
+					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+			} else if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV4;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
+			}
+		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
+			if (hash_flds & ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV6 |
+					ICE_FLOW_HASH_TCP_PORT;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
+			} else if (hash_flds &
+				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV6 |
+					ICE_FLOW_HASH_UDP_PORT;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
+			} else if (hash_flds &
+				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+				rss_hash = ICE_FLOW_HASH_IPV6 |
+					ICE_FLOW_HASH_SCTP_PORT;
+				hash_flds &=
+					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+			} else if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
+				rss_hash = ICE_FLOW_HASH_IPV6;
+				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
+			}
+		}
+
+		if (rss_hash == ICE_HASH_INVALID)
+			return ICE_ERR_OUT_OF_RANGE;
+
+		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
+					 ICE_FLOW_SEG_HDR_NONE);
+		if (status)
+			break;
+		added_cfg[idx++] = rss_hash;
+	}
+
+	/* If status is not success, remove all hash configurations that were
+	 * successfully added earlier in this call for the VSI.
+	 */
+	if (status)
+		for (i = 0; i < idx; i++)
+			ice_rem_rss_cfg(hw, vsi_handle, added_cfg[i],
+					ICE_FLOW_SEG_HDR_NONE);
+
+	return status;
+}
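+
+/* Usage sketch (illustrative): replaying the default AVF hash set for a
+ * VSI would look like:
+ *
+ *	status = ice_add_avf_rss_cfg(hw, vsi_handle, ICE_DEFAULT_RSS_HENA);
+ *
+ * On failure, any configuration added earlier in the call is rolled back
+ * before returning.
+ */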
+
+/**
+ * ice_rem_all_rss_vsi_ctx - remove all RSS configurations from VSI context
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ */
+void ice_rem_all_rss_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+	struct ice_rss_cfg *r, *tmp;
+
+	if (!ice_is_vsi_valid(hw, vsi_handle) ||
+	    LIST_EMPTY(&hw->vsi_ctx[vsi_handle]->rss_list_head))
+		return;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	LIST_FOR_EACH_ENTRY_SAFE(r, tmp,
+				 &hw->vsi_ctx[vsi_handle]->rss_list_head,
+				 ice_rss_cfg, l_entry) {
+		LIST_DEL(&r->l_entry);
+		ice_free(hw, r);
+	}
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+}
+
+/**
+ * ice_rem_vsi_rss_cfg - remove RSS configurations associated with a VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function will iterate through all flow profiles and disassociate
+ * the VSI from each profile. If a flow profile no longer has any VSIs
+ * associated with it, the profile is removed.
+ */
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+	const enum ice_block blk = ICE_BLK_RSS;
+	struct ice_flow_prof *p, *t;
+	enum ice_status status = ICE_SUCCESS;
+
+	if (!ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+	LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
+				 l_entry) {
+		if (ice_is_bit_set(p->vsis, vsi_handle)) {
+			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
+			if (status)
+				break;
+
+			if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
+				status = ice_flow_rem_prof_sync(hw, blk, p);
+				if (status)
+					break;
+			}
+		}
+	}
+	ice_release_lock(&hw->fl_profs_locks[blk]);
+
+	return status;
+}
+
+/**
+ * ice_rem_rss_cfg_vsi_ctx - remove RSS configuration from VSI context
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static void
+ice_rem_rss_cfg_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
+			struct ice_flow_prof *prof)
+{
+	struct ice_rss_cfg *r, *tmp;
+
+	/* Search for RSS hash fields associated with the VSI that match the
+	 * hash configurations associated with the flow profile. If found,
+	 * remove it from the RSS entry list of the VSI context and free it.
+	 */
+	LIST_FOR_EACH_ENTRY_SAFE(r, tmp,
+				 &hw->vsi_ctx[vsi_handle]->rss_list_head,
+				 ice_rss_cfg, l_entry) {
+		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+			LIST_DEL(&r->l_entry);
+			ice_free(hw, r);
+			return;
+		}
+	}
+}
+
+/**
+ * ice_add_rss_vsi_ctx - add RSS configuration to VSI context
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
+		    struct ice_flow_prof *prof)
+{
+	struct ice_rss_cfg *r, *rss_cfg;
+
+	LIST_FOR_EACH_ENTRY(r, &hw->vsi_ctx[vsi_handle]->rss_list_head,
+			    ice_rss_cfg, l_entry)
+		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs)
+			return ICE_SUCCESS;
+
+	rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
+	if (!rss_cfg)
+		return ICE_ERR_NO_MEMORY;
+
+	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
+	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+	LIST_ADD(&rss_cfg->l_entry, &hw->vsi_ctx[vsi_handle]->rss_list_head);
+
+	return ICE_SUCCESS;
+}
+
+#define ICE_FLOW_PROF_HASH_S	0
+#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
+#define ICE_FLOW_PROF_HDR_S	32
+#define ICE_FLOW_PROF_HDR_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+
+#define ICE_FLOW_GEN_PROFID(hash, hdr) \
+	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M))
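+
+/* Example (illustrative): ICE_FLOW_GEN_PROFID(ICE_HASH_TCP_IPV4,
+ * ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP) packs the hash bitmap
+ * into bits 0-31 and the header bitmap into bits 32-63 of the 64-bit
+ * profile ID.
+ */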
+
+/**
+ * ice_add_rss_cfg_sync - add an RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		     u32 addl_hdrs)
+{
+	const enum ice_block blk = ICE_BLK_RSS;
+	struct ice_flow_prof *prof = NULL;
+	struct ice_flow_seg_info *segs;
+	enum ice_status status = ICE_SUCCESS;
+
+	segs = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*segs));
+	if (!segs)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Construct the packet segment info from the hashed fields */
+	status = ice_flow_set_rss_seg_info(segs, hashed_flds, addl_hdrs);
+	if (status)
+		goto exit;
+
+	/* Search for a flow profile that has matching headers and hash fields
+	 * and has the input VSI associated with it. If found, no further
+	 * operations are required, so exit.
+	 */
+	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+					vsi_handle,
+					ICE_FLOW_FIND_PROF_CHK_FLDS |
+					ICE_FLOW_FIND_PROF_CHK_VSI);
+	if (prof)
+		goto exit;
+
+	/* Check if a flow profile exists with the same protocol headers and
+	 * is associated with the input VSI. If so, disassociate the VSI from
+	 * this profile. The VSI will be added to a new profile created with
+	 * the protocol headers and the new hash field configuration.
+	 */
+	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+	if (prof) {
+		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+		if (!status)
+			ice_rem_rss_cfg_vsi_ctx(hw, vsi_handle, prof);
+		else
+			goto exit;
+
+		/* Remove the profile if it has no VSIs associated */
+		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
+			status = ice_flow_rem_prof_sync(hw, blk, prof);
+			if (status)
+				goto exit;
+		}
+	}
+
+	/* Search for a profile that has the same match fields only. If one
+	 * exists, associate the VSI with it.
+	 */
+	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+					vsi_handle,
+					ICE_FLOW_FIND_PROF_CHK_FLDS);
+	if (prof) {
+		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+		if (!status)
+			status = ice_add_rss_vsi_ctx(hw, vsi_handle, prof);
+		goto exit;
+	}
+
+	/* Create a new flow profile with generated profile and packet
+	 * segment information.
+	 */
+	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
+				   ICE_FLOW_GEN_PROFID(hashed_flds, segs->hdrs),
+				   segs, 1, NULL, 0, &prof);
+	if (status)
+		goto exit;
+
+	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+	/* If associating the VSI with the new flow profile failed, the
+	 * profile can be removed.
+	 */
+	if (status) {
+		ice_flow_rem_prof_sync(hw, blk, prof);
+		goto exit;
+	}
+
+	status = ice_add_rss_vsi_ctx(hw, vsi_handle, prof);
+
+exit:
+	ice_free(hw, segs);
+	return status;
+}
+
+/**
+ * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ *
+ * This function will generate a flow profile based on the input fields to
+ * hash on and the flow type, and use the VSI number to add a flow entry
+ * to the profile.
+ */
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		u32 addl_hdrs)
+{
+	enum ice_status status;
+
+	if (hashed_flds == ICE_HASH_INVALID ||
+	    !ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs);
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+
+	return status;
+}
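+
+/* Usage sketch (illustrative): enabling RSS on IPv4 UDP traffic for a
+ * VSI would look like:
+ *
+ *	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
+ *				 ICE_FLOW_SEG_HDR_IPV4 |
+ *				 ICE_FLOW_SEG_HDR_UDP);
+ *
+ * A matching ice_rem_rss_cfg() call with the same arguments removes the
+ * configuration again.
+ */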
+
+/**
+ * ice_rem_rss_cfg_sync - remove an existing RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
+ * @addl_hdrs: Protocol header fields within a packet segment
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		     u32 addl_hdrs)
+{
+	const enum ice_block blk = ICE_BLK_RSS;
+	struct ice_flow_seg_info *segs;
+	struct ice_flow_prof *prof;
+	enum ice_status status;
+
+	segs = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*segs));
+	if (!segs)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Construct the packet segment info from the hashed fields */
+	status = ice_flow_set_rss_seg_info(segs, hashed_flds, addl_hdrs);
+	if (status)
+		goto out;
+
+	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+					vsi_handle,
+					ICE_FLOW_FIND_PROF_CHK_FLDS);
+	if (!prof) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+		goto out;
+	}
+
+	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+	if (status)
+		goto out;
+
+	/* Remove the VSI context entry first, since ice_flow_rem_prof_sync()
+	 * may free the profile that ice_rem_rss_cfg_vsi_ctx() dereferences.
+	 */
+	ice_rem_rss_cfg_vsi_ctx(hw, vsi_handle, prof);
+
+	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
+		status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+	ice_free(hw, segs);
+	return status;
+}
+
+/**
+ * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
+ * @addl_hdrs: Protocol header fields within a packet segment
+ *
+ * This function will look up the flow profile based on the input hash
+ * field bitmap, iterate through the profile's entry list, and find the
+ * entry associated with the input VSI to be removed. Calls are made to
+ * the underlying flow APIs, which in turn build or update buffers for
+ * the RSS XLT1 section.
+ */
+enum ice_status
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		u32 addl_hdrs)
+{
+	enum ice_status status;
+
+	if (hashed_flds == ICE_HASH_INVALID ||
+	    !ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs);
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+
+	return status;
+}
+
+/**
+ * ice_replay_rss_cfg - replay RSS configurations associated with a VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ */
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+	enum ice_status status = ICE_SUCCESS;
+	struct ice_rss_cfg *r;
+
+	if (!ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_ERR_PARAM;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	LIST_FOR_EACH_ENTRY(r, &hw->vsi_ctx[vsi_handle]->rss_list_head,
+			    ice_rss_cfg, l_entry) {
+		status = ice_add_rss_cfg_sync(hw, vsi_handle, r->hashed_flds,
+					      r->packet_hdr);
+		if (status)
+			break;
+	}
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+
+	return status;
+}
+
+/**
+ * ice_get_rss_cfg - returns hashed fields for the given header types
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hdrs: protocol header type
+ *
+ * This function will return the match fields of the first instance of a
+ * flow profile that has the given header types and contains the input VSI.
+ */
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+{
+	struct ice_rss_cfg *r, *rss_cfg = NULL;
+
+	/* verify that the protocol header is non-zero and the VSI is valid */
+	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
+		return ICE_HASH_INVALID;
+
+	ice_acquire_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+	LIST_FOR_EACH_ENTRY(r, &hw->vsi_ctx[vsi_handle]->rss_list_head,
+			    ice_rss_cfg, l_entry)
+		if (r->packet_hdr == hdrs) {
+			rss_cfg = r;
+			break;
+		}
+	ice_release_lock(&hw->vsi_ctx[vsi_handle]->rss_locks);
+
+	return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+}
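+
+/* Usage sketch (illustrative): querying the configured hash fields for
+ * TCP over IPv4 on a VSI:
+ *
+ *	u64 flds = ice_get_rss_cfg(hw, vsi_handle,
+ *				   ICE_FLOW_SEG_HDR_IPV4 |
+ *				   ICE_FLOW_SEG_HDR_TCP);
+ *
+ * A return value of ICE_HASH_INVALID means no matching configuration.
+ */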
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 228a2c089..51d7c3ac9 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -5,4 +5,341 @@
 #ifndef _ICE_FLOW_H_
 #define _ICE_FLOW_H_
 
+#include "ice_flex_type.h"
+#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
+#define ICE_FLOW_PROF_ID_INVAL		0xfffffffffffffffful
+#define ICE_FLOW_PROF_ID_BYPASS		0
+#define ICE_FLOW_PROF_ID_DEFAULT	1
+#define ICE_FLOW_ENTRY_HANDLE_INVAL	0
+#define ICE_FLOW_VSI_INVAL		0xffff
+#define ICE_FLOW_FLD_OFF_INVAL		0xffff
+
+/* Use any of the flow field types to generate an equivalent hash field */
+#define ICE_FLOW_HASH_FLD(t)	(1ULL << (t))
+
+#define ICE_FLOW_HASH_IPV4	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_IPV4_DA))
+#define ICE_FLOW_HASH_IPV6	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_TCP_PORT	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
+#define ICE_FLOW_HASH_UDP_PORT	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
+#define ICE_FLOW_HASH_SCTP_PORT	\
+	(ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
+	 ICE_FLOW_HASH_FLD(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
+
+#define ICE_HASH_INVALID	0
+#define ICE_HASH_TCP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_TCP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_UDP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_SCTP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
+
+/* Protocol header fields within a packet segment. A segment consists of one
+ * or more protocol headers that make up a logical group. Each logical group
+ * of protocol headers either encapsulates or is encapsulated by tunneling
+ * or encapsulation protocols used for network virtualization, such as GRE,
+ * VxLAN, etc.
+ */
+enum ice_flow_seg_hdr {
+	ICE_FLOW_SEG_HDR_NONE	= 0x00000000,
+	ICE_FLOW_SEG_HDR_ETH	= 0x00000001,
+	ICE_FLOW_SEG_HDR_VLAN	= 0x00000002,
+	ICE_FLOW_SEG_HDR_IPV4	= 0x00000004,
+	ICE_FLOW_SEG_HDR_IPV6	= 0x00000008,
+	ICE_FLOW_SEG_HDR_ARP	= 0x00000010,
+	ICE_FLOW_SEG_HDR_ICMP	= 0x00000020,
+	ICE_FLOW_SEG_HDR_TCP	= 0x00000040,
+	ICE_FLOW_SEG_HDR_UDP	= 0x00000080,
+	ICE_FLOW_SEG_HDR_SCTP	= 0x00000100,
+	ICE_FLOW_SEG_HDR_GRE	= 0x00000200,
+};
+
+enum ice_flow_field {
+	/* L2 */
+	ICE_FLOW_FIELD_IDX_ETH_DA,
+	ICE_FLOW_FIELD_IDX_ETH_SA,
+	ICE_FLOW_FIELD_IDX_S_VLAN,
+	ICE_FLOW_FIELD_IDX_C_VLAN,
+	ICE_FLOW_FIELD_IDX_ETH_TYPE,
+	/* L3 */
+	ICE_FLOW_FIELD_IDX_IP_DSCP,
+	ICE_FLOW_FIELD_IDX_IP_TTL,
+	ICE_FLOW_FIELD_IDX_IP_PROT,
+	ICE_FLOW_FIELD_IDX_IPV4_SA,
+	ICE_FLOW_FIELD_IDX_IPV4_DA,
+	ICE_FLOW_FIELD_IDX_IPV6_SA,
+	ICE_FLOW_FIELD_IDX_IPV6_DA,
+	/* L4 */
+	ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+	ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
+	ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
+	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+	ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+	/* ARP */
+	ICE_FLOW_FIELD_IDX_ARP_SIP,
+	ICE_FLOW_FIELD_IDX_ARP_DIP,
+	ICE_FLOW_FIELD_IDX_ARP_SHA,
+	ICE_FLOW_FIELD_IDX_ARP_DHA,
+	ICE_FLOW_FIELD_IDX_ARP_OP,
+	/* ICMP */
+	ICE_FLOW_FIELD_IDX_ICMP_TYPE,
+	ICE_FLOW_FIELD_IDX_ICMP_CODE,
+	/* GRE */
+	ICE_FLOW_FIELD_IDX_GRE_KEYID,
+	/* The total number of enums must not exceed 64 */
+	ICE_FLOW_FIELD_IDX_MAX
+};
+
+/* Flow headers and fields for AVF support */
+enum ice_flow_avf_hdr_field {
+	/* Values 0 - 28 are reserved for future use */
+	ICE_AVF_FLOW_FIELD_INVALID		= 0,
+	ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP	= 29,
+	ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
+	ICE_AVF_FLOW_FIELD_IPV4_UDP,
+	ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
+	ICE_AVF_FLOW_FIELD_IPV4_TCP,
+	ICE_AVF_FLOW_FIELD_IPV4_SCTP,
+	ICE_AVF_FLOW_FIELD_IPV4_OTHER,
+	ICE_AVF_FLOW_FIELD_FRAG_IPV4,
+	ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP	= 39,
+	ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
+	ICE_AVF_FLOW_FIELD_IPV6_UDP,
+	ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
+	ICE_AVF_FLOW_FIELD_IPV6_TCP,
+	ICE_AVF_FLOW_FIELD_IPV6_SCTP,
+	ICE_AVF_FLOW_FIELD_IPV6_OTHER,
+	ICE_AVF_FLOW_FIELD_FRAG_IPV6,
+	ICE_AVF_FLOW_FIELD_RSVD47,
+	ICE_AVF_FLOW_FIELD_FCOE_OX,
+	ICE_AVF_FLOW_FIELD_FCOE_RX,
+	ICE_AVF_FLOW_FIELD_FCOE_OTHER,
+	/* Values 51-62 are reserved */
+	ICE_AVF_FLOW_FIELD_L2_PAYLOAD		= 63,
+	ICE_AVF_FLOW_FIELD_MAX
+};
+
+/* Supported RSS offloads. This macro is defined to support the
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS op, through which the PF driver reports
+ * the RSS hardware capabilities to the caller.
+ */
+#define ICE_DEFAULT_RSS_HENA ( \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+
+enum ice_flow_dir {
+	ICE_FLOW_DIR_UNDEFINED	= 0,
+	ICE_FLOW_TX		= 0x01,
+	ICE_FLOW_RX		= 0x02,
+	ICE_FLOW_TX_RX		= ICE_FLOW_RX | ICE_FLOW_TX
+};
+
+enum ice_flow_priority {
+	ICE_FLOW_PRIO_LOW,
+	ICE_FLOW_PRIO_NORMAL,
+	ICE_FLOW_PRIO_HIGH
+};
+
+#define ICE_FLOW_SEG_MAX		2
+#define ICE_FLOW_SEG_RAW_FLD_MAX	2
+#define ICE_FLOW_PROFILE_MAX		1024
+#define ICE_FLOW_SW_FIELD_VECTOR_MAX	48
+#define ICE_FLOW_ACL_FIELD_VECTOR_MAX	32
+#define ICE_FLOW_FV_EXTRACT_SZ		2
+
+#define ICE_FLOW_SET_HDRS(seg, val)	((seg)->hdrs |= (u32)(val))
+
+struct ice_flow_seg_xtrct {
+	u8 prot_id;	/* Protocol ID of extracted header field */
+	u8 off;		/* Starting offset of the field in header in bytes */
+	u8 idx;		/* Index of FV entry used */
+	u8 disp;	/* Displacement of field in bits from FV entry start */
+};
+
+enum ice_flow_fld_match_type {
+	ICE_FLOW_FLD_TYPE_REG,		/* Value, mask */
+	ICE_FLOW_FLD_TYPE_RANGE,	/* Value, mask, last (upper bound) */
+	ICE_FLOW_FLD_TYPE_PREFIX,	/* IP address, prefix, size of prefix */
+	ICE_FLOW_FLD_TYPE_SIZE,		/* Value, mask, size of match */
+};
+
+struct ice_flow_fld_loc {
+	/* Describe offsets of field information relative to the beginning of
+	 * input buffer provided when adding flow entries.
+	 */
+	u16 val;	/* Offset where the value is located */
+	u16 mask;	/* Offset where the mask/prefix value is located */
+	u16 last;	/* Length or offset where the upper value is located */
+};
+
+struct ice_flow_fld_info {
+	enum ice_flow_fld_match_type type;
+	/* Location where to retrieve data from an input buffer */
+	struct ice_flow_fld_loc src;
+	/* Location where to put the data into the final entry buffer */
+	struct ice_flow_fld_loc entry;
+	struct ice_flow_seg_xtrct xtrct;
+};
+
+struct ice_flow_seg_fld_raw {
+	int off;	/* Offset from the start of the segment */
+	struct ice_flow_fld_info info;
+};
+
+struct ice_flow_seg_info {
+	u32 hdrs;	/* Bitmask indicating protocol headers present */
+	u64 match;	/* Bitmask indicating header fields to be matched */
+	u64 range;	/* Bitmask indicating header fields matched as ranges */
+
+	struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+
+	u8 raws_cnt;	/* Number of raw fields to be matched */
+	struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
+};
+
+/* This structure describes a flow entry, and is tracked only in this file */
+struct ice_flow_entry {
+	struct LIST_ENTRY_TYPE l_entry;
+
+	u64 id;
+	u16 vsi_handle;
+	enum ice_flow_priority priority;
+
+	struct ice_flow_prof *prof;
+
+	/* Flow entry's content */
+	u16 entry_sz;
+	void *entry;
+
+	/* Action list */
+	u8 acts_cnt;
+	struct ice_flow_action *acts;
+};
+
+#define ICE_FLOW_ENTRY_HNDL(e)	((unsigned long)(e))
+#define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(h))
+
+struct ice_flow_prof {
+	struct LIST_ENTRY_TYPE l_entry;
+
+	u64 id;
+	enum ice_flow_dir dir;
+
+	/* Keep track of flow entries associated with this flow profile */
+	struct ice_lock entries_lock;
+	struct LIST_HEAD_TYPE entries;
+
+	u8 segs_cnt;
+	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
+
+	/* software VSI handles referenced by this flow profile */
+	ice_declare_bitmap(vsis, ICE_MAX_VSI);
+
+	union {
+		/* struct sw_recipe */
+		/* struct fd */
+		u32 data;
+	} cfg;
+
+	/* Default actions */
+	u8 acts_cnt;
+	struct ice_flow_action *acts;
+};
+
+struct ice_rss_cfg {
+	struct LIST_ENTRY_TYPE l_entry;
+	u64 hashed_flds;
+	u32 packet_hdr;
+};
+
+enum ice_flow_action_type {
+	ICE_FLOW_ACT_NOP,
+	ICE_FLOW_ACT_ALLOW,
+	ICE_FLOW_ACT_DROP,
+	ICE_FLOW_ACT_COUNT,
+	ICE_FLOW_ACT_FWD_VSI,
+	ICE_FLOW_ACT_FWD_VSI_LIST,	/* Should be abstracted away */
+	ICE_FLOW_ACT_FWD_QUEUE,		/* Can Queues be abstracted away? */
+	ICE_FLOW_ACT_FWD_QUEUE_GROUP,	/* Can Queues be abstracted away? */
+	ICE_FLOW_ACTION_PUSH,
+	ICE_FLOW_ACTION_POP,
+	ICE_FLOW_ACTION_MODIFY,
+};
+
+struct ice_flow_action {
+	enum ice_flow_action_type type;
+	union {
+		u32 dummy;
+	} data;
+};
+
+/* TDD, especially in the Linux code, does not like prototypes, so ifdef
+ * them all out so they stop conflicting with our mocks.
+ */
+u64
+ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+		   struct ice_flow_seg_info *segs, u8 segs_cnt);
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+		  struct ice_flow_action *acts, u8 acts_cnt,
+		  struct ice_flow_prof **prof);
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		     u8 *hw_prof);
+
+u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
+		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
+		   u64 *entry_h);
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
+void
+ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+			u16 val_loc, u16 prefix_loc, u8 prefix_sz);
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+		     u16 val_loc, u16 mask_loc);
+void ice_rem_all_rss_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		u32 addl_hdrs);
+enum ice_status
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+		u32 addl_hdrs);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
 #endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ice/base/meson.build b/drivers/net/ice/base/meson.build
index 77a9802f4..d7f8536bc 100644
--- a/drivers/net/ice/base/meson.build
+++ b/drivers/net/ice/base/meson.build
@@ -8,6 +8,7 @@ sources = [
 	'ice_switch.c',
 	'ice_nvm.c',
 	'ice_flex_pipe.c',
+	'ice_flow.c',
 ]
 
 error_cflags = ['-Wno-unused-value',
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [dpdk-dev] [PATCH v2 7/7] net/ice/base: free flow profile entries
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
                     ` (5 preceding siblings ...)
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 6/7] net/ice/base: add flow module Qi Zhang
@ 2019-01-16 14:13   ` Qi Zhang
  2019-01-17 13:31   ` [dpdk-dev] [PATCH v2 0/7] net/ice: update share code Zhang, Qi Z
  7 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2019-01-16 14:13 UTC (permalink / raw)
  To: wenzhuo.lu, qiming.yang; +Cc: paul.m.stillwell.jr, dev, ferruh.yigit, Qi Zhang

Free flow profile entries when freeing HW tables.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
---
 drivers/net/ice/base/ice_flex_pipe.c | 38 ++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 1887fc38c..493bc4e9b 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -3034,6 +3034,42 @@ static void ice_fill_blk_tbls(struct ice_hw *hw, enum ice_block block_id)
 }
 
 /**
+ * ice_free_flow_profs - free flow profile entries
+ * @hw: pointer to the hardware structure
+ */
+static void ice_free_flow_profs(struct ice_hw *hw)
+{
+	u8 i;
+
+	for (i = 0; i < ICE_BLK_COUNT; i++) {
+		struct ice_flow_prof *p, *tmp;
+
+		if (LIST_EMPTY(&hw->fl_profs[i]))
+			continue;
+
+		/* This call is being made as part of resource deallocation
+		 * during unload. Lock acquire and release will not be
+		 * necessary here.
+		 */
+		LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[i],
+					 ice_flow_prof, l_entry) {
+			struct ice_flow_entry *e, *t;
+
+			LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
+						 ice_flow_entry, l_entry)
+				ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
+
+			LIST_DEL(&p->l_entry);
+			if (p->acts)
+				ice_free(hw, p->acts);
+			ice_free(hw, p);
+		}
+
+		ice_destroy_lock(&hw->fl_profs_locks[i]);
+	}
+}
+
+/**
  * ice_free_prof_map - frees the profile map
  * @hw: pointer to the hardware structure
  * @blk: the hw block which contains the profile map to be freed
@@ -3096,6 +3132,8 @@ void ice_free_hw_tbls(struct ice_hw *hw)
 	}
 
 	ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
+
+	ice_free_flow_profs(hw);
 }
 
 /**
-- 
2.13.6

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [dpdk-dev] [PATCH v2 0/7] net/ice: update share code
  2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
                     ` (6 preceding siblings ...)
  2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 7/7] net/ice/base: free flow profile entries Qi Zhang
@ 2019-01-17 13:31   ` Zhang, Qi Z
  7 siblings, 0 replies; 21+ messages in thread
From: Zhang, Qi Z @ 2019-01-17 13:31 UTC (permalink / raw)
  To: Lu, Wenzhuo, Yang, Qiming; +Cc: Stillwell Jr, Paul M, dev, Yigit, Ferruh



> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Wednesday, January 16, 2019 10:14 PM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>
> Cc: Stillwell Jr, Paul M <paul.m.stillwell.jr@intel.com>; dev@dpdk.org; Yigit,
> Ferruh <ferruh.yigit@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH v2 0/7] net/ice: update share code
> 
> For ice family NICs, the packet processing pipeline can be configured
> through a package file that is downloaded into the device by the driver
> during init.
> The patch set adds the necessary shared code APIs to support package
> download. Some code cleanup is also included.
> 
> Though package download will not be enabled in 19.02 as planned, we hope
> the shared code part can be merged early.
> 
> Meanwhile we will submit a separate RFC patch for package download for
> early customer evaluation.
> 
> v2:
> - fix 32 bit compile error.
> - fix check patch warning.
> 
> Qi Zhang (7):
>   net/ice/base: code clean
>   net/ice/base: add API to support resource allocate
>   net/ice/base: add package download related data structure
>   net/ice/base: add some help macros
>   net/ice/base: add flexible pipeline module
>   net/ice/base: add flow module
>   net/ice/base: free flow profile entries
> 
>  drivers/net/ice/Makefile              |    2 +
>  drivers/net/ice/base/ice_adminq_cmd.h |   85 +
>  drivers/net/ice/base/ice_common.c     |   85 +-
>  drivers/net/ice/base/ice_common.h     |   32 +-
>  drivers/net/ice/base/ice_controlq.c   |    2 +-
>  drivers/net/ice/base/ice_flex_pipe.c  | 5056
> +++++++++++++++++++++++++++++++++  drivers/net/ice/base/ice_flex_pipe.h
> |  107 +  drivers/net/ice/base/ice_flex_type.h  |  685 +++++
>  drivers/net/ice/base/ice_flow.c       | 2080 ++++++++++++++
>  drivers/net/ice/base/ice_flow.h       |  337 +++
>  drivers/net/ice/base/ice_osdep.h      |    2 +
>  drivers/net/ice/base/ice_type.h       |   37 +-
>  drivers/net/ice/base/meson.build      |    2 +
>  13 files changed, 8479 insertions(+), 33 deletions(-)  create mode 100644
> drivers/net/ice/base/ice_flex_pipe.c
>  create mode 100644 drivers/net/ice/base/ice_flex_pipe.h
>  create mode 100644 drivers/net/ice/base/ice_flow.c
> 
> --
> 2.13.6

Applied to dpdk-next-net-intel.

Thanks
Qi

^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2019-01-17 13:31 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-01-15 12:56 [dpdk-dev] [PATCH 0/7] net/ice: update share code Qi Zhang
2019-01-15 12:56 ` [dpdk-dev] [PATCH 1/7] net/ice/base: code clean Qi Zhang
2019-01-15 12:56 ` [dpdk-dev] [PATCH 2/7] net/ice/base: add API to support resource allocate Qi Zhang
2019-01-15 12:56 ` [dpdk-dev] [PATCH 3/7] net/ice/base: add package download related data structure Qi Zhang
2019-01-15 12:56 ` [dpdk-dev] [PATCH 4/7] net/ice/base: add some help macros Qi Zhang
2019-01-15 12:56 ` [dpdk-dev] [PATCH 5/7] net/ice/base: add flexible pipeline module Qi Zhang
2019-01-15 12:56 ` [dpdk-dev] [PATCH 6/7] net/ice/base: add flow module Qi Zhang
2019-01-16 12:14   ` Ferruh Yigit
2019-01-15 12:56 ` [dpdk-dev] [PATCH 7/7] net/ice/base: free flow profile entries Qi Zhang
2019-01-16 12:15   ` Ferruh Yigit
2019-01-16  5:05 ` [dpdk-dev] [PATCH 0/7] net/ice: update share code Lu, Wenzhuo
2019-01-16 12:16 ` Ferruh Yigit
2019-01-16 14:13 ` [dpdk-dev] [PATCH v2 " Qi Zhang
2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 1/7] net/ice/base: code clean Qi Zhang
2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 2/7] net/ice/base: add API to support resource allocate Qi Zhang
2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 3/7] net/ice/base: add package download related data structure Qi Zhang
2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 4/7] net/ice/base: add some help macros Qi Zhang
2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 5/7] net/ice/base: add flexible pipeline module Qi Zhang
2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 6/7] net/ice/base: add flow module Qi Zhang
2019-01-16 14:13   ` [dpdk-dev] [PATCH v2 7/7] net/ice/base: free flow profile entries Qi Zhang
2019-01-17 13:31   ` [dpdk-dev] [PATCH v2 0/7] net/ice: update share code Zhang, Qi Z

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).