DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 0/7] net/qede: add support for new HW
@ 2021-02-19 10:14 Rasesh Mody
  2021-02-19 10:14 ` [dpdk-dev] [PATCH 2/7] net/qede/base: changes for HSI to support " Rasesh Mody
                   ` (6 more replies)
  0 siblings, 7 replies; 8+ messages in thread
From: Rasesh Mody @ 2021-02-19 10:14 UTC (permalink / raw)
  To: jerinj, ferruh.yigit; +Cc: Rasesh Mody, dev, GR-Everest-DPDK-Dev

Hi,

This patch series adds support for new HW while modifying
existing driver to continue supporting previous HWs.
Highlights of changes:
 - Registers, HW specific and initialization updates for new HW
 - FW upgrade
 - Base driver upgrade, other optimizations and cleanup

The new 50xxx family of Marvell QLogic fastlinq adapters will bring in
support for higher speeds and will significantly increase max PPS rates.
This family will eventually support flexible flow steering and
various in-device switching modes.

At the same time, it retains the same architecture and design as the
previous QEDE driver. Thus, a lot of fast path and slow path code is
expected to be the same.

Please note for checkpatch 100 character max_line_length was used.

Thanks,
Rasesh

Rasesh Mody (7):
  net/qede/base: update and add register definitions
  net/qede/base: changes for HSI to support new HW
  net/qede/base: add OS abstracted changes
  net/qede/base: update base driver to 8.62.4.0
  net/qede: changes for DMA page chain allocation and free
  net/qede: add support for new HW
  net/qede/base: clean unnecessary ifdef and comments

 drivers/net/qede/base/bcm_osal.c              |      1 -
 drivers/net/qede/base/bcm_osal.h              |     42 +-
 drivers/net/qede/base/common_hsi.h            |   1752 +-
 drivers/net/qede/base/ecore.h                 |    575 +-
 drivers/net/qede/base/ecore_attn_values.h     |      3 +-
 drivers/net/qede/base/ecore_chain.h           |    242 +-
 drivers/net/qede/base/ecore_cxt.c             |   1234 +-
 drivers/net/qede/base/ecore_cxt.h             |    149 +-
 drivers/net/qede/base/ecore_cxt_api.h         |     31 +-
 drivers/net/qede/base/ecore_dcbx.c            |    526 +-
 drivers/net/qede/base/ecore_dcbx.h            |     16 +-
 drivers/net/qede/base/ecore_dcbx_api.h        |     41 +-
 drivers/net/qede/base/ecore_dev.c             |   4083 +-
 drivers/net/qede/base/ecore_dev_api.h         |    367 +-
 drivers/net/qede/base/ecore_gtt_reg_addr.h    |     93 +-
 drivers/net/qede/base/ecore_gtt_values.h      |      4 +-
 drivers/net/qede/base/ecore_hsi_common.h      |   2722 +-
 drivers/net/qede/base/ecore_hsi_debug_tools.h |    426 +-
 drivers/net/qede/base/ecore_hsi_eth.h         |   4541 +-
 drivers/net/qede/base/ecore_hsi_func_common.h |      5 +-
 drivers/net/qede/base/ecore_hsi_init_func.h   |    707 +-
 drivers/net/qede/base/ecore_hsi_init_tool.h   |    254 +-
 drivers/net/qede/base/ecore_hw.c              |    386 +-
 drivers/net/qede/base/ecore_hw.h              |     55 +-
 drivers/net/qede/base/ecore_hw_defs.h         |     45 +-
 drivers/net/qede/base/ecore_init_fw_funcs.c   |   1365 +-
 drivers/net/qede/base/ecore_init_fw_funcs.h   |    457 +-
 drivers/net/qede/base/ecore_init_ops.c        |    159 +-
 drivers/net/qede/base/ecore_init_ops.h        |     19 +-
 drivers/net/qede/base/ecore_int.c             |   1363 +-
 drivers/net/qede/base/ecore_int.h             |     65 +-
 drivers/net/qede/base/ecore_int_api.h         |    127 +-
 drivers/net/qede/base/ecore_iov_api.h         |    118 +-
 drivers/net/qede/base/ecore_iro.h             |    427 +-
 drivers/net/qede/base/ecore_iro_values.h      |    463 +-
 drivers/net/qede/base/ecore_l2.c              |    497 +-
 drivers/net/qede/base/ecore_l2.h              |     18 +-
 drivers/net/qede/base/ecore_l2_api.h          |    148 +-
 drivers/net/qede/base/ecore_mcp.c             |   2631 +-
 drivers/net/qede/base/ecore_mcp.h             |    125 +-
 drivers/net/qede/base/ecore_mcp_api.h         |    471 +-
 drivers/net/qede/base/ecore_mng_tlv.c         |    910 +-
 drivers/net/qede/base/ecore_proto_if.h        |     69 +-
 drivers/net/qede/base/ecore_rt_defs.h         |    895 +-
 drivers/net/qede/base/ecore_sp_api.h          |      6 +-
 drivers/net/qede/base/ecore_sp_commands.c     |    141 +-
 drivers/net/qede/base/ecore_sp_commands.h     |     18 +-
 drivers/net/qede/base/ecore_spq.c             |    431 +-
 drivers/net/qede/base/ecore_spq.h             |     65 +-
 drivers/net/qede/base/ecore_sriov.c           |   1700 +-
 drivers/net/qede/base/ecore_sriov.h           |    147 +-
 drivers/net/qede/base/ecore_status.h          |      4 +-
 drivers/net/qede/base/ecore_utils.h           |     18 +-
 drivers/net/qede/base/ecore_vf.c              |    550 +-
 drivers/net/qede/base/ecore_vf.h              |     57 +-
 drivers/net/qede/base/ecore_vf_api.h          |     74 +-
 drivers/net/qede/base/ecore_vfpf_if.h         |    122 +-
 drivers/net/qede/base/eth_common.h            |    300 +-
 drivers/net/qede/base/mcp_public.h            |   2343 +-
 drivers/net/qede/base/nvm_cfg.h               |   5059 +-
 drivers/net/qede/base/reg_addr.h              | 190590 ++++++++++++++-
 drivers/net/qede/qede_debug.c                 |    117 +-
 drivers/net/qede/qede_ethdev.c                |     11 +-
 drivers/net/qede/qede_ethdev.h                |     11 +-
 drivers/net/qede/qede_if.h                    |     20 +-
 drivers/net/qede/qede_main.c                  |      4 +-
 drivers/net/qede/qede_rxtx.c                  |     89 +-
 drivers/net/qede/qede_sriov.c                 |      4 -
 lib/librte_eal/include/rte_bitops.h           |     54 +-
 69 files changed, 215373 insertions(+), 15189 deletions(-)

-- 
2.18.0


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH 2/7] net/qede/base: changes for HSI to support new HW
  2021-02-19 10:14 [dpdk-dev] [PATCH 0/7] net/qede: add support for new HW Rasesh Mody
@ 2021-02-19 10:14 ` Rasesh Mody
  2021-02-19 10:14 ` [dpdk-dev] [PATCH 3/7] net/qede/base: add OS abstracted changes Rasesh Mody
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Rasesh Mody @ 2021-02-19 10:14 UTC (permalink / raw)
  To: jerinj, ferruh.yigit; +Cc: Rasesh Mody, dev, GR-Everest-DPDK-Dev, Igor Russkikh

The changes introduced in this patch relate primarily to the
hardware-software interface, adding support for the new hardware while
still maintaining support for the existing hardware.

The code architecture and design are the same as with the previous QEDE
driver. Thus, a lot of fast path and slow path code is expected to be the same.
The registers, structure, functions, etc. for 4xxxx family of Marvell
fastlinq adapters and newly supported 50xxx family of Marvell fastlinq
adapters are mainly differentiated by using E4 and E5 suffixes
respectively.

This patch adds PMD support for using new firmware 8.62.0.0.

Signed-off-by: Rasesh Mody <rmody@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
---
 drivers/net/qede/base/common_hsi.h            | 1752 ++++---
 drivers/net/qede/base/ecore.h                 |   22 +-
 drivers/net/qede/base/ecore_cxt.c             |   53 +-
 drivers/net/qede/base/ecore_cxt.h             |   12 -
 drivers/net/qede/base/ecore_dev.c             |   49 +-
 drivers/net/qede/base/ecore_hsi_common.h      | 2719 ++++++----
 drivers/net/qede/base/ecore_hsi_debug_tools.h |  426 +-
 drivers/net/qede/base/ecore_hsi_eth.h         | 4541 ++++++++++++-----
 drivers/net/qede/base/ecore_hsi_func_common.h |    5 +-
 drivers/net/qede/base/ecore_hsi_init_func.h   |  705 ++-
 drivers/net/qede/base/ecore_hsi_init_tool.h   |  254 +-
 drivers/net/qede/base/ecore_init_fw_funcs.c   |  153 +-
 drivers/net/qede/base/ecore_init_ops.c        |   16 +-
 drivers/net/qede/base/ecore_int.c             |   39 +-
 drivers/net/qede/base/ecore_int.h             |    4 +-
 drivers/net/qede/base/ecore_int_api.h         |    2 +-
 drivers/net/qede/base/ecore_l2.c              |    6 +-
 drivers/net/qede/base/ecore_spq.c             |   39 +-
 drivers/net/qede/base/ecore_spq.h             |    2 +-
 drivers/net/qede/base/ecore_sriov.c           |    5 +-
 drivers/net/qede/base/ecore_vf.c              |    2 +-
 drivers/net/qede/base/eth_common.h            |    6 +-
 drivers/net/qede/qede_debug.c                 |   24 +-
 drivers/net/qede/qede_main.c                  |    2 +-
 drivers/net/qede/qede_rxtx.c                  |   15 +-
 25 files changed, 7358 insertions(+), 3495 deletions(-)

diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index 1a02d460b..523876014 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -1,67 +1,81 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __COMMON_HSI__
 #define __COMMON_HSI__
 /********************************/
 /* PROTOCOL COMMON FW CONSTANTS */
 /********************************/
 
-/* Temporarily here should be added to HSI automatically by resource allocation
- * tool.
- */
-#define T_TEST_AGG_INT_TEMP  6
-#define M_TEST_AGG_INT_TEMP  8
-#define U_TEST_AGG_INT_TEMP  6
-#define X_TEST_AGG_INT_TEMP  14
-#define Y_TEST_AGG_INT_TEMP  4
-#define P_TEST_AGG_INT_TEMP  4
+/* Temporarily here should be added to HSI automatically by resource allocation tool. */
+#define T_TEST_AGG_INT_TEMP_E4  6
+#define M_TEST_AGG_INT_TEMP_E4  8
+#define U_TEST_AGG_INT_TEMP_E4  6
+#define X_TEST_AGG_INT_TEMP_E4  14
+#define Y_TEST_AGG_INT_TEMP_E4  4
+#define P_TEST_AGG_INT_TEMP_E4  4
+
+
+#define T_TEST_AGG_INT_TEMP_E5  3
+#define M_TEST_AGG_INT_TEMP_E5  3
+#define U_TEST_AGG_INT_TEMP_E5  3
+#define X_TEST_AGG_INT_TEMP_E5  7
+#define Y_TEST_AGG_INT_TEMP_E5  3
+#define P_TEST_AGG_INT_TEMP_E5  3
 
 #define X_FINAL_CLEANUP_AGG_INT  1
 
 #define EVENT_RING_PAGE_SIZE_BYTES          4096
 
 #define NUM_OF_GLOBAL_QUEUES				128
-#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE	64
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE	128
 
 #define ISCSI_CDU_TASK_SEG_TYPE       0
 #define FCOE_CDU_TASK_SEG_TYPE        0
 #define RDMA_CDU_TASK_SEG_TYPE        1
-#define ETH_CDU_TASK_SEG_TYPE         2
+#define ETH_CDU_TASK_SEG_TYPE         1
+
+#define ISCSI_CDU_TASK_SEG_ID           0
+#define FCOE_CDU_TASK_SEG_ID            1
+#define ROCE_CDU_TASK_SEG_ID            2
+#define PREROCE_CDU_TASK_SEG_ID         2
+#define ETH_CDU_TASK_SEG_ID             3
 
 #define FW_ASSERT_GENERAL_ATTN_IDX    32
 
-#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE	3
+#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
 
 /* Queue Zone sizes in bytes */
-#define TSTORM_QZONE_SIZE    8   /*tstorm_queue_zone*/
-/*mstorm_eth_queue_zone. Used only for RX producer of VFs in backward
- * compatibility mode.
- */
-#define MSTORM_QZONE_SIZE    16
-#define USTORM_QZONE_SIZE    8   /*ustorm_queue_zone*/
-#define XSTORM_QZONE_SIZE    8   /*xstorm_eth_queue_zone*/
-#define YSTORM_QZONE_SIZE    0
-#define PSTORM_QZONE_SIZE    0
-
-/*Log of mstorm default VF zone size.*/
-#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG       7
-/*Maximum number of RX queues that can be allocated to VF by default*/
+#define TSTORM_QZONE_SIZE_E4    8   /*tstorm_queue_zone*/
+/* mstorm_eth_queue_zone. Used only for RX producer of VFs in backward compatibility mode. */
+#define MSTORM_QZONE_SIZE_E4    16
+#define USTORM_QZONE_SIZE_E4    8   /*ustorm_queue_zone*/
+#define XSTORM_QZONE_SIZE_E4    8   /*xstorm_eth_queue_zone*/
+#define YSTORM_QZONE_SIZE_E4    0
+#define PSTORM_QZONE_SIZE_E4    0
+
+#define TSTORM_QZONE_SIZE_E5    0
+/* mstorm_eth_queue_zone. Used only for RX producer of VFs in backward compatibility mode. */
+#define MSTORM_QZONE_SIZE_E5    16
+#define USTORM_QZONE_SIZE_E5    8   /*ustorm_queue_zone*/
+#define XSTORM_QZONE_SIZE_E5    8   /*xstorm_eth_queue_zone*/
+#define YSTORM_QZONE_SIZE_E5    0
+#define PSTORM_QZONE_SIZE_E5    0
+
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG       7     /*Log of mstorm default VF zone size.*/
+/* Maximum number of RX queues that can be allocated to VF by default */
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT  16
-/*Maximum number of RX queues that can be allocated to VF with doubled VF zone
- * size. Up to 96 VF supported in this mode
+/* Maximum number of RX queues that can be allocated to VF with doubled VF zone size. Up to 96 VF
+ * supported in this mode
  */
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE   48
-/*Maximum number of RX queues that can be allocated to VF with 4 VF zone size.
- * Up to 48 VF supported in this mode
+/* Maximum number of RX queues that can be allocated to VF with 4 VF zone size. Up to 48 VF
+ * supported in this mode
  */
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD     112
-
-#define ETH_RGSRC_CTX_SIZE                6 /*Size in QREGS*/
-#define ETH_TGSRC_CTX_SIZE                6 /*Size in QREGS*/
 /********************************/
 /* CORE (LIGHT L2) FW CONSTANTS */
 /********************************/
@@ -74,27 +88,29 @@
 
 #define CORE_LL2_TX_MAX_BDS_PER_PACKET				12
 
-#define CORE_SPQE_PAGE_SIZE_BYTES                       4096
+#define CORE_SPQE_PAGE_SIZE_BYTES			4096
 
 /* Number of LL2 RAM based (RX producers and statistics) queues */
-#define MAX_NUM_LL2_RX_RAM_QUEUES               32
+#define MAX_NUM_LL2_RX_QUEUES                   48
+/* Number of LL2 RAM based (RX producers and statistics) queues */
+#define MAX_NUM_LL2_RX_RAM_QUEUES               MAX_NUM_LL2_RX_QUEUES
 /* Number of LL2 context based (RX producers and statistics) queues */
-#define MAX_NUM_LL2_RX_CTX_QUEUES               208
-#define MAX_NUM_LL2_RX_QUEUES (MAX_NUM_LL2_RX_RAM_QUEUES + \
-			       MAX_NUM_LL2_RX_CTX_QUEUES)
+#define MAX_NUM_LL2_RX_CTX_QUEUES               64
+/*Total number of LL2 queue. rename to MAX_NUM_LL2_RX_QUEUES*/
+#define MAX_NUM_LL2_RX_QUEUES_TOTAL             \
+	(MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES)
 
 #define MAX_NUM_LL2_TX_STATS_COUNTERS			48
 
 
-/****************************************************************************/
-/* Include firmware version number only- do not add constants here to avoid */
-/* redundunt compilations                                                   */
-/****************************************************************************/
+/*///////////////////////////////////////////////////////////////////////////////////////////////*/
+/*Include firmware version number only- do not add constants here to avoid redundant compilations*/
+/*///////////////////////////////////////////////////////////////////////////////////////////////*/
 
 
 #define FW_MAJOR_VERSION		8
-#define FW_MINOR_VERSION		40
-#define FW_REVISION_VERSION		33
+#define FW_MINOR_VERSION		62
+#define FW_REVISION_VERSION		0
 #define FW_ENGINEERING_VERSION	0
 
 /***********************/
@@ -104,48 +120,68 @@
 /* PCI functions */
 #define MAX_NUM_PORTS_BB        (2)
 #define MAX_NUM_PORTS_K2        (4)
-#define MAX_NUM_PORTS           (MAX_NUM_PORTS_K2)
+#define MAX_NUM_PORTS_E5        (4)
+#define MAX_NUM_PORTS			(MAX_NUM_PORTS_E5)
 
 #define MAX_NUM_PFS_BB          (8)
 #define MAX_NUM_PFS_K2          (16)
-#define MAX_NUM_PFS             (MAX_NUM_PFS_K2)
+#define MAX_NUM_PFS_E5          (16)
+#define MAX_NUM_PFS             (MAX_NUM_PFS_E5)
 #define MAX_NUM_OF_PFS_IN_CHIP  (16) /* On both engines */
 
 #define MAX_NUM_VFS_BB          (120)
 #define MAX_NUM_VFS_K2          (192)
-#define COMMON_MAX_NUM_VFS      (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_E4          (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_E5          (240)
+#define MAX_NUM_VFS		(MAX_NUM_VFS_E5) /* @DPDK */
 
 #define MAX_NUM_FUNCTIONS_BB    (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
 #define MAX_NUM_FUNCTIONS_K2    (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
 
-/* in both BB and K2, the VF number starts from 16. so for arrays containing all
- * possible PFs and VFs - we need a constant for this size
- */
+/* in both BB and K2, the VF number starts from 16. so for arrays containing all */
+/* possible PFs and VFs - we need a constant for this size */
 #define MAX_FUNCTION_NUMBER_BB      (MAX_NUM_PFS + MAX_NUM_VFS_BB)
 #define MAX_FUNCTION_NUMBER_K2      (MAX_NUM_PFS + MAX_NUM_VFS_K2)
-#define COMMON_MAX_FUNCTION_NUMBER  (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_FUNCTION_NUMBER_E4      (MAX_NUM_PFS + MAX_NUM_VFS_E4)
+#define MAX_FUNCTION_NUMBER_E5      (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+#define COMMON_MAX_FUNCTION_NUMBER  (MAX_NUM_PFS + MAX_NUM_VFS_E5)
 
 #define MAX_NUM_VPORTS_K2       (208)
 #define MAX_NUM_VPORTS_BB       (160)
-#define COMMON_MAX_NUM_VPORTS   (MAX_NUM_VPORTS_K2)
+#define MAX_NUM_VPORTS_E4       (MAX_NUM_VPORTS_K2)
+#define MAX_NUM_VPORTS_E5       (256)
+#define COMMON_MAX_NUM_VPORTS   (MAX_NUM_VPORTS_E5)
 
-#define MAX_NUM_L2_QUEUES_BB	(256)
-#define MAX_NUM_L2_QUEUES_K2    (320)
+#define MAX_NUM_L2_QUEUES_BB			(256)
+#define MAX_NUM_L2_QUEUES_K2			(320)
+#define MAX_NUM_L2_QUEUES_E5			(512)
+#define COMMON_MAX_NUM_L2_QUEUES		(MAX_NUM_L2_QUEUES_E5)
 
-#define FW_LOWEST_CONSUMEDDMAE_CHANNEL   (26)
+#define FW_LOWEST_CONSUMEDDMAE_CHANNEL_E4   (26)
+#define FW_LOWEST_CONSUMEDDMAE_CHANNEL_E5   (32) /* Not used in E5 */
 
 /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
 #define NUM_PHYS_TCS_4PORT_K2     4
+#define NUM_PHYS_TCS_4PORT_TX_E5  6
+#define NUM_PHYS_TCS_4PORT_RX_E5  4
 #define NUM_OF_PHYS_TCS           8
 #define PURE_LB_TC                NUM_OF_PHYS_TCS
 #define NUM_TCS_4PORT_K2          (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_TCS_4PORT_TX_E5       (NUM_PHYS_TCS_4PORT_TX_E5 + 1)
+#define NUM_TCS_4PORT_RX_E5       (NUM_PHYS_TCS_4PORT_RX_E5 + 1)
 #define NUM_OF_TCS                (NUM_OF_PHYS_TCS + 1)
+#define PBF_TGFS_PURE_LB_TC_E5    5
 
 /* CIDs */
-#define NUM_OF_CONNECTION_TYPES (8)
+#define NUM_OF_CONNECTION_TYPES_E4			(8)
+#define NUM_OF_CONNECTION_TYPES_E5			(16)
+#define COMMON_NUM_OF_CONNECTION_TYPES		(NUM_OF_CONNECTION_TYPES_E5)
+
 #define NUM_OF_TASK_TYPES       (8)
 #define NUM_OF_LCIDS            (320)
-#define NUM_OF_LTIDS            (320)
+#define NUM_OF_LTIDS_E4         (320)
+#define NUM_OF_LTIDS_E5         (384)
+#define COMMON_NUM_OF_LTIDS		(NUM_OF_LTIDS_E5)
 
 /* Global PXP windows (GTT) */
 #define NUM_OF_GTT          19
@@ -154,33 +190,33 @@
 #define GTT_DWORD_SIZE      (1 << GTT_DWORD_SIZE_BITS)
 
 /* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 11
 /*****************/
 /* CDU CONSTANTS */
 /*****************/
 
-#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
-#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT		(17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK		(0x1ffff)
 
 #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT	(12)
 #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK	(0xfff)
 
 #define	CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT				(0)
 #define	CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT	(1)
-#define	CDU_CONTEXT_VALIDATION_CFG_USE_TYPE				(2)
+#define	CDU_CONTEXT_VALIDATION_CFG_USE_TYPE					(2)
 #define	CDU_CONTEXT_VALIDATION_CFG_USE_REGION				(3)
-#define	CDU_CONTEXT_VALIDATION_CFG_USE_CID				(4)
+#define	CDU_CONTEXT_VALIDATION_CFG_USE_CID					(4)
 #define	CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE				(5)
 
 /*enabled, type A, use all */
-#define	CDU_CONTEXT_VALIDATION_DEFAULT_CFG				(0x3D)
+#define	CDU_CONTEXT_VALIDATION_DEFAULT_CFG					(0x3D)
 
 /*****************/
 /* DQ CONSTANTS  */
 /*****************/
 
 /* DEMS */
-#define DQ_DEMS_LEGACY			0
+#define	DQ_DEMS_LEGACY						0
 #define DQ_DEMS_TOE_MORE_TO_SEND			3
 #define DQ_DEMS_TOE_LOCAL_ADV_WND			4
 #define DQ_DEMS_ROCE_CQ_CONS				7
@@ -196,19 +232,13 @@
 #define DQ_XCM_AGG_VAL_SEL_REG6   7
 
 /* XCM agg val selection (FW) */
-#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD2
-#define DQ_XCM_ETH_TX_BD_CONS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_CORE_TX_BD_CONS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ETH_TX_BD_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_TX_BD_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_SPQ_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD            DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_CORE_TX_BD_CONS_CMD          DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_PROD_CMD          DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD            DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD         DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD           DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD           DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD        DQ_XCM_AGG_VAL_SEL_WORD5
 #define DQ_XCM_FCOE_SQ_CONS_CMD             DQ_XCM_AGG_VAL_SEL_WORD3
 #define DQ_XCM_FCOE_SQ_PROD_CMD             DQ_XCM_AGG_VAL_SEL_WORD4
 #define DQ_XCM_FCOE_X_FERQ_PROD_CMD         DQ_XCM_AGG_VAL_SEL_WORD5
@@ -263,20 +293,13 @@
 #define DQ_XCM_AGG_FLG_SHIFT_CF23   7
 
 /* XCM agg counter flag selection (FW) */
-#define DQ_XCM_ETH_DQ_CF_CMD		(1 << \
-					DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_DQ_CF_CMD		(1 << \
-					DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD	(1 << \
-					DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_TERMINATE_CMD	(1 << \
-					DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD	(1 << \
-					DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_CORE_SLOW_PATH_CMD	(1 << \
-					DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD		(1 << \
-					DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD               (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD           (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD           (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD                (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD            (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD            (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD               (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
 #define DQ_XCM_FCOE_SLOW_PATH_CMD           (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
 #define DQ_XCM_ISCSI_DQ_FLUSH_CMD           (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
 #define DQ_XCM_ISCSI_SLOW_PATH_CMD          (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
@@ -341,19 +364,21 @@
 #define DQ_PWM_OFFSET_UCM_FLAGS				0x69
 #define DQ_PWM_OFFSET_TCM_FLAGS				0x6B
 
-#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD		(DQ_PWM_OFFSET_XCM16_BASE + 2)
+#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD			(DQ_PWM_OFFSET_XCM16_BASE + 2)
 #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT	(DQ_PWM_OFFSET_UCM32_BASE)
 #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT	(DQ_PWM_OFFSET_UCM16_4)
-#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT	(DQ_PWM_OFFSET_UCM16_BASE + 2)
-#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS	(DQ_PWM_OFFSET_UCM_FLAGS)
-#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD		(DQ_PWM_OFFSET_TCM16_BASE + 1)
-#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD		(DQ_PWM_OFFSET_TCM16_BASE + 3)
-
-#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \
+#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT		(DQ_PWM_OFFSET_UCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS		(DQ_PWM_OFFSET_UCM_FLAGS)
+#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD			(DQ_PWM_OFFSET_TCM16_BASE + 1)
+#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD			(DQ_PWM_OFFSET_TCM16_BASE + 3)
+/* DQ_DEMS_AGG_VAL_BASE**/
+#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE	\
+	(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+
+#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD			\
 	(DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2)
-#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \
-	(DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
-#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD	\
+#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT	(DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
+#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD			\
 	(DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1)
 
 #define DQ_REGION_SHIFT				        (12)
@@ -369,17 +394,19 @@
 /*****************/
 
 /* number of TX queues in the QM */
-#define MAX_QM_TX_QUEUES_K2	512
-#define MAX_QM_TX_QUEUES_BB	448
-#define MAX_QM_TX_QUEUES	MAX_QM_TX_QUEUES_K2
+#define MAX_QM_TX_QUEUES_K2			512
+#define MAX_QM_TX_QUEUES_BB			448
+#define MAX_QM_TX_QUEUES_E5			MAX_QM_TX_QUEUES_K2
+#define MAX_QM_TX_QUEUES			MAX_QM_TX_QUEUES_K2
 
 /* number of Other queues in the QM */
-#define MAX_QM_OTHER_QUEUES_BB	64
-#define MAX_QM_OTHER_QUEUES_K2	128
-#define MAX_QM_OTHER_QUEUES	MAX_QM_OTHER_QUEUES_K2
+#define MAX_QM_OTHER_QUEUES_BB		64
+#define MAX_QM_OTHER_QUEUES_K2		128
+#define MAX_QM_OTHER_QUEUES_E5		MAX_QM_OTHER_QUEUES_K2
+#define MAX_QM_OTHER_QUEUES			MAX_QM_OTHER_QUEUES_K2
 
 /* number of queues in a PF queue group */
-#define QM_PF_QUEUE_GROUP_SIZE	8
+#define QM_PF_QUEUE_GROUP_SIZE		8
 
 /* the size of a single queue element in bytes */
 #define QM_PQ_ELEMENT_SIZE			4
@@ -387,14 +414,12 @@
 /* base number of Tx PQs in the CM PQ representation.
  * should be used when storing PQ IDs in CM PQ registers and context
  */
-#define CM_TX_PQ_BASE	0x200
-
-/* number of global Vport/QCN rate limiters */
-#define MAX_QM_GLOBAL_RLS			256
+#define CM_TX_PQ_BASE               0x200
 
 /* number of global rate limiters */
-#define MAX_QM_GLOBAL_RLS		256
-#define COMMON_MAX_QM_GLOBAL_RLS	(MAX_QM_GLOBAL_RLS)
+#define MAX_QM_GLOBAL_RLS_E4		256
+#define MAX_QM_GLOBAL_RLS_E5		512
+#define COMMON_MAX_QM_GLOBAL_RLS	(MAX_QM_GLOBAL_RLS_E5)
 
 /* QM registers data */
 #define QM_LINE_CRD_REG_WIDTH		16
@@ -402,9 +427,9 @@
 #define QM_BYTE_CRD_REG_WIDTH		24
 #define QM_BYTE_CRD_REG_SIGN_BIT	(1 << (QM_BYTE_CRD_REG_WIDTH - 1))
 #define QM_WFQ_CRD_REG_WIDTH		32
-#define QM_WFQ_CRD_REG_SIGN_BIT		(1U << (QM_WFQ_CRD_REG_WIDTH - 1))
-#define QM_RL_CRD_REG_WIDTH		32
-#define QM_RL_CRD_REG_SIGN_BIT		(1U << (QM_RL_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_SIGN_BIT		(1U << (QM_WFQ_CRD_REG_WIDTH - 1)) /* @DPDK */
+#define QM_RL_CRD_REG_WIDTH			32
+#define QM_RL_CRD_REG_SIGN_BIT		(1U << (QM_RL_CRD_REG_WIDTH - 1)) /* @DPDK */
 
 /*****************/
 /* CAU CONSTANTS */
@@ -414,14 +439,18 @@
 #define CAU_FSM_ETH_TX  1
 
 /* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB    12
-#define MAX_PIS_PER_SB	 PIS_PER_SB
+#define PIS_PER_SB_E4			12
+#define PIS_PER_SB_E5			10
+
+#define PIS_PER_SB_PADDING_E5	2
+#define MAX_PIS_PER_SB	 PIS_PER_SB_E4
+
 
 /* fsm is stopped or not valid for this sb */
 #define CAU_HC_STOPPED_STATE		3
-/* fsm is working without interrupt coalescing for this sb*/
+/* fsm is working without interrupt coalescing for this sb */
 #define CAU_HC_DISABLE_STATE		4
-/* fsm is working with interrupt coalescing for this sb*/
+/* fsm is working with interrupt coalescing for this sb */
 #define CAU_HC_ENABLE_STATE			0
 
 
@@ -429,40 +458,47 @@
 /* IGU CONSTANTS */
 /*****************/
 
-#define MAX_SB_PER_PATH_K2			(368)
-#define MAX_SB_PER_PATH_BB			(288)
-#define MAX_TOT_SB_PER_PATH			MAX_SB_PER_PATH_K2
+#define MAX_SB_PER_PATH_K2					(368)
+#define MAX_SB_PER_PATH_BB					(288)
+#define MAX_SB_PER_PATH_E5_BC_MODE			(496)
+#define MAX_SB_PER_PATH_E5					(512)
+#define MAX_TOT_SB_PER_PATH					MAX_SB_PER_PATH_E5
 
-#define MAX_SB_PER_PF_MIMD			129
-#define MAX_SB_PER_PF_SIMD			64
-#define MAX_SB_PER_VF				64
+#define MAX_SB_PER_PF_MIMD					129
+#define MAX_SB_PER_PF_SIMD					64
+#define MAX_SB_PER_VF						64
 
 /* Memory addresses on the BAR for the IGU Sub Block */
-#define IGU_MEM_BASE				0x0000
+#define IGU_MEM_BASE						0x0000
 
-#define IGU_MEM_MSIX_BASE			0x0000
-#define IGU_MEM_MSIX_UPPER			0x0101
-#define IGU_MEM_MSIX_RESERVED_UPPER		0x01ff
+#define IGU_MEM_MSIX_BASE					0x0000
+#define IGU_MEM_MSIX_UPPER					0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER			0x01ff
 
-#define IGU_MEM_PBA_MSIX_BASE			0x0200
-#define IGU_MEM_PBA_MSIX_UPPER			0x0202
+#define IGU_MEM_PBA_MSIX_BASE				0x0200
+#define IGU_MEM_PBA_MSIX_UPPER				0x0202
 #define IGU_MEM_PBA_MSIX_RESERVED_UPPER		0x03ff
 
-#define IGU_CMD_INT_ACK_BASE			0x0400
+#define IGU_CMD_INT_ACK_BASE				0x0400
 #define IGU_CMD_INT_ACK_RESERVED_UPPER		0x05ff
 
-#define IGU_CMD_ATTN_BIT_UPD_UPPER		0x05f0
-#define IGU_CMD_ATTN_BIT_SET_UPPER		0x05f1
-#define IGU_CMD_ATTN_BIT_CLR_UPPER		0x05f2
+#define IGU_CMD_ATTN_BIT_UPD_UPPER			0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER			0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER			0x05f2
 
 #define IGU_REG_SISR_MDPC_WMASK_UPPER		0x05f3
 #define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER	0x05f4
 #define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER	0x05f5
 #define IGU_REG_SISR_MDPC_WOMASK_UPPER		0x05f6
 
-#define IGU_CMD_PROD_UPD_BASE			0x0600
+#define IGU_CMD_PROD_UPD_BASE				0x0600
 #define IGU_CMD_PROD_UPD_RESERVED_UPPER		0x07ff
 
+#define IGU_CMD_INT_ACK_E5_BASE				0x1000
+#define IGU_CMD_INT_ACK_RESERVED_E5_UPPER	0x17FF
+
+#define IGU_CMD_PROD_UPD_E5_BASE			0x1800
+#define IGU_CMD_PROD_UPD_RESERVED_E5_UPPER	0x1FFF
 /*****************/
 /* PXP CONSTANTS */
 /*****************/
@@ -479,110 +515,92 @@
 #define PXP_BAR_DQ                                          1
 
 /* PTT and GTT */
-#define PXP_PER_PF_ENTRY_SIZE		8
-#define PXP_NUM_GLOBAL_WINDOWS		243
-#define PXP_GLOBAL_ENTRY_SIZE		4
-#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH		4
-#define PXP_PF_WINDOW_ADMIN_START	0
-#define PXP_PF_WINDOW_ADMIN_LENGTH	0x1000
-#define PXP_PF_WINDOW_ADMIN_END		(PXP_PF_WINDOW_ADMIN_START + \
-				PXP_PF_WINDOW_ADMIN_LENGTH - 1)
-#define PXP_PF_WINDOW_ADMIN_PER_PF_START	0
-#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH	(PXP_NUM_PF_WINDOWS * \
-						 PXP_PER_PF_ENTRY_SIZE)
-#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
-					PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
-#define PXP_PF_WINDOW_ADMIN_GLOBAL_START	0x200
-#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH	(PXP_NUM_GLOBAL_WINDOWS * \
-						 PXP_GLOBAL_ENTRY_SIZE)
-#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
-		(PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
-		 PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
-#define PXP_PF_GLOBAL_PRETEND_ADDR	0x1f0
-#define PXP_PF_ME_OPAQUE_MASK_ADDR	0xf4
-#define PXP_PF_ME_OPAQUE_ADDR		0x1f8
-#define PXP_PF_ME_CONCRETE_ADDR		0x1fc
-
-#define PXP_NUM_PF_WINDOWS		12
-
-#define PXP_EXTERNAL_BAR_PF_WINDOW_START	0x1000
-#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM		PXP_NUM_PF_WINDOWS
-#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE	0x1000
-#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
-	(PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
-	 PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
-#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
-	(PXP_EXTERNAL_BAR_PF_WINDOW_START + \
-	 PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
-
-#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
-	(PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
-#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM		PXP_NUM_GLOBAL_WINDOWS
-#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE	0x1000
-#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
-	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
-	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
-#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
-	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
-	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+#define PXP_PER_PF_ENTRY_SIZE                               8
+#define PXP_NUM_GLOBAL_WINDOWS                              243
+#define PXP_GLOBAL_ENTRY_SIZE                               4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH                     4
+#define PXP_PF_WINDOW_ADMIN_START                           0
+#define PXP_PF_WINDOW_ADMIN_LENGTH                          0x1000
+#define PXP_PF_WINDOW_ADMIN_END                             \
+	(PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START                    0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH                   \
+	(PXP_NUM_PF_WINDOWS * PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END                      \
+	(PXP_PF_WINDOW_ADMIN_PER_PF_START + PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START                    0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH                   \
+	(PXP_NUM_GLOBAL_WINDOWS * PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END                      \
+	(PXP_PF_WINDOW_ADMIN_GLOBAL_START + PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR                          0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR                          0xf4
+#define PXP_PF_ME_OPAQUE_ADDR                               0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR                             0x1fc
+
+#define PXP_NUM_PF_WINDOWS                                  12
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START                    0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM                      PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE              0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH                   \
+	(PXP_EXTERNAL_BAR_PF_WINDOW_NUM * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END                      \
+	(PXP_EXTERNAL_BAR_PF_WINDOW_START + PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START                (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM                  PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE          0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH               \
+	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END                  \
+	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
 
 /* PF BAR */
 #define PXP_BAR0_START_GRC                      0x0000
 #define PXP_BAR0_GRC_LENGTH                     0x1C00000
-#define PXP_BAR0_END_GRC                        \
-	(PXP_BAR0_START_GRC + PXP_BAR0_GRC_LENGTH - 1)
+#define PXP_BAR0_END_GRC                        (PXP_BAR0_START_GRC + PXP_BAR0_GRC_LENGTH - 1)
 
 #define PXP_BAR0_START_IGU                      0x1C00000
 #define PXP_BAR0_IGU_LENGTH                     0x10000
-#define PXP_BAR0_END_IGU                        \
-	(PXP_BAR0_START_IGU + PXP_BAR0_IGU_LENGTH - 1)
+#define PXP_BAR0_END_IGU                        (PXP_BAR0_START_IGU + PXP_BAR0_IGU_LENGTH - 1)
 
 #define PXP_BAR0_START_TSDM                     0x1C80000
 #define PXP_BAR0_SDM_LENGTH                     0x40000
 #define PXP_BAR0_SDM_RESERVED_LENGTH            0x40000
-#define PXP_BAR0_END_TSDM                       \
-	(PXP_BAR0_START_TSDM + PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_TSDM                       (PXP_BAR0_START_TSDM + PXP_BAR0_SDM_LENGTH - 1)
 
 #define PXP_BAR0_START_MSDM                     0x1D00000
-#define PXP_BAR0_END_MSDM                       \
-	(PXP_BAR0_START_MSDM + PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_MSDM                       (PXP_BAR0_START_MSDM + PXP_BAR0_SDM_LENGTH - 1)
 
 #define PXP_BAR0_START_USDM                     0x1D80000
-#define PXP_BAR0_END_USDM                       \
-	(PXP_BAR0_START_USDM + PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_USDM                       (PXP_BAR0_START_USDM + PXP_BAR0_SDM_LENGTH - 1)
 
 #define PXP_BAR0_START_XSDM                     0x1E00000
-#define PXP_BAR0_END_XSDM                       \
-	(PXP_BAR0_START_XSDM + PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_XSDM                       (PXP_BAR0_START_XSDM + PXP_BAR0_SDM_LENGTH - 1)
 
 #define PXP_BAR0_START_YSDM                     0x1E80000
-#define PXP_BAR0_END_YSDM                       \
-	(PXP_BAR0_START_YSDM + PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_YSDM                       (PXP_BAR0_START_YSDM + PXP_BAR0_SDM_LENGTH - 1)
 
 #define PXP_BAR0_START_PSDM                     0x1F00000
-#define PXP_BAR0_END_PSDM                       \
-	(PXP_BAR0_START_PSDM + PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_PSDM                       (PXP_BAR0_START_PSDM + PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_FIRST_INVALID_ADDRESS          \
-	(PXP_BAR0_END_PSDM + 1)
+#define PXP_BAR0_FIRST_INVALID_ADDRESS          (PXP_BAR0_END_PSDM + 1)
 
 /* VF BAR */
 #define PXP_VF_BAR0                             0
 
 #define PXP_VF_BAR0_START_IGU                   0
 #define PXP_VF_BAR0_IGU_LENGTH                  0x3000
-#define PXP_VF_BAR0_END_IGU                     \
-	(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+#define PXP_VF_BAR0_END_IGU                     (PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
 
 #define PXP_VF_BAR0_START_DQ                    0x3000
 #define PXP_VF_BAR0_DQ_LENGTH                   0x200
 #define PXP_VF_BAR0_DQ_OPAQUE_OFFSET            0
 #define PXP_VF_BAR0_ME_OPAQUE_ADDRESS           \
 	(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS         \
-	(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
-#define PXP_VF_BAR0_END_DQ                      \
-	(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS         (PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ                      (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
 
 #define PXP_VF_BAR0_START_TSDM_ZONE_B           0x3200
 #define PXP_VF_BAR0_SDM_LENGTH_ZONE_B           0x200
@@ -611,8 +629,7 @@
 
 #define PXP_VF_BAR0_START_GRC                   0x3E00
 #define PXP_VF_BAR0_GRC_LENGTH                  0x200
-#define PXP_VF_BAR0_END_GRC                     \
-	(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+#define PXP_VF_BAR0_END_GRC                     (PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
 
 #define PXP_VF_BAR0_START_SDM_ZONE_A            0x4000
 #define PXP_VF_BAR0_END_SDM_ZONE_A              0x10000
@@ -627,14 +644,18 @@
 #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN          12
 #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER         1024
 
-// ILT Records
+/* ILT Records */
 #define PXP_NUM_ILT_RECORDS_BB 7600
 #define PXP_NUM_ILT_RECORDS_K2 11000
-#define MAX_NUM_ILT_RECORDS \
-	OSAL_MAX_T(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+#define MAX_NUM_ILT_RECORDS_E4 OSAL_MAX_T(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
 
-// Host Interface
-#define PXP_QUEUES_ZONE_MAX_NUM	320
+#define PXP_NUM_ILT_RECORDS_E5 13664
+#define MAX_NUM_ILT_RECORDS_E5 PXP_NUM_ILT_RECORDS_E5
+
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM_E4	320
+#define PXP_QUEUES_ZONE_MAX_NUM_E5	512
 
 
 /*****************/
@@ -653,47 +674,56 @@
 #define SDM_OP_GEN_TRIG_INDICATE_ERROR	6
 #define SDM_OP_GEN_TRIG_INC_ORDER_CNT	9
 
-/***********************************************************/
-/* Completion types                                        */
-/***********************************************************/
-
-#define SDM_COMP_TYPE_NONE		0
-#define SDM_COMP_TYPE_WAKE_THREAD	1
-#define SDM_COMP_TYPE_AGG_INT		2
-/* Send direct message to local CM and/or remote CMs. Destinations are defined
- * by vector in CompParams.
- */
-#define SDM_COMP_TYPE_CM		3
-#define SDM_COMP_TYPE_LOADER		4
-/* Send direct message to PXP (like "internal write" command) to write to remote
- * Storm RAM via remote SDM
- */
-#define SDM_COMP_TYPE_PXP		5
-/* Indicate error per thread */
-#define SDM_COMP_TYPE_INDICATE_ERROR	6
-#define SDM_COMP_TYPE_RELEASE_THREAD	7
-/* Write to local RAM as a completion */
-#define SDM_COMP_TYPE_RAM		8
-#define SDM_COMP_TYPE_INC_ORDER_CNT	9 /* Applicable only for E4 */
+/**************************************************************/
+/* Completion types                                           */
+/**************************************************************/
 
+#define SDM_COMP_TYPE_NONE				0
+#define SDM_COMP_TYPE_WAKE_THREAD		1
+#define SDM_COMP_TYPE_AGG_INT			2
+/* Send direct message to local CM and/or remote CMs. Destinations are defined by vector in
+ * CompParams.
+ */
+#define SDM_COMP_TYPE_CM				3
+#define SDM_COMP_TYPE_LOADER			4
+/* Send direct message to PXP (like "internal write" command) to write to remote Storm RAM via
+ * remote SDM
+ */
+#define SDM_COMP_TYPE_PXP				5
+#define SDM_COMP_TYPE_INDICATE_ERROR	6		/* Indicate error per thread */
+#define SDM_COMP_TYPE_RELEASE_THREAD	7		/* Obsolete in E5 */
+/* Write to local RAM as a completion */
+#define SDM_COMP_TYPE_RAM				8
+#define SDM_COMP_TYPE_INC_ORDER_CNT		9		/* Applicable only for E5 */
 
 /******************/
 /* PBF CONSTANTS  */
 /******************/
 
 /* Number of PBF command queue lines. */
-#define PBF_MAX_CMD_LINES 3328 /* Each line is 256b */
+#define PBF_MAX_CMD_LINES_E4 3328 /* Each line is 256b */
+#define PBF_MAX_CMD_LINES_E5 5280 /* Each line is 512b */
 
 /* Number of BTB blocks. Each block is 256B. */
 #define BTB_MAX_BLOCKS_BB 1440 /* 2880 blocks of 128B */
 #define BTB_MAX_BLOCKS_K2 1840 /* 3680 blocks of 128B */
-#define BTB_MAX_BLOCKS 1440
+#define BTB_MAX_BLOCKS_E5 2640
 
 /*****************/
 /* PRS CONSTANTS */
 /*****************/
 
 #define PRS_GFT_CAM_LINES_NO_MATCH  31
+/*****************/
+/* GFS CONSTANTS */
+/*****************/
+
+
+#define SRC_HEADER_MAGIC_NUMBER		        0x600DFEED
+#define GFS_HASH_SIZE_IN_BYTES          16
+#define GFS_PROFILE_MAX_ENTRIES         256
+
+
 
 /*
  * Interrupt coalescing TimeSet
@@ -708,16 +738,12 @@ struct coalescing_timeset {
 #define COALESCING_TIMESET_VALID_SHIFT   7
 };
 
+
 struct common_queue_zone {
 	__le16 ring_drv_data_consumer;
 	__le16 reserved;
 };
 
-struct nvmf_eqe_data {
-	__le16 icid /* The connection ID for which the EQE is written. */;
-	u8 reserved0[6] /* Alignment to line */;
-};
-
 
 /*
  * ETH Rx producers data
@@ -741,8 +767,7 @@ struct tcp_ulp_connect_done_params {
 struct iscsi_connect_done_results {
 	__le16 icid /* Context ID of the connection */;
 	__le16 conn_id /* Driver connection ID */;
-/* decided tcp params after connect done */
-	struct tcp_ulp_connect_done_params params;
+	struct tcp_ulp_connect_done_params params /* decided tcp params after connect done */;
 };
 
 
@@ -750,11 +775,10 @@ struct iscsi_eqe_data {
 	__le16 icid /* Context ID of the connection */;
 	__le16 conn_id /* Driver connection ID */;
 	__le16 reserved;
-/* error code - relevant only if the opcode indicates its an error */
-	u8 error_code;
+	u8 error_code /* error code - relevant only if the opcode indicates its an error */;
 	u8 error_pdu_opcode_reserved;
-/* The processed PDUs opcode on which happened the error - updated for specific
- * error codes, by default=0xFF
+/* The processed PDUs opcode on which happened the error - updated for specific error codes, by
+ * default=0xFF
  */
 #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK        0x3F
 #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT       0
@@ -776,7 +800,38 @@ enum mf_mode {
 	MAX_MF_MODE
 };
 
-/* Per-protocol connection types */
+
+struct nvmf_eqe_data {
+	__le16 icid /* The connection ID for which the EQE is written. */;
+	u8 reserved0[6] /* Alignment to line */;
+};
+
+
+/* Per protocol packet duplication enable bit vector. If set, duplicate offloaded traffic to LL2
+ * debug queue.
+ */
+struct offload_pkt_dup_enable {
+	__le16 enable_vector;
+#define OFFLOAD_PKT_DUP_ENABLE_ISCSI_MASK      0x1 /* iSCSI */
+#define OFFLOAD_PKT_DUP_ENABLE_ISCSI_SHIFT     0
+#define OFFLOAD_PKT_DUP_ENABLE_FCOE_MASK       0x1 /* FCoE */
+#define OFFLOAD_PKT_DUP_ENABLE_FCOE_SHIFT      1
+#define OFFLOAD_PKT_DUP_ENABLE_ROCE_MASK       0x1 /* RoCE */
+#define OFFLOAD_PKT_DUP_ENABLE_ROCE_SHIFT      2
+#define OFFLOAD_PKT_DUP_ENABLE_LL2_MASK        0x1 /* LL2 */
+#define OFFLOAD_PKT_DUP_ENABLE_LL2_SHIFT       3
+#define OFFLOAD_PKT_DUP_ENABLE_RESERVED_MASK   0x1
+#define OFFLOAD_PKT_DUP_ENABLE_RESERVED_SHIFT  4
+#define OFFLOAD_PKT_DUP_ENABLE_IWARP_MASK      0x1 /* iWARP */
+#define OFFLOAD_PKT_DUP_ENABLE_IWARP_SHIFT     5
+#define OFFLOAD_PKT_DUP_ENABLE_RESERVED1_MASK  0x3FF
+#define OFFLOAD_PKT_DUP_ENABLE_RESERVED1_SHIFT 6
+};
+
+
+/*
+ * Per-protocol connection types
+ */
 enum protocol_type {
 	PROTOCOLID_ISCSI /* iSCSI */,
 	PROTOCOLID_FCOE /* FCoE */,
@@ -794,6 +849,15 @@ enum protocol_type {
 };
 
 
+/*
+ * Pstorm packet duplication config
+ */
+struct pstorm_pkt_dup_cfg {
+	struct offload_pkt_dup_enable enable /* TX packet duplication per protocol enable */;
+	__le16 reserved[3];
+};
+
+
 struct regpair {
 	__le32 lo /* low word for reg-pair */;
 	__le32 hi /* high word for reg-pair */;
@@ -820,12 +884,25 @@ struct rdma_eqe_suspend_qp {
  */
 union rdma_eqe_data {
 	struct regpair async_handle /* Host handle for the Async Completions */;
-	/* RoCE Destroy Event Data */
-	struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
-	/* RoCE Suspend QP Event Data */
-	struct rdma_eqe_suspend_qp rdma_suspend_qp_data;
+	struct rdma_eqe_destroy_qp rdma_destroy_qp_data /* RoCE Destroy Event Data */;
+	struct rdma_eqe_suspend_qp rdma_suspend_qp_data /* RoCE Suspend QP Event Data */;
 };
 
+
+
+
+
+/*
+ * Tstorm packet duplication config
+ */
+struct tstorm_pkt_dup_cfg {
+/* RX and loopback packet duplication per protocol enable */
+	struct offload_pkt_dup_enable enable;
+	__le16 reserved;
+	__le32 cid /* Light L2 debug queue CID. */;
+};
+
+
 struct tstorm_queue_zone {
 	__le32 reserved[2];
 };
@@ -835,8 +912,7 @@ struct tstorm_queue_zone {
  * Ustorm Queue Zone
  */
 struct ustorm_eth_queue_zone {
-/* Rx interrupt coalescing TimeSet */
-	struct coalescing_timeset int_coalescing_timeset;
+	struct coalescing_timeset int_coalescing_timeset /* Rx interrupt coalescing TimeSet */;
 	u8 reserved[3];
 };
 
@@ -846,28 +922,30 @@ struct ustorm_queue_zone {
 	struct common_queue_zone common;
 };
 
-/* status block structure */
+
+/*
+ * status block structure
+ */
 struct cau_pi_entry {
 	__le32 prod;
-/* A per protocol indexPROD value. */
-#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF /* A per protocol indexPROD value. */
 #define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
-/* This value determines the TimeSet that the PI is associated with */
+/* This value determines the TimeSet that the PI is associated with  */
 #define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
 #define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
-/* Select the FSM within the SB */
-#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
+#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1 /* Select the FSM within the SB */
 #define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
-/* Select the FSM within the SB */
-#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
+#define CAU_PI_ENTRY_RESERVED_MASK    0xFF /* Select the FSM within the SB */
 #define CAU_PI_ENTRY_RESERVED_SHIFT   24
 };
 
-/* status block structure */
+
+/*
+ * status block structure
+ */
 struct cau_sb_entry {
 	__le32 data;
-/* The SB PROD index which is sent to the IGU. */
-#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF /* The SB PROD index which is sent to the IGU. */
 #define CAU_SB_ENTRY_SB_PROD_SHIFT     0
 #define CAU_SB_ENTRY_STATE0_MASK       0xF /* RX state */
 #define CAU_SB_ENTRY_STATE0_SHIFT      24
@@ -880,10 +958,10 @@ struct cau_sb_entry {
 /* Indicates the TX TimeSet that this SB is associated with. */
 #define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
 #define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
-/* This value will determine the RX FSM timer resolution in ticks */
+/* This value will determine the RX FSM timer resolution in ticks  */
 #define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
 #define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
-/* This value will determine the TX FSM timer resolution in ticks */
+/* This value will determine the TX FSM timer resolution in ticks  */
 #define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
 #define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
 #define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
@@ -892,8 +970,8 @@ struct cau_sb_entry {
 #define CAU_SB_ENTRY_VF_VALID_SHIFT    26
 #define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
 #define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
-/* If set then indicates that the TPH STAG is equal to the SB number. Otherwise
- * the STAG will be equal to all ones.
+/* If set then indicates that the TPH STAG is equal to the SB number. Otherwise the STAG will be
+ * equal to all ones.
  */
 #define CAU_SB_ENTRY_TPH_MASK          0x1
 #define CAU_SB_ENTRY_TPH_SHIFT         31
@@ -901,8 +979,7 @@ struct cau_sb_entry {
 
 
 /*
- * Igu cleanup bit values to distinguish between clean or producer consumer
- * update.
+ * Igu cleanup bit values to distinguish between clean or producer consumer update.
  */
 enum command_type_bit {
 	IGU_COMMAND_TYPE_NOP = 0,
@@ -911,28 +988,29 @@ enum command_type_bit {
 };
 
 
-/* core doorbell data */
+/*
+ * core doorbell data
+ */
 struct core_db_data {
 	u8 params;
-/* destination of doorbell (use enum db_dest) */
-#define CORE_DB_DATA_DEST_MASK         0x3
+#define CORE_DB_DATA_DEST_MASK         0x3 /* destination of doorbell (use enum db_dest) */
 #define CORE_DB_DATA_DEST_SHIFT        0
-/* aggregative command to CM (use enum db_agg_cmd_sel) */
-#define CORE_DB_DATA_AGG_CMD_MASK      0x3
+#define CORE_DB_DATA_AGG_CMD_MASK      0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
 #define CORE_DB_DATA_AGG_CMD_SHIFT     2
 #define CORE_DB_DATA_BYPASS_EN_MASK    0x1 /* enable QM bypass */
 #define CORE_DB_DATA_BYPASS_EN_SHIFT   4
 #define CORE_DB_DATA_RESERVED_MASK     0x1
 #define CORE_DB_DATA_RESERVED_SHIFT    5
-/* aggregative value selection */
-#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3 /* aggregative value selection */
 #define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
-/* bit for every DQ counter flags in CM context that DQ can increment */
-	u8	agg_flags;
-	__le16	spq_prod;
+	u8 agg_flags /* bit for every DQ counter flags in CM context that DQ can increment */;
+	__le16 spq_prod;
 };
 
-/* Enum of doorbell aggregative command selection */
+
+/*
+ * Enum of doorbell aggregative command selection
+ */
 enum db_agg_cmd_sel {
 	DB_AGG_CMD_NOP /* No operation */,
 	DB_AGG_CMD_SET /* Set the value */,
@@ -941,7 +1019,10 @@ enum db_agg_cmd_sel {
 	MAX_DB_AGG_CMD_SEL
 };
 
-/* Enum of doorbell destination */
+
+/*
+ * Enum of doorbell destination
+ */
 enum db_dest {
 	DB_DEST_XCM /* TX doorbell to XCM */,
 	DB_DEST_UCM /* RX doorbell to UCM */,
@@ -957,42 +1038,37 @@ enum db_dest {
 enum db_dpm_type {
 	DPM_LEGACY /* Legacy DPM- to Xstorm RAM */,
 	DPM_RDMA /* RDMA DPM (only RoCE in E4) - to NIG */,
-/* L2 DPM inline- to PBF, with packet data on doorbell */
-	DPM_L2_INLINE,
+	DPM_L2_INLINE /* L2 DPM inline- to PBF, with packet data on doorbell */,
 	DPM_L2_BD /* L2 DPM with BD- to PBF, with TX BD data on doorbell */,
 	MAX_DB_DPM_TYPE
 };
 
+
 /*
- * Structure for doorbell data, in L2 DPM mode, for the first doorbell in a DPM
- * burst
+ * Structure for doorbell data, in L2 DPM mode, for the first doorbell in a DPM burst
  */
 struct db_l2_dpm_data {
 	__le16 icid /* internal CID */;
 	__le16 bd_prod /* bd producer value to update */;
 	__le32 params;
-/* Size in QWORD-s of the DPM burst */
-#define DB_L2_DPM_DATA_SIZE_MASK        0x3F
-#define DB_L2_DPM_DATA_SIZE_SHIFT       0
-/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type)
- */
-#define DB_L2_DPM_DATA_DPM_TYPE_MASK    0x3
-#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT   6
-#define DB_L2_DPM_DATA_NUM_BDS_MASK     0xFF /* number of BD-s */
-#define DB_L2_DPM_DATA_NUM_BDS_SHIFT    8
-/* size of the packet to be transmitted in bytes */
-#define DB_L2_DPM_DATA_PKT_SIZE_MASK    0x7FF
-#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT   16
-#define DB_L2_DPM_DATA_RESERVED0_MASK   0x1
-#define DB_L2_DPM_DATA_RESERVED0_SHIFT  27
-/* In DPM_L2_BD mode: the number of SGE-s */
-#define DB_L2_DPM_DATA_SGE_NUM_MASK     0x7
-#define DB_L2_DPM_DATA_SGE_NUM_SHIFT    28
-/* Flag indicating whether to enable GFS search */
-#define DB_L2_DPM_DATA_RESERVED1_MASK   0x1
-#define DB_L2_DPM_DATA_RESERVED1_SHIFT  31
+#define DB_L2_DPM_DATA_SIZE_MASK         0x3F /* Size in QWORD-s of the DPM burst */
+#define DB_L2_DPM_DATA_SIZE_SHIFT        0
+/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type) */
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK     0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT    6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK      0xFF /* number of BD-s */
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT     8
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK     0x7FF /* size of the packet to be transmitted in bytes */
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT    16
+#define DB_L2_DPM_DATA_RESERVED0_MASK    0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT   27
+#define DB_L2_DPM_DATA_SGE_NUM_MASK      0x7 /* In DPM_L2_BD mode: the number of SGE-s */
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT     28
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK  0x1 /* Flag indicating whether to enable TGFS search */
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31
 };
 
+
 /*
  * Structure for SGE in a DPM doorbell of type DPM_L2_BD
  */
@@ -1000,48 +1076,31 @@ struct db_l2_dpm_sge {
 	struct regpair addr /* Single continuous buffer */;
 	__le16 nbytes /* Number of bytes in this BD. */;
 	__le16 bitfields;
-/* The TPH STAG index value */
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK  0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK  0x1FF /* The TPH STAG index value */
 #define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
 #define DB_L2_DPM_SGE_RESERVED0_MASK     0x3
 #define DB_L2_DPM_SGE_RESERVED0_SHIFT    9
-/* Indicate if ST hint is requested or not */
-#define DB_L2_DPM_SGE_ST_VALID_MASK      0x1
+#define DB_L2_DPM_SGE_ST_VALID_MASK      0x1 /* Indicate if ST hint is requested or not */
 #define DB_L2_DPM_SGE_ST_VALID_SHIFT     11
 #define DB_L2_DPM_SGE_RESERVED1_MASK     0xF
 #define DB_L2_DPM_SGE_RESERVED1_SHIFT    12
 	__le32 reserved2;
 };
 
-/* Structure for doorbell address, in legacy mode */
+
+/*
+ * Structure for doorbell address, in legacy mode
+ */
 struct db_legacy_addr {
 	__le32 addr;
 #define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
 #define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
-/* doorbell extraction mode specifier- 0 if not used */
-#define DB_LEGACY_ADDR_DEMS_MASK       0x7
+#define DB_LEGACY_ADDR_DEMS_MASK       0x7 /* doorbell extraction mode specifier- 0 if not used */
 #define DB_LEGACY_ADDR_DEMS_SHIFT      2
 #define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF /* internal CID */
 #define DB_LEGACY_ADDR_ICID_SHIFT      5
 };
 
-/*
- * Structure for doorbell address, in PWM mode
- */
-struct db_pwm_addr {
-	__le32 addr;
-#define DB_PWM_ADDR_RESERVED0_MASK  0x7
-#define DB_PWM_ADDR_RESERVED0_SHIFT 0
-/* Offset in PWM address space */
-#define DB_PWM_ADDR_OFFSET_MASK     0x7F
-#define DB_PWM_ADDR_OFFSET_SHIFT    3
-#define DB_PWM_ADDR_WID_MASK        0x3 /* Window ID */
-#define DB_PWM_ADDR_WID_SHIFT       10
-#define DB_PWM_ADDR_DPI_MASK        0xFFFF /* Doorbell page ID */
-#define DB_PWM_ADDR_DPI_SHIFT       12
-#define DB_PWM_ADDR_RESERVED1_MASK  0xF
-#define DB_PWM_ADDR_RESERVED1_SHIFT 28
-};
 
 /*
  * Structure for doorbell address, in legacy mode, without DEMS
@@ -1056,37 +1115,23 @@ struct db_legacy_wo_dems_addr {
 
 
 /*
- * Parameters to RDMA firmware, passed in EDPM doorbell
+ * Structure for doorbell address, in PWM mode
  */
-struct db_rdma_dpm_params {
-	__le32 params;
-/* Size in QWORD-s of the DPM burst */
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK                0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT               0
-/* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK            0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT           6
-/* opcode for RDMA operation */
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK              0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT             8
-/* the size of the WQE payload in bytes */
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK            0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT           16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK           0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT          27
-/* RoCE ack request (will be set 1) */
-#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK         0x1
-#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT        28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK               0x1 /* RoCE S flag */
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT              29
-/* RoCE completion flag for FW use */
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK      0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT     30
-/* Connection type is iWARP */
-#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK  0x1
-#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+struct db_pwm_addr {
+	__le32 addr;
+#define DB_PWM_ADDR_RESERVED0_MASK  0x7
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+#define DB_PWM_ADDR_OFFSET_MASK     0x7F /* Offset in PWM address space */
+#define DB_PWM_ADDR_OFFSET_SHIFT    3
+#define DB_PWM_ADDR_WID_MASK        0x3 /* Window ID */
+#define DB_PWM_ADDR_WID_SHIFT       10
+#define DB_PWM_ADDR_DPI_MASK        0xFFFF /* Doorbell page ID */
+#define DB_PWM_ADDR_DPI_SHIFT       12
+#define DB_PWM_ADDR_RESERVED1_MASK  0xF
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
 };
 
+
 /*
  * Parameters to RDMA firmware, passed in EDPM doorbell
  */
@@ -1098,11 +1143,9 @@ struct db_rdma_24b_icid_dpm_params {
 /* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */
 #define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK            0x3
 #define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT           6
-/* opcode for RDMA operation */
-#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK              0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK              0xFF /* opcode for RDMA operation */
 #define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT             8
-/* ICID extension */
-#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK            0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK            0xFF /* ICID extension */
 #define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT           16
 /* Number of invalid bytes in last QWROD of the DPM transaction */
 #define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK        0x7
@@ -1110,41 +1153,571 @@ struct db_rdma_24b_icid_dpm_params {
 /* Flag indicating 24b icid mode is enabled */
 #define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK    0x1
 #define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT   27
-/* RoCE completion flag */
-#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK      0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK      0x1 /* RoCE completion flag */
 #define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT     28
-/* RoCE S flag */
-#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK               0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK               0x1 /* RoCE S flag */
 #define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT              29
 #define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK           0x1
 #define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT          30
-/* Connection type is iWARP */
-#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK  0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK  0x1 /* Connection type is iWARP */
 #define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
 };
 
 
 /*
- * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
- * DPM burst
+ * Parameters to RDMA firmware, passed in EDPM doorbell
+ */
+struct db_rdma_dpm_params {
+	__le32 params;
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK                0x3F /* Size in QWORD-s of the DPM burst */
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT               0
+/* Type of DPM transaction (DPM_RDMA) (use enum db_dpm_type) */
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK            0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT           6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK              0xFF /* opcode for RDMA operation */
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT             8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK            0x7FF /* the size of the WQE payload in bytes */
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT           16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK           0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT          27
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK         0x1 /* RoCE ack request (will be set 1) */
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT        28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK               0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT              29
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK      0x1 /* RoCE completion flag for FW use */
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT     30
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK  0x1 /* Connection type is iWARP */
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
+
+/*
+ * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a DPM burst
  */
 struct db_rdma_dpm_data {
 	__le16 icid /* internal CID */;
 	__le16 prod_val /* aggregated value to update */;
-/* parameters passed to RDMA firmware */
-	struct db_rdma_dpm_params params;
+	struct db_rdma_dpm_params params /* parameters passed to RDMA firmware */;
+};
+
+
+
+/*
+ * Rdif context
+ */
+struct e4_rdif_task_context {
+	__le32 initial_ref_tag;
+	__le16 app_tag_value;
+	__le16 app_tag_mask;
+	u8 flags0;
+#define E4_RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK             0x1
+#define E4_RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT            0
+#define E4_RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK      0x1
+#define E4_RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT     1
+#define E4_RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK            0x1 /* 0 = IP checksum, 1 = CRC */
+#define E4_RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT           2
+#define E4_RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK         0x1
+#define E4_RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT        3
+#define E4_RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK            0x3 /* 1/2/3 - Protection Type */
+#define E4_RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT           4
+#define E4_RDIF_TASK_CONTEXT_CRC_SEED_MASK                   0x1 /* 0=0x0000, 1=0xffff */
+#define E4_RDIF_TASK_CONTEXT_CRC_SEED_SHIFT                  6
+#define E4_RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK         0x1 /* Keep reference tag constant */
+#define E4_RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT        7
+	u8 partial_dif_data[7];
+	__le16 partial_crc_value;
+	__le16 partial_checksum_value;
+	__le32 offset_in_io;
+	__le16 flags1;
+#define E4_RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK             0x1
+#define E4_RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT            0
+#define E4_RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK           0x1
+#define E4_RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT          1
+#define E4_RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK           0x1
+#define E4_RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT          2
+#define E4_RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK              0x1
+#define E4_RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT             3
+#define E4_RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK            0x1
+#define E4_RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT           4
+#define E4_RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK            0x1
+#define E4_RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT           5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define E4_RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK              0x7
+#define E4_RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT             6
+#define E4_RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK             0x3 /* 0=None, 1=DIF, 2=DIX */
+#define E4_RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT            9
+/* DIF tag right at the beginning of DIF interval */
+#define E4_RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK            0x1
+#define E4_RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT           11
+#define E4_RDIF_TASK_CONTEXT_RESERVED0_MASK                  0x1
+#define E4_RDIF_TASK_CONTEXT_RESERVED0_SHIFT                 12
+#define E4_RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK          0x1 /* 0=None, 1=DIF */
+#define E4_RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT         13
+/* Forward application tag with mask */
+#define E4_RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK  0x1
+#define E4_RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+/* Forward reference tag with mask */
+#define E4_RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK  0x1
+#define E4_RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
+	__le16 state;
+#define E4_RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK    0xF
+#define E4_RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT   0
+#define E4_RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK  0xF
+#define E4_RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define E4_RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK                0x1
+#define E4_RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT               8
+#define E4_RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK          0x1
+#define E4_RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT         9
+/* mask for reference tag handling */
+#define E4_RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK               0xF
+#define E4_RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT              10
+#define E4_RDIF_TASK_CONTEXT_RESERVED1_MASK                  0x3
+#define E4_RDIF_TASK_CONTEXT_RESERVED1_SHIFT                 14
+	__le32 reserved2;
 };
 
-/* Igu interrupt command */
+
+/*
+ * Tdif context
+ */
+struct e4_tdif_task_context {
+	__le32 initial_ref_tag;
+	__le16 app_tag_value;
+	__le16 app_tag_mask;
+	__le16 partial_crc_value_b;
+	__le16 partial_checksum_value_b;
+	__le16 stateB;
+#define E4_TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK    0xF
+#define E4_TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT   0
+#define E4_TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK  0xF
+#define E4_TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define E4_TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK                0x1
+#define E4_TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT               8
+#define E4_TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK             0x1
+#define E4_TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT            9
+#define E4_TDIF_TASK_CONTEXT_RESERVED0_MASK                    0x3F
+#define E4_TDIF_TASK_CONTEXT_RESERVED0_SHIFT                   10
+	u8 reserved1;
+	u8 flags0;
+#define E4_TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK               0x1
+#define E4_TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT              0
+#define E4_TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK        0x1
+#define E4_TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT       1
+#define E4_TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK              0x1 /* 0 = IP checksum, 1 = CRC */
+#define E4_TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT             2
+#define E4_TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK           0x1
+#define E4_TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT          3
+#define E4_TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK              0x3 /* 1/2/3 - Protection Type */
+#define E4_TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT             4
+#define E4_TDIF_TASK_CONTEXT_CRC_SEED_MASK                     0x1 /* 0=0x0000, 1=0xffff */
+#define E4_TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                    6
+#define E4_TDIF_TASK_CONTEXT_RESERVED2_MASK                    0x1
+#define E4_TDIF_TASK_CONTEXT_RESERVED2_SHIFT                   7
+	__le32 flags1;
+#define E4_TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK               0x1
+#define E4_TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT              0
+#define E4_TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK             0x1
+#define E4_TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT            1
+#define E4_TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK             0x1
+#define E4_TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT            2
+#define E4_TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK                0x1
+#define E4_TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT               3
+#define E4_TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK              0x1
+#define E4_TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT             4
+#define E4_TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK              0x1
+#define E4_TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT             5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define E4_TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK                0x7
+#define E4_TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT               6
+#define E4_TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK               0x3 /* 0=None, 1=DIF, 2=DIX */
+#define E4_TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT              9
+/* DIF tag right at the beginning of DIF interval */
+#define E4_TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK              0x1
+#define E4_TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT             11
+#define E4_TDIF_TASK_CONTEXT_RESERVED3_MASK                    0x1 /* reserved */
+#define E4_TDIF_TASK_CONTEXT_RESERVED3_SHIFT                   12
+#define E4_TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK            0x1 /* 0=None, 1=DIF */
+#define E4_TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT           13
+#define E4_TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK    0xF
+#define E4_TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT   14
+#define E4_TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK  0xF
+#define E4_TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define E4_TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK                0x1
+#define E4_TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT               22
+#define E4_TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK          0x1
+#define E4_TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT         23
+/* mask for reference tag handling */
+#define E4_TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK                 0xF
+#define E4_TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT                24
+/* Forward application tag with mask */
+#define E4_TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK    0x1
+#define E4_TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT   28
+/* Forward reference tag with mask */
+#define E4_TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK    0x1
+#define E4_TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT   29
+#define E4_TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK           0x1 /* Keep reference tag constant */
+#define E4_TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT          30
+#define E4_TDIF_TASK_CONTEXT_RESERVED4_MASK                    0x1
+#define E4_TDIF_TASK_CONTEXT_RESERVED4_SHIFT                   31
+	__le32 offset_in_io_b;
+	__le16 partial_crc_value_a;
+	__le16 partial_checksum_value_a;
+	__le32 offset_in_io_a;
+	u8 partial_dif_data_a[8];
+	u8 partial_dif_data_b[8];
+};
+
+
+/*
+ * Rdif context
+ */
+struct e5_rdif_task_context {
+	__le32 initial_ref_tag;
+	__le16 app_tag_value;
+	__le16 app_tag_mask;
+	u8 flags0;
+#define E5_RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK             0x1
+#define E5_RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT            0
+#define E5_RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK      0x1
+#define E5_RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT     1
+#define E5_RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK            0x1 /* 0 = IP checksum, 1 = CRC */
+#define E5_RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT           2
+#define E5_RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK         0x1
+#define E5_RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT        3
+#define E5_RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK            0x3 /* 1/2/3 - Protection Type */
+#define E5_RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT           4
+/* 0=0x0000, 1=0xffff used only in reg_e4_backward_compatible_mode */
+#define E5_RDIF_TASK_CONTEXT_CRC_SEED_MASK                   0x1
+#define E5_RDIF_TASK_CONTEXT_CRC_SEED_SHIFT                  6
+#define E5_RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK         0x1 /* Keep reference tag constant */
+#define E5_RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT        7
+	u8 partial_dif_data[7];
+	__le16 partial_crc_value;
+	__le16 partial_checksum_value;
+	__le32 offset_in_io;
+	__le16 flags1;
+#define E5_RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK             0x1
+#define E5_RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT            0
+#define E5_RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK           0x1
+#define E5_RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT          1
+#define E5_RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK           0x1
+#define E5_RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT          2
+#define E5_RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK              0x1
+#define E5_RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT             3
+#define E5_RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK            0x1
+#define E5_RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT           4
+#define E5_RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK            0x1
+#define E5_RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT           5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define E5_RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK              0x7
+#define E5_RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT             6
+#define E5_RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK             0x3 /* 0=None, 1=DIF, 2=DIX */
+#define E5_RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT            9
+#define E5_RDIF_TASK_CONTEXT_RESERVED0_MASK                  0x3
+#define E5_RDIF_TASK_CONTEXT_RESERVED0_SHIFT                 11
+#define E5_RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK          0x1 /* 0=None, 1=DIF */
+#define E5_RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT         13
+/* Forward application tag with mask */
+#define E5_RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK  0x1
+#define E5_RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+/* Forward reference tag with mask */
+#define E5_RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK  0x1
+#define E5_RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
+	__le16 state;
+#define E5_RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK    0xF
+#define E5_RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT   0
+#define E5_RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK  0xF
+#define E5_RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define E5_RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK                0x1
+#define E5_RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT               8
+#define E5_RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK          0x1
+#define E5_RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT         9
+/* mask for reference tag handling; used only in reg_e4_backward_compatible_mode */
+#define E5_RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK               0xF
+#define E5_RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT              10
+#define E5_RDIF_TASK_CONTEXT_RESERVED1_MASK                  0x3
+#define E5_RDIF_TASK_CONTEXT_RESERVED1_SHIFT                 14
+/* mask for reference tag handling; used only in 0=reg_e4_backward_compatible_mode */
+	__le32 ref_tag_mask_e5;
+	__le16 crc_seed_e5 /* Full CRC seed; used only in 0=tdif_reg_e4_backward_compatible_mode */;
+	__le16 reserved2[7] /* padding to 128bit */;
+};
+
+
+/*
+ * Tdif context
+ */
+struct e5_tdif_task_context {
+	__le32 initial_ref_tag;
+	__le16 app_tag_value;
+	__le16 app_tag_mask;
+	__le16 partial_crc_value_b;
+	__le16 partial_checksum_value_b;
+	__le16 stateB;
+#define E5_TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK    0xF
+#define E5_TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT   0
+#define E5_TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK  0xF
+#define E5_TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define E5_TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK                0x1
+#define E5_TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT               8
+#define E5_TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK             0x1
+#define E5_TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT            9
+#define E5_TDIF_TASK_CONTEXT_RESERVED0_MASK                    0x3F
+#define E5_TDIF_TASK_CONTEXT_RESERVED0_SHIFT                   10
+	u8 reserved1;
+	u8 flags0;
+#define E5_TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK               0x1
+#define E5_TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT              0
+#define E5_TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK        0x1
+#define E5_TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT       1
+#define E5_TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK              0x1 /* 0 = IP checksum, 1 = CRC */
+#define E5_TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT             2
+#define E5_TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK           0x1
+#define E5_TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT          3
+#define E5_TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK              0x3 /* 1/2/3 - Protection Type */
+#define E5_TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT             4
+/* 0=0x0000, 1=0xffff; used only in tdif_reg_e4_backward_compatible_mode */
+#define E5_TDIF_TASK_CONTEXT_CRC_SEED_MASK                     0x1
+#define E5_TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                    6
+#define E5_TDIF_TASK_CONTEXT_RESERVED2_MASK                    0x1
+#define E5_TDIF_TASK_CONTEXT_RESERVED2_SHIFT                   7
+	__le32 flags1;
+#define E5_TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK               0x1
+#define E5_TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT              0
+#define E5_TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK             0x1
+#define E5_TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT            1
+#define E5_TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK             0x1
+#define E5_TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT            2
+#define E5_TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK                0x1
+#define E5_TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT               3
+#define E5_TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK              0x1
+#define E5_TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT             4
+#define E5_TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK              0x1
+#define E5_TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT             5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define E5_TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK                0x7
+#define E5_TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT               6
+#define E5_TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK               0x3 /* 0=None, 1=DIF, 2=DIX */
+#define E5_TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT              9
+#define E5_TDIF_TASK_CONTEXT_RESERVED3_MASK                    0x3 /* reserved */
+#define E5_TDIF_TASK_CONTEXT_RESERVED3_SHIFT                   11
+#define E5_TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK            0x1 /* 0=None, 1=DIF */
+#define E5_TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT           13
+#define E5_TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK    0xF
+#define E5_TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT   14
+#define E5_TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK  0xF
+#define E5_TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define E5_TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK                0x1
+#define E5_TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT               22
+#define E5_TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK          0x1
+#define E5_TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT         23
+/* mask for reference tag handling; used only in tdif_reg_e4_backward_compatible_mode */
+#define E5_TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK                 0xF
+#define E5_TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT                24
+/* Forward application tag with mask */
+#define E5_TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK    0x1
+#define E5_TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT   28
+/* Forward reference tag with mask */
+#define E5_TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK    0x1
+#define E5_TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT   29
+#define E5_TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK           0x1 /* Keep reference tag constant */
+#define E5_TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT          30
+#define E5_TDIF_TASK_CONTEXT_RESERVED4_MASK                    0x1
+#define E5_TDIF_TASK_CONTEXT_RESERVED4_SHIFT                   31
+	__le32 offset_in_io_b;
+	__le16 partial_crc_value_a;
+	__le16 partial_checksum_value_a;
+	__le32 offset_in_io_a;
+	u8 partial_dif_data_a[8];
+	u8 partial_dif_data_b[8];
+/* Full reference tag mask seed; used only in 0=tdif_reg_e4_backward_compatible_mode */
+	__le32 ref_tag_mask_e5;
+	__le16 crc_seed_e5 /* Full CRC seed; used only in 0=tdif_reg_e4_backward_compatible_mode */;
+	__le16 reserved5 /* padding to 64bit */;
+	__le32 reserved6[2] /* padding to 128bit */;
+};
+
+
+/*
+ * Describes L2 upper protocol
+ */
+enum gfs_ether_type_enum {
+	e_gfs_ether_type_other_protocol,
+	e_gfs_ether_type_ipv4,
+	e_gfs_ether_type_ipv6,
+	e_gfs_ether_type_arp,
+	e_gfs_ether_type_roce,
+	e_gfs_ether_type_fcoe,
+	MAX_GFS_ETHER_TYPE_ENUM
+};
+
+
+/*
+ * GFS CAM line struct with fields breakout
+ */
+struct gfs_profile_cam_line {
+	__le32 reg0;
+#define GFS_PROFILE_CAM_LINE_PF_MASK                         0xF
+#define GFS_PROFILE_CAM_LINE_PF_SHIFT                        0
+#define GFS_PROFILE_CAM_LINE_TUNNEL_EXISTS_MASK              0x1
+#define GFS_PROFILE_CAM_LINE_TUNNEL_EXISTS_SHIFT             4
+/*  (use enum gfs_tunnel_type_enum) */
+#define GFS_PROFILE_CAM_LINE_TUNNEL_TYPE_MASK                0xF
+#define GFS_PROFILE_CAM_LINE_TUNNEL_TYPE_SHIFT               5
+#define GFS_PROFILE_CAM_LINE_TUNNEL_IP_VERSION_MASK          0x1
+#define GFS_PROFILE_CAM_LINE_TUNNEL_IP_VERSION_SHIFT         9
+#define GFS_PROFILE_CAM_LINE_L2_HEADER_EXISTS_MASK           0x1
+#define GFS_PROFILE_CAM_LINE_L2_HEADER_EXISTS_SHIFT          10
+/*  (use enum gfs_ether_type_enum) */
+#define GFS_PROFILE_CAM_LINE_ETHER_TYPE_PROTOCOL_MASK        0x7
+#define GFS_PROFILE_CAM_LINE_ETHER_TYPE_PROTOCOL_SHIFT       11
+#define GFS_PROFILE_CAM_LINE_OVER_IP_PROTOCOL_MASK           0xFF
+#define GFS_PROFILE_CAM_LINE_OVER_IP_PROTOCOL_SHIFT          14
+#define GFS_PROFILE_CAM_LINE_IS_IP_MASK                      0x1
+#define GFS_PROFILE_CAM_LINE_IS_IP_SHIFT                     22
+#define GFS_PROFILE_CAM_LINE_IS_TCP_UDP_SCTP_MASK            0x1
+#define GFS_PROFILE_CAM_LINE_IS_TCP_UDP_SCTP_SHIFT           23
+#define GFS_PROFILE_CAM_LINE_IS_UNICAST_MASK                 0x1
+#define GFS_PROFILE_CAM_LINE_IS_UNICAST_SHIFT                24
+#define GFS_PROFILE_CAM_LINE_IP_FRAGMENT_MASK                0x1
+#define GFS_PROFILE_CAM_LINE_IP_FRAGMENT_SHIFT               25
+#define GFS_PROFILE_CAM_LINE_CALCULATED_TCP_FLAGS_MASK       0x3F
+#define GFS_PROFILE_CAM_LINE_CALCULATED_TCP_FLAGS_SHIFT      26
+	__le32 reg1;
+#define GFS_PROFILE_CAM_LINE_INNER_CVLAN_EXISTS_MASK         0x1
+#define GFS_PROFILE_CAM_LINE_INNER_CVLAN_EXISTS_SHIFT        0
+#define GFS_PROFILE_CAM_LINE_INNER_SVLAN_EXISTS_MASK         0x1
+#define GFS_PROFILE_CAM_LINE_INNER_SVLAN_EXISTS_SHIFT        1
+#define GFS_PROFILE_CAM_LINE_TUNNEL_CVLAN_EXISTS_MASK        0x1
+#define GFS_PROFILE_CAM_LINE_TUNNEL_CVLAN_EXISTS_SHIFT       2
+#define GFS_PROFILE_CAM_LINE_TUNNEL_SVLAN_EXISTS_MASK        0x1
+#define GFS_PROFILE_CAM_LINE_TUNNEL_SVLAN_EXISTS_SHIFT       3
+#define GFS_PROFILE_CAM_LINE_MPLS_EXISTS_MASK                0x1
+#define GFS_PROFILE_CAM_LINE_MPLS_EXISTS_SHIFT               4
+#define GFS_PROFILE_CAM_LINE_FLEX_BYTE0_MASK                 0xFF
+#define GFS_PROFILE_CAM_LINE_FLEX_BYTE0_SHIFT                5
+#define GFS_PROFILE_CAM_LINE_FLEX_BYTE1_MASK                 0xFF
+#define GFS_PROFILE_CAM_LINE_FLEX_BYTE1_SHIFT                13
+#define GFS_PROFILE_CAM_LINE_FLEX_BYTE2_LSB_MASK             0xF
+#define GFS_PROFILE_CAM_LINE_FLEX_BYTE2_LSB_SHIFT            21
+#define GFS_PROFILE_CAM_LINE_FLEX_BYTE3_MSB_MASK             0xF
+#define GFS_PROFILE_CAM_LINE_FLEX_BYTE3_MSB_SHIFT            25
+#define GFS_PROFILE_CAM_LINE_RESERVED_MASK                   0x3
+#define GFS_PROFILE_CAM_LINE_RESERVED_SHIFT                  29
+#define GFS_PROFILE_CAM_LINE_VALID_MASK                      0x1
+#define GFS_PROFILE_CAM_LINE_VALID_SHIFT                     31
+	__le32 reg2;
+#define GFS_PROFILE_CAM_LINE_MASK_PF_MASK                    0xF
+#define GFS_PROFILE_CAM_LINE_MASK_PF_SHIFT                   0
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_EXISTS_MASK         0x1
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_EXISTS_SHIFT        4
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_TYPE_MASK           0xF
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_TYPE_SHIFT          5
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_IP_VERSION_MASK     0x1
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_IP_VERSION_SHIFT    9
+#define GFS_PROFILE_CAM_LINE_MASK_L2HEADER_EXISTS_MASK       0x1
+#define GFS_PROFILE_CAM_LINE_MASK_L2HEADER_EXISTS_SHIFT      10
+#define GFS_PROFILE_CAM_LINE_MASK_ETHERTYPE_PROTOCOL_MASK    0x7
+#define GFS_PROFILE_CAM_LINE_MASK_ETHERTYPE_PROTOCOL_SHIFT   11
+#define GFS_PROFILE_CAM_LINE_MASK_OVER_IP_PROTOCOL_MASK      0xFF
+#define GFS_PROFILE_CAM_LINE_MASK_OVER_IP_PROTOCOL_SHIFT     14
+#define GFS_PROFILE_CAM_LINE_MASK_IS_IP_MASK                 0x1
+#define GFS_PROFILE_CAM_LINE_MASK_IS_IP_SHIFT                22
+#define GFS_PROFILE_CAM_LINE_MASK_IS_TCP_UDP_SCTP_MASK       0x1
+#define GFS_PROFILE_CAM_LINE_MASK_IS_TCP_UDP_SCTP_SHIFT      23
+#define GFS_PROFILE_CAM_LINE_MASK_IS_UNICAST_MASK            0x1
+#define GFS_PROFILE_CAM_LINE_MASK_IS_UNICAST_SHIFT           24
+#define GFS_PROFILE_CAM_LINE_MASK_IP_FRAGMENT_MASK           0x1
+#define GFS_PROFILE_CAM_LINE_MASK_IP_FRAGMENT_SHIFT          25
+#define GFS_PROFILE_CAM_LINE_MASK_CALCULATED_TCP_FLAGS_MASK  0x3F
+#define GFS_PROFILE_CAM_LINE_MASK_CALCULATED_TCP_FLAGS_SHIFT 26
+	__le32 reg3;
+#define GFS_PROFILE_CAM_LINE_MASK_INNER_CVLAN_EXISTS_MASK    0x1
+#define GFS_PROFILE_CAM_LINE_MASK_INNER_CVLAN_EXISTS_SHIFT   0
+#define GFS_PROFILE_CAM_LINE_MASK_INNER_SVLAN_EXISTS_MASK    0x1
+#define GFS_PROFILE_CAM_LINE_MASK_INNER_SVLAN_EXISTS_SHIFT   1
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_CVLAN_EXISTS_MASK   0x1
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_CVLAN_EXISTS_SHIFT  2
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_SVLAN_EXISTS_MASK   0x1
+#define GFS_PROFILE_CAM_LINE_MASK_TUNNEL_SVLAN_EXISTS_SHIFT  3
+#define GFS_PROFILE_CAM_LINE_MASK_MPLS_EXISTS_MASK           0x1
+#define GFS_PROFILE_CAM_LINE_MASK_MPLS_EXISTS_SHIFT          4
+#define GFS_PROFILE_CAM_LINE_MASK_FLEX_BYTE0_MASK            0xFF
+#define GFS_PROFILE_CAM_LINE_MASK_FLEX_BYTE0_SHIFT           5
+#define GFS_PROFILE_CAM_LINE_MASK_FLEX_BYTE1_MASK            0xFF
+#define GFS_PROFILE_CAM_LINE_MASK_FLEX_BYTE1_SHIFT           13
+#define GFS_PROFILE_CAM_LINE_MASK_FLEX_BYTE2_LSB_MASK        0xF
+#define GFS_PROFILE_CAM_LINE_MASK_FLEX_BYTE2_LSB_SHIFT       21
+#define GFS_PROFILE_CAM_LINE_MASK_FLEX_BYTE3_MSB_MASK        0xF
+#define GFS_PROFILE_CAM_LINE_MASK_FLEX_BYTE3_MSB_SHIFT       25
+#define GFS_PROFILE_CAM_LINE_RESERVED1_MASK                  0x7
+#define GFS_PROFILE_CAM_LINE_RESERVED1_SHIFT                 29
+};
+
+
+/*
+ * used to write to HASH_PROF_ENABLE_VEC_TWO_BIT_MODIFY register
+ */
+struct gfs_profile_modify_enable {
+	u8 index0 /* The index of the bit to be modified */;
+	u8 index1 /* The index of the bit to be modified */;
+	u8 flags;
+/* The operation to perform on the bit. 0 - reset, 1 - set. */
+#define GFS_PROFILE_MODIFY_ENABLE_SETRESET0_MASK  0x1
+#define GFS_PROFILE_MODIFY_ENABLE_SETRESET0_SHIFT 0
+/* The operation to perform on the bit. 0 - reset, 1 - set. */
+#define GFS_PROFILE_MODIFY_ENABLE_SETRESET1_MASK  0x1
+#define GFS_PROFILE_MODIFY_ENABLE_SETRESET1_SHIFT 1
+/* The operation will be applied to the bit only if this bit is set. */
+#define GFS_PROFILE_MODIFY_ENABLE_VALID0_MASK     0x1
+#define GFS_PROFILE_MODIFY_ENABLE_VALID0_SHIFT    2
+/* The operation will be applied to the bit only if this bit is set. */
+#define GFS_PROFILE_MODIFY_ENABLE_VALID1_MASK     0x1
+#define GFS_PROFILE_MODIFY_ENABLE_VALID1_SHIFT    3
+#define GFS_PROFILE_MODIFY_ENABLE_RESERVED1_MASK  0xF
+#define GFS_PROFILE_MODIFY_ENABLE_RESERVED1_SHIFT 4
+	u8 reserved2;
+};
+
+
+enum gfs_swap_i2o_enum {
+	e_no_swap,
+	e_swap_if_tunnel_exist,
+	e_swap_if_tunnel_not_exist,
+	e_swap_always,
+	MAX_GFS_SWAP_I2O_ENUM
+};
+
+
+/*
+ * Describes tunnel type
+ */
+enum gfs_tunnel_type_enum {
+	e_gfs_tunnel_type_no_tunnel = 0,
+	e_gfs_tunnel_type_geneve = 1,
+	e_gfs_tunnel_type_gre = 2,
+	e_gfs_tunnel_type_vxlan = 3,
+	e_gfs_tunnel_type_mpls = 4,
+	e_gfs_tunnel_type_gre_mpls = 5,
+	e_gfs_tunnel_type_udp_mpls = 6,
+	MAX_GFS_TUNNEL_TYPE_ENUM
+};
+
+
+/*
+ * Igu interrupt command
+ */
 enum igu_int_cmd {
-	IGU_INT_ENABLE	= 0,
+	IGU_INT_ENABLE = 0,
 	IGU_INT_DISABLE = 1,
-	IGU_INT_NOP	= 2,
-	IGU_INT_NOP2	= 3,
+	IGU_INT_NOP = 2,
+	IGU_INT_NOP2 = 3,
 	MAX_IGU_INT_CMD
 };
 
-/* IGU producer or consumer update command */
+
+/*
+ * IGU producer or consumer update command
+ */
 struct igu_prod_cons_update {
 	__le32 sb_id_and_flags;
 #define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
@@ -1154,8 +1727,7 @@ struct igu_prod_cons_update {
 /* interrupt enable/disable/nop (use enum igu_int_cmd) */
 #define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
 #define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
-/*  (use enum igu_seg_access) */
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1 /*  (use enum igu_seg_access) */
 #define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
 #define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
 #define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
@@ -1167,18 +1739,19 @@ struct igu_prod_cons_update {
 	__le32 reserved1;
 };
 
-/* Igu segments access for default status block only */
+
+/*
+ * Igu segments access for default status block only
+ */
 enum igu_seg_access {
-	IGU_SEG_ACCESS_REG	= 0,
-	IGU_SEG_ACCESS_ATTN	= 1,
+	IGU_SEG_ACCESS_REG = 0,
+	IGU_SEG_ACCESS_ATTN = 1,
 	MAX_IGU_SEG_ACCESS
 };
 
 
-/*
- * Enumeration for L3 type field of parsing_and_err_flags_union. L3Type:
- * 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled according
- * to the last-ethertype)
+/* Enumeration for L3 type field of parsing_and_err_flags. L3Type: 0 - unknown (not ip) ,1 - Ipv4,
+ * 2 - Ipv6 (this field can be filled according to the last-ethertype)
  */
 enum l3_type {
 	e_l3_type_unknown,
@@ -1188,10 +1761,9 @@ enum l3_type {
 };
 
 
-/*
- * Enumeration for l4Protocol field of parsing_and_err_flags_union. L4-protocol
- * 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and its not the
- * first fragment, the protocol-type should be set to none.
+/* Enumeration for l4Protocol field of parsing_and_err_flags. L4-protocol 0 - none, 1 - TCP, 2- UDP.
+ * if the packet is IPv4 fragment, and its not the first fragment, the protocol-type should be set
+ * to none.
  */
 enum l4_protocol {
 	e_l4_protocol_none,
@@ -1206,59 +1778,55 @@ enum l4_protocol {
  */
 struct parsing_and_err_flags {
 	__le16 flags;
-/* L3Type: 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled
- * according to the last-ethertype) (use enum l3_type)
+/* L3Type: 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled according to the
+ * last-ethertype) (use enum l3_type)
  */
 #define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
 #define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
-/* L4-protocol 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and
- * its not the first fragment, the protocol-type should be set to none.
- * (use enum l4_protocol)
+/* L4-protocol 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and its not the first
+ * fragment, the protocol-type should be set to none. (use enum l4_protocol)
  */
 #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
 #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
-/* Set if the packet is IPv4 fragment. */
+/* Set if the packet is IPv4/IPv6 fragment. */
 #define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
 #define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
-/* Set if VLAN tag exists. Invalid if tunnel type are IP GRE or IP GENEVE. */
+/* corresponds to the same 8021q tag that is selected for 8021q-tag fiel. This flag should be set if
+ * the tag appears in the packet, regardless of its value.
+ */
 #define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
 #define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
-/* Set if L4 checksum was calculated. */
+/* Set if L4 checksum was calculated. taken from the EOP descriptor. */
 #define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
 #define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
-/* Set for PTP packet. */
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1 /* Set for PTP packet. */
 #define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
 /* Set if PTP timestamp recorded. */
 #define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
 #define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
-/* Set if either version-mismatch or hdr-len-error or ipv4-cksm is set or ipv6
- * ver mismatch
- */
+/* Set if either version-mismatch or hdr-len-error or ipv4-cksm is set or ipv6 ver mismatch */
 #define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
 #define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
-/* Set if L4 checksum validation failed. Valid only if L4 checksum was
- * calculated.
- */
+/* Set if L4 checksum validation failed. Valid only if L4 checksum was calculated. */
 #define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
 #define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
 /* Set if GRE/VXLAN/GENEVE tunnel detected. */
 #define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
 #define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
-/* Set if VLAN tag exists in tunnel header. */
+/* This flag should be set if the tag appears in the packet tunnel header, regardless of its value..
+ *
+ */
 #define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
 #define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
-/* Set if either tunnel-ipv4-version-mismatch or tunnel-ipv4-hdr-len-error or
- * tunnel-ipv4-cksm is set or tunneling ipv6 ver mismatch
+/* Set if either tunnel-ipv4-version-mismatch or tunnel-ipv4-hdr-len-error or tunnel-ipv4-cksm is
+ * set or tunneling ipv6 ver mismatch
  */
 #define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
 #define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
-/* Set if GRE or VXLAN/GENEVE UDP checksum was calculated. */
+/* taken from the EOP descriptor. */
 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
-/* Set if tunnel L4 checksum validation failed. Valid only if tunnel L4 checksum
- * was calculated.
- */
+/* Set if tunnel L4 checksum validation failed. Valid only if tunnel L4 checksum was calculated. */
 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
 };
@@ -1269,8 +1837,7 @@ struct parsing_and_err_flags {
  */
 struct parsing_err_flags {
 	__le16 flags;
-/* MAC error indication */
-#define PARSING_ERR_FLAGS_MAC_ERROR_MASK                          0x1
+#define PARSING_ERR_FLAGS_MAC_ERROR_MASK                          0x1 /* MAC error indication */
 #define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT                         0
 /* truncation error indication */
 #define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK                        0x1
@@ -1278,49 +1845,47 @@ struct parsing_err_flags {
 /* packet too small indication */
 #define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK                      0x1
 #define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT                     2
-/* Header Missing Tag */
-#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK                0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK                0x1 /* Header Missing Tag */
 #define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT               3
-/* from frame cracker output */
+/* From FC. See spec for detailed description */
 #define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK             0x1
 #define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT            4
-/* from frame cracker output */
+/* From FC. See spec for detailed description */
 #define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK    0x1
 #define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT   5
-/* set this error if: 1. total-len is smaller than hdr-len 2. total-ip-len
- * indicates number that is bigger than real packet length 3. tunneling:
- * total-ip-length of the outer header points to offset that is smaller than
- * the one pointed to by the total-ip-len of the inner hdr.
+/* set this error if: 1. total-len is smaller than hdr-len 2. total-ip-len indicates number that is
+ * bigger than real packet length 3. tunneling: total-ip-length of the outer header points to offset
+ * that is smaller than the one pointed to by the total-ip-len of the inner hdr.
  */
 #define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK           0x1
 #define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT          6
-/* from frame cracker output */
+/* From FC. See spec for detailed description */
 #define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK                  0x1
 #define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT                 7
-/* from frame cracker output. for either TCP or UDP */
+/* From FC. See spec for detailed description. for either TCP or UDP */
 #define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK          0x1
 #define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT         8
-/* from frame cracker output */
+/* From FC. See spec for detailed description */
 #define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK               0x1
 #define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT              9
-/* cksm calculated and value isn't 0xffff or L4-cksm-wasnt-calculated for any
- * reason, like: udp/ipv4 checksum is 0 etc.
+/* cksm calculated and value isn't 0xffff or L4-cksm-wasnt-calculated for any reason, like: udp/ipv4
+ * checksum is 0 etc.
  */
 #define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK               0x1
 #define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT              10
-/* from frame cracker output */
+/* From FC. See spec for detailed description */
 #define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK        0x1
 #define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT       11
-/* from frame cracker output */
+/* From FC. See spec for detailed description */
 #define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK  0x1
 #define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12
 /* set if geneve option size was over 32 byte */
 #define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK            0x1
 #define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT           13
-/* from frame cracker output */
+/* From FC. See spec for detailed description */
 #define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK           0x1
 #define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT          14
-/* from frame cracker output */
+/* From FC. See spec for detailed description */
 #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK              0x1
 #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT             15
 };
@@ -1333,25 +1898,33 @@ struct pb_context {
 	__le32 crc[4];
 };
 
-/* Concrete Function ID. */
+
+/*
+ * Concrete Function ID.
+ */
 struct pxp_concrete_fid {
 	__le16 fid;
-#define PXP_CONCRETE_FID_PFID_MASK     0xF /* Parent PFID */
+#define PXP_CONCRETE_FID_PFID_MASK     0xF /* Parent PFID (Relative to path) */
 #define PXP_CONCRETE_FID_PFID_SHIFT    0
-#define PXP_CONCRETE_FID_PORT_MASK     0x3 /* port number */
+#define PXP_CONCRETE_FID_PORT_MASK     0x3 /* port number (Relative to path) */
 #define PXP_CONCRETE_FID_PORT_SHIFT    4
 #define PXP_CONCRETE_FID_PATH_MASK     0x1 /* path number */
 #define PXP_CONCRETE_FID_PATH_SHIFT    6
 #define PXP_CONCRETE_FID_VFVALID_MASK  0x1
 #define PXP_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_CONCRETE_FID_VFID_MASK     0xFF
+#define PXP_CONCRETE_FID_VFID_MASK     0xFF /* (Relative to path) */
 #define PXP_CONCRETE_FID_VFID_SHIFT    8
 };
 
+
+/*
+ * Concrete Function ID.
+ */
 struct pxp_pretend_concrete_fid {
 	__le16 fid;
-#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF /* Parent PFID */
 #define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
+/* port number. Only when part of ME register. */
 #define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
 #define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
 #define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
@@ -1360,15 +1933,20 @@ struct pxp_pretend_concrete_fid {
 #define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
 };
 
+/*
+ * Function ID.
+ */
 union pxp_pretend_fid {
 	struct pxp_pretend_concrete_fid concrete_fid;
-	__le16				opaque_fid;
+	__le16 opaque_fid;
 };
 
-/* Pxp Pretend Command Register. */
+/*
+ * Pxp Pretend Command Register.
+ */
 struct pxp_pretend_cmd {
-	union pxp_pretend_fid	fid;
-	__le16			control;
+	union pxp_pretend_fid fid;
+	__le16 control;
 #define PXP_PRETEND_CMD_PATH_MASK              0x1
 #define PXP_PRETEND_CMD_PATH_SHIFT             0
 #define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
@@ -1379,24 +1957,29 @@ struct pxp_pretend_cmd {
 #define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
 #define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
 #define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
-#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1 /* is pretend mode? */
 #define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
-#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1 /* is pretend mode? */
 #define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1 /* is pretend mode? */
 #define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
-#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1 /* is fid concrete? */
 #define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
 };
 
-/* PTT Record in PXP Admin Window. */
+
+
+
+/*
+ * PTT Record in PXP Admin Window.
+ */
 struct pxp_ptt_entry {
-	__le32			offset;
+	__le32 offset;
 #define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
 #define PXP_PTT_ENTRY_OFFSET_SHIFT    0
 #define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
 #define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
-	struct pxp_pretend_cmd	pretend;
+	struct pxp_pretend_cmd pretend;
 };
 
 
@@ -1416,214 +1999,72 @@ struct pxp_vf_zone_a_permission {
 };
 
 
+
 /*
- * Rdif context
+ * Searcher Table struct
  */
-struct rdif_task_context {
-	__le32 initial_ref_tag;
-	__le16 app_tag_value;
-	__le16 app_tag_mask;
-	u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK             0x1
-#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT            0
-#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK      0x1
-#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT     1
-/* 0 = IP checksum, 1 = CRC */
-#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK            0x1
-#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT           2
-#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK         0x1
-#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT        3
-/* 1/2/3 - Protection Type */
-#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK            0x3
-#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT           4
-/* 0=0x0000, 1=0xffff */
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK                   0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT                  6
-/* Keep reference tag constant */
-#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK         0x1
-#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT        7
-	u8 partial_dif_data[7];
-	__le16 partial_crc_value;
-	__le16 partial_checksum_value;
-	__le32 offset_in_io;
-	__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK             0x1
-#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT            0
-#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT          1
-#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT          2
-#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK              0x1
-#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT             3
-#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK            0x1
-#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT           4
-#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK            0x1
-#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT           5
-/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
-#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK              0x7
-#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT             6
-/* 0=None, 1=DIF, 2=DIX */
-#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK             0x3
-#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT            9
-/* DIF tag right at the beginning of DIF interval */
-#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK            0x1
-#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT           11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK                  0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT                 12
-/* 0=None, 1=DIF */
-#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK          0x1
-#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT         13
-/* Forward application tag with mask */
-#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK  0x1
-#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
-/* Forward reference tag with mask */
-#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK  0x1
-#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
-	__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK    0xF
-#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT   0
-#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK  0xF
-#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK                0x1
-#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT               8
-#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK          0x1
-#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT         9
-/* mask for refernce tag handling */
-#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK               0xF
-#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT              10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK                  0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT                 14
-	__le32 reserved2;
+struct src_entry_header {
+	__le32 flags;
+/* Address type : Physical = 0 , Logical = 1 (use enum src_header_next_ptr_type_enum) */
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK  0x1
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT 0
+#define SRC_ENTRY_HEADER_EMPTY_MASK          0x1 /* In T1 (first hop) indicates an empty hash bin */
+#define SRC_ENTRY_HEADER_EMPTY_SHIFT         1
+#define SRC_ENTRY_HEADER_RESERVED_MASK       0x3FFFFFFF
+#define SRC_ENTRY_HEADER_RESERVED_SHIFT      2
+	__le32 magic_number /* Must be SRC_HEADER_MAGIC_NUMBER (0x600DFEED) */;
+	struct regpair next_ptr /* Pointer to next bin */;
 };
 
+
 /*
- * RSS hash type
- */
-enum rss_hash_type {
-	RSS_HASH_TYPE_DEFAULT = 0,
-	RSS_HASH_TYPE_IPV4 = 1,
-	RSS_HASH_TYPE_TCP_IPV4 = 2,
-	RSS_HASH_TYPE_IPV6 = 3,
-	RSS_HASH_TYPE_TCP_IPV6 = 4,
-	RSS_HASH_TYPE_UDP_IPV4 = 5,
-	RSS_HASH_TYPE_UDP_IPV6 = 6,
-	MAX_RSS_HASH_TYPE
+ * Enumeration for address type
+ */
+enum src_header_next_ptr_type_enum {
+	e_physical_addr /* Address of type physical */,
+	e_logical_addr /* Address of type logical */,
+	MAX_SRC_HEADER_NEXT_PTR_TYPE_ENUM
 };
 
+
 /*
  * status block structure
  */
-struct status_block {
-	__le16 pi_array[PIS_PER_SB];
+struct status_block_e4 {
+	__le16 pi_array[PIS_PER_SB_E4];
 	__le32 sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT     0
-#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
-#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+#define STATUS_BLOCK_E4_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT  16
 	__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT  24
 };
 
 
 /*
- * Tdif context
+ * status block structure
  */
-struct tdif_task_context {
-	__le32 initial_ref_tag;
-	__le16 app_tag_value;
-	__le16 app_tag_mask;
-	__le16 partial_crc_value_b;
-	__le16 partial_checksum_value_b;
-	__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT   0
-#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK                0x1
-#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT               8
-#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK             0x1
-#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT            9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK                    0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT                   10
-	u8 reserved1;
-	u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK               0x1
-#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT              0
-#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK        0x1
-#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT       1
-/* 0 = IP checksum, 1 = CRC */
-#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK              0x1
-#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT             2
-#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK           0x1
-#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT          3
-/* 1/2/3 - Protection Type */
-#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK              0x3
-#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT             4
-/* 0=0x0000, 1=0xffff */
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK                     0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                    6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK                    0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT                   7
-	__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK               0x1
-#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT              0
-#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK             0x1
-#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT            1
-#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK             0x1
-#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT            2
-#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK                0x1
-#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT               3
-#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK              0x1
-#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT             4
-#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK              0x1
-#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT             5
-/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
-#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK                0x7
-#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT               6
-/* 0=None, 1=DIF, 2=DIX */
-#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK               0x3
-#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT              9
-/* DIF tag right at the beginning of DIF interval */
-#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK              0x1
-#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT             11
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK                    0x1 /* reserved */
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT                   12
-/* 0=None, 1=DIF */
-#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK            0x1
-#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT           13
-#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT   14
-#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK                0x1
-#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT               22
-#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK          0x1
-#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT         23
-/* mask for refernce tag handling */
-#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK                 0xF
-#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT                24
-/* Forward application tag with mask */
-#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT   28
-/* Forward reference tag with mask */
-#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT   29
-/* Keep reference tag constant */
-#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK           0x1
-#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT          30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK                    0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT                   31
-	__le32 offset_in_io_b;
-	__le16 partial_crc_value_a;
-	__le16 partial_checksum_value_a;
-	__le32 offset_in_io_a;
-	u8 partial_dif_data_a[8];
-	u8 partial_dif_data_b[8];
+struct status_block_e5 {
+	__le16 pi_array[PIS_PER_SB_E5];
+	__le16 pi_array_padding[PIS_PER_SB_PADDING_E5];
+	__le32 sb_num;
+#define STATUS_BLOCK_E5_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_E5_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_E5_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_E5_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_E5_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_E5_ZERO_PAD2_SHIFT  16
+	__le32 prod_index;
+#define STATUS_BLOCK_E5_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_E5_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E5_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_E5_ZERO_PAD3_SHIFT  24
 };
 
 
@@ -1637,11 +2078,9 @@ struct timers_context {
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT    0
 #define TIMERS_CONTEXT_RESERVED0_MASK             0x1
 #define TIMERS_CONTEXT_RESERVED0_SHIFT            27
-/* Valid bit of logical client 0 */
-#define TIMERS_CONTEXT_VALIDLC0_MASK              0x1
+#define TIMERS_CONTEXT_VALIDLC0_MASK              0x1 /* Valid bit of logical client 0 */
 #define TIMERS_CONTEXT_VALIDLC0_SHIFT             28
-/* Active bit of logical client 0 */
-#define TIMERS_CONTEXT_ACTIVELC0_MASK             0x1
+#define TIMERS_CONTEXT_ACTIVELC0_MASK             0x1 /* Active bit of logical client 0 */
 #define TIMERS_CONTEXT_ACTIVELC0_SHIFT            29
 #define TIMERS_CONTEXT_RESERVED1_MASK             0x3
 #define TIMERS_CONTEXT_RESERVED1_SHIFT            30
@@ -1651,11 +2090,9 @@ struct timers_context {
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT    0
 #define TIMERS_CONTEXT_RESERVED2_MASK             0x1
 #define TIMERS_CONTEXT_RESERVED2_SHIFT            27
-/* Valid bit of logical client 1 */
-#define TIMERS_CONTEXT_VALIDLC1_MASK              0x1
+#define TIMERS_CONTEXT_VALIDLC1_MASK              0x1 /* Valid bit of logical client 1 */
 #define TIMERS_CONTEXT_VALIDLC1_SHIFT             28
-/* Active bit of logical client 1 */
-#define TIMERS_CONTEXT_ACTIVELC1_MASK             0x1
+#define TIMERS_CONTEXT_ACTIVELC1_MASK             0x1 /* Active bit of logical client 1 */
 #define TIMERS_CONTEXT_ACTIVELC1_SHIFT            29
 #define TIMERS_CONTEXT_RESERVED3_MASK             0x3
 #define TIMERS_CONTEXT_RESERVED3_SHIFT            30
@@ -1665,11 +2102,9 @@ struct timers_context {
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT    0
 #define TIMERS_CONTEXT_RESERVED4_MASK             0x1
 #define TIMERS_CONTEXT_RESERVED4_SHIFT            27
-/* Valid bit of logical client 2 */
-#define TIMERS_CONTEXT_VALIDLC2_MASK              0x1
+#define TIMERS_CONTEXT_VALIDLC2_MASK              0x1 /* Valid bit of logical client 2 */
 #define TIMERS_CONTEXT_VALIDLC2_SHIFT             28
-/* Active bit of logical client 2 */
-#define TIMERS_CONTEXT_ACTIVELC2_MASK             0x1
+#define TIMERS_CONTEXT_ACTIVELC2_MASK             0x1 /* Active bit of logical client 2 */
 #define TIMERS_CONTEXT_ACTIVELC2_SHIFT            29
 #define TIMERS_CONTEXT_RESERVED5_MASK             0x3
 #define TIMERS_CONTEXT_RESERVED5_SHIFT            30
@@ -1679,8 +2114,7 @@ struct timers_context {
 #define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
 #define TIMERS_CONTEXT_RESERVED6_MASK             0x1
 #define TIMERS_CONTEXT_RESERVED6_SHIFT            27
-/* Valid bit of host expiration */
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK  0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK  0x1 /* Valid bit of host expiration */
 #define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
 #define TIMERS_CONTEXT_RESERVED7_MASK             0x7
 #define TIMERS_CONTEXT_RESERVED7_SHIFT            29
@@ -1688,7 +2122,7 @@ struct timers_context {
 
 
 /*
- * Enum for next_protocol field of tunnel_parsing_flags
+ * Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc
  */
 enum tunnel_next_protocol {
 	e_unknown = 0,
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 6c8e6d407..3fb8fd2ce 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -516,8 +516,10 @@ struct ecore_fw_data {
 	const u8 *modes_tree_buf;
 	union init_op *init_ops;
 	const u32 *arr_data;
-	const u32 *fw_overlays;
-	u32 fw_overlays_len;
+	const u32 *fw_overlays_e4;
+	u32 fw_overlays_e4_len;
+	const u32 *fw_overlays_e5;
+	u32 fw_overlays_e5_len;
 	u32 init_ops_size;
 };
 
@@ -730,6 +732,7 @@ enum ecore_mf_mode {
 enum ecore_dev_type {
 	ECORE_DEV_TYPE_BB,
 	ECORE_DEV_TYPE_AH,
+	ECORE_DEV_TYPE_E5,
 };
 
 /* @DPDK */
@@ -775,12 +778,15 @@ struct ecore_dev {
 #endif
 #define ECORE_IS_AH(dev)	((dev)->type == ECORE_DEV_TYPE_AH)
 #define ECORE_IS_K2(dev)	ECORE_IS_AH(dev)
+#define ECORE_IS_E4(dev)	(ECORE_IS_BB(dev) || ECORE_IS_AH(dev))
+#define ECORE_IS_E5(dev)	((dev)->type == ECORE_DEV_TYPE_E5)
 
 	u16 vendor_id;
 	u16 device_id;
 #define ECORE_DEV_ID_MASK	0xff00
 #define ECORE_DEV_ID_MASK_BB	0x1600
 #define ECORE_DEV_ID_MASK_AH	0x8000
+#define ECORE_DEV_ID_MASK_E5	0x8100
 
 	u16				chip_num;
 #define CHIP_NUM_MASK			0xffff
@@ -1105,8 +1111,18 @@ void ecore_set_platform_str(struct ecore_hwfn *p_hwfn,
 			    char *buf_str, u32 buf_size);
 
 #define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
+#define TSTORM_QZONE_SIZE(dev) \
+	(ECORE_IS_E4(dev) ? TSTORM_QZONE_SIZE_E4 : TSTORM_QZONE_SIZE_E5)
 
 #define MSTORM_QZONE_START(dev) \
-	(TSTORM_QZONE_START + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+	(TSTORM_QZONE_START + (TSTORM_QZONE_SIZE(dev) * NUM_OF_L2_QUEUES(dev)))
+#define MSTORM_QZONE_SIZE(dev) \
+	(ECORE_IS_E4(dev) ? MSTORM_QZONE_SIZE_E4 : MSTORM_QZONE_SIZE_E5)
+
+#define USTORM_QZONE_START(dev) \
+	(MSTORM_QZONE_START(dev) + \
+	 (MSTORM_QZONE_SIZE(dev) *  NUM_OF_L2_QUEUES(dev)))
+#define USTORM_QZONE_SIZE(dev) \
+	(ECORE_IS_E4(dev) ? USTORM_QZONE_SIZE_E4 : USTORM_QZONE_SIZE_E5)
 
 #endif /* __ECORE_H */
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index d3025724b..221cec22b 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -51,18 +51,33 @@
 #define ILT_REG_SIZE_IN_BYTES		4
 
 /* connection context union */
-union conn_context {
-	struct core_conn_context core_ctx;
-	struct eth_conn_context eth_ctx;
+#define CONTEXT_PADDING			2048
+union e4_conn_context {
+	struct e4_core_conn_context core_ctx;
+	struct e4_eth_conn_context eth_ctx;
+	u8 padding[CONTEXT_PADDING];
+};
+
+union e5_conn_context {
+	struct e5_core_conn_context core_ctx;
+	struct e5_eth_conn_context eth_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
-union type0_task_context {
+
+union e4_type0_task_context {
+};
+
+union e5_type0_task_context {
+};
+
+/* TYPE-1 task context - RDMA */
+
+union e4_type1_task_context {
 };
 
-/* TYPE-1 task context - ROCE */
-union type1_task_context {
-	struct regpair reserved; /* @DPDK */
+union e5_type1_task_context {
+	struct e5_eth_task_context eth_ctx;
 };
 
 struct src_ent {
@@ -74,15 +89,33 @@ struct src_ent {
 #define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
 
 #define CONN_CXT_SIZE(p_hwfn) \
-	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+	(ECORE_IS_E4(((p_hwfn)->p_dev)) ? \
+	 ALIGNED_TYPE_SIZE(union e4_conn_context, (p_hwfn)) : \
+	 ALIGNED_TYPE_SIZE(union e5_conn_context, (p_hwfn)))
 
 #define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */
 
 #define TYPE0_TASK_CXT_SIZE(p_hwfn) \
-	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+	(ECORE_IS_E4(((p_hwfn)->p_dev)) ? \
+	 ALIGNED_TYPE_SIZE(union e4_type0_task_context, (p_hwfn)) : \
+	 ALIGNED_TYPE_SIZE(union e5_type0_task_context, (p_hwfn)))
 
 /* Alignment is inherent to the type1_task_context structure */
-#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) \
+	(ECORE_IS_E4(((p_hwfn)->p_dev)) ? \
+	 sizeof(union e4_type1_task_context) : \
+	 sizeof(union e5_type1_task_context))
+
+/* check if resources/configuration is required according to protocol type */
+static bool src_proto(enum protocol_type type)
+{
+	return	type == PROTOCOLID_ISCSI	||
+		type == PROTOCOLID_FCOE		||
+#ifdef CONFIG_ECORE_ROCE_PVRDMA
+		type == PROTOCOLID_ROCE		||
+#endif
+		type == PROTOCOLID_IWARP;
+}
 
 static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
 {
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 1a539bbc7..cd0e32e3b 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -22,18 +22,6 @@ enum ecore_cxt_elem_type {
 	ECORE_ELEM_TASK
 };
 
-enum ilt_clients {
-	ILT_CLI_CDUC,
-	ILT_CLI_CDUT,
-	ILT_CLI_QM,
-	ILT_CLI_TM,
-	ILT_CLI_SRC,
-	ILT_CLI_TSDM,
-	ILT_CLI_RGFS,
-	ILT_CLI_TGFS,
-	MAX_ILT_CLIENTS
-};
-
 u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
 				  enum protocol_type type,
 				  u32 *vf_cid);
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index c4d757d66..5db02d0c4 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -3820,14 +3820,19 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 			goto load_err;
 
 		/* Clear the pglue_b was_error indication.
-		 * It must be done after the BME and the internal FID_enable for
-		 * the PF are set, since VDMs may cause the indication to be set
-		 * again.
+		 * In E4 it must be done after the BME and the internal
+		 * FID_enable for the PF are set, since VDMs may cause the
+		 * indication to be set again.
 		 */
 		ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
 
-		fw_overlays = p_dev->fw_data->fw_overlays;
-		fw_overlays_len = p_dev->fw_data->fw_overlays_len;
+		if (ECORE_IS_E4(p_hwfn->p_dev)) {
+			fw_overlays = p_dev->fw_data->fw_overlays_e4;
+			fw_overlays_len = p_dev->fw_data->fw_overlays_e4_len;
+		} else {
+			fw_overlays = p_dev->fw_data->fw_overlays_e5;
+			fw_overlays_len = p_dev->fw_data->fw_overlays_e5_len;
+		}
 		p_hwfn->fw_overlay_mem =
 			ecore_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
 						   fw_overlays_len);
@@ -4437,23 +4442,27 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
-#define RDMA_NUM_STATISTIC_COUNTERS_K2                  MAX_NUM_VPORTS_K2
-#define RDMA_NUM_STATISTIC_COUNTERS_BB                  MAX_NUM_VPORTS_BB
+/* @DPDK */
+#define RDMA_NUM_STATISTIC_COUNTERS_K2			MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB			MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS_E5			MAX_NUM_VPORTS_E5
 
 static u32 ecore_hsi_def_val[][MAX_CHIP_IDS] = {
-	{MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
-	{MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
-	{MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
-	{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2, },
-	{MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
-	{MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
-	{ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
-	{MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
-	{PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
-	{RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
-	{MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
-	{PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
-	{BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+	{MAX_NUM_VFS_BB, MAX_NUM_VFS_K2, MAX_NUM_VFS_E5},
+	{MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2, MAX_NUM_L2_QUEUES_E5},
+	{MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2, MAX_NUM_PORTS_E5},
+	{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2, MAX_SB_PER_PATH_E5},
+	{MAX_NUM_PFS_BB, MAX_NUM_PFS_K2, MAX_NUM_PFS_E5},
+	{MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2, MAX_NUM_VPORTS_E5},
+	{ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2, ETH_RSS_ENGINE_NUM_E5},
+	{MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2, MAX_QM_TX_QUEUES_E5},
+	{PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2,
+	 PXP_NUM_ILT_RECORDS_E5},
+	{RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2,
+	 RDMA_NUM_STATISTIC_COUNTERS_E5},
+	{MAX_QM_GLOBAL_RLS_E4, MAX_QM_GLOBAL_RLS_E4, MAX_QM_GLOBAL_RLS_E5},
+	{PBF_MAX_CMD_LINES_E4, PBF_MAX_CMD_LINES_E4, PBF_MAX_CMD_LINES_E5},
+	{BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2, BTB_MAX_BLOCKS_E5},
 };
 
 u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev, enum ecore_hsi_def_type type)
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 578c798a9..638178c30 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -1,17 +1,16 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_HSI_COMMON__
 #define __ECORE_HSI_COMMON__
 /********************************/
 /* Add include to common target */
 /********************************/
 #include "common_hsi.h"
-#include "mcp_public.h"
-
+#include "mcp_public.h" /* @DPDK */
 
 /*
  * opcodes for the event ring
@@ -47,522 +46,6 @@ enum common_ramrod_cmd_id {
 };
 
 
-/*
- * The core storm context for the Ystorm
- */
-struct ystorm_core_conn_st_ctx {
-	__le32 reserved[4];
-};
-
-/*
- * The core storm context for the Pstorm
- */
-struct pstorm_core_conn_st_ctx {
-	__le32 reserved[20];
-};
-
-/*
- * Core Slowpath Connection storm context of Xstorm
- */
-struct xstorm_core_conn_st_ctx {
-	__le32 spq_base_lo /* SPQ Ring Base Address low dword */;
-	__le32 spq_base_hi /* SPQ Ring Base Address high dword */;
-/* Consolidation Ring Base Address */
-	struct regpair consolid_base_addr;
-	__le16 spq_cons /* SPQ Ring Consumer */;
-	__le16 consolid_cons /* Consolidation Ring Consumer */;
-	__le32 reserved0[55] /* Pad to 15 cycles */;
-};
-
-struct xstorm_core_conn_ag_ctx {
-	u8 reserved0 /* cdu_validation */;
-	u8 state /* state */;
-	u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
-/* cf_array_active */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
-	u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
-	u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
-/* timer_stop_all */
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
-	u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
-	u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
-	u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
-	u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
-	u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
-	u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
-	u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-/* cf_array_cf_en */
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
-	u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
-	u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
-	u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
-	u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
-	u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
-	u8 byte2 /* byte2 */;
-	__le16 physical_q0 /* physical_q0 */;
-	__le16 consolid_prod /* physical_q1 */;
-	__le16 reserved16 /* physical_q2 */;
-	__le16 tx_bd_cons /* word3 */;
-	__le16 tx_bd_or_spq_prod /* word4 */;
-	__le16 updated_qm_pq_id /* word5 */;
-	__le16 conn_dpi /* conn_dpi */;
-	u8 byte3 /* byte3 */;
-	u8 byte4 /* byte4 */;
-	u8 byte5 /* byte5 */;
-	u8 byte6 /* byte6 */;
-	__le32 reg0 /* reg0 */;
-	__le32 reg1 /* reg1 */;
-	__le32 reg2 /* reg2 */;
-	__le32 reg3 /* reg3 */;
-	__le32 reg4 /* reg4 */;
-	__le32 reg5 /* cf_array0 */;
-	__le32 reg6 /* cf_array1 */;
-	__le16 word7 /* word7 */;
-	__le16 word8 /* word8 */;
-	__le16 word9 /* word9 */;
-	__le16 word10 /* word10 */;
-	__le32 reg7 /* reg7 */;
-	__le32 reg8 /* reg8 */;
-	__le32 reg9 /* reg9 */;
-	u8 byte7 /* byte7 */;
-	u8 byte8 /* byte8 */;
-	u8 byte9 /* byte9 */;
-	u8 byte10 /* byte10 */;
-	u8 byte11 /* byte11 */;
-	u8 byte12 /* byte12 */;
-	u8 byte13 /* byte13 */;
-	u8 byte14 /* byte14 */;
-	u8 byte15 /* byte15 */;
-	u8 e5_reserved /* e5_reserved */;
-	__le16 word11 /* word11 */;
-	__le32 reg10 /* reg10 */;
-	__le32 reg11 /* reg11 */;
-	__le32 reg12 /* reg12 */;
-	__le32 reg13 /* reg13 */;
-	__le32 reg14 /* reg14 */;
-	__le32 reg15 /* reg15 */;
-	__le32 reg16 /* reg16 */;
-	__le32 reg17 /* reg17 */;
-	__le32 reg18 /* reg18 */;
-	__le32 reg19 /* reg19 */;
-	__le16 word12 /* word12 */;
-	__le16 word13 /* word13 */;
-	__le16 word14 /* word14 */;
-	__le16 word15 /* word15 */;
-};
-
-struct tstorm_core_conn_ag_ctx {
-	u8 byte0 /* cdu_validation */;
-	u8 byte1 /* state */;
-	u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
-	u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
-	u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
-	u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
-	u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
-	u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
-	__le32 reg0 /* reg0 */;
-	__le32 reg1 /* reg1 */;
-	__le32 reg2 /* reg2 */;
-	__le32 reg3 /* reg3 */;
-	__le32 reg4 /* reg4 */;
-	__le32 reg5 /* reg5 */;
-	__le32 reg6 /* reg6 */;
-	__le32 reg7 /* reg7 */;
-	__le32 reg8 /* reg8 */;
-	u8 byte2 /* byte2 */;
-	u8 byte3 /* byte3 */;
-	__le16 word0 /* word0 */;
-	u8 byte4 /* byte4 */;
-	u8 byte5 /* byte5 */;
-	__le16 word1 /* word1 */;
-	__le16 word2 /* conn_dpi */;
-	__le16 word3 /* word3 */;
-	__le32 reg9 /* reg9 */;
-	__le32 reg10 /* reg10 */;
-};
-
-struct ustorm_core_conn_ag_ctx {
-	u8 reserved /* cdu_validation */;
-	u8 byte1 /* state */;
-	u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
-	u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
-	u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
-	u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
-	u8 byte2 /* byte2 */;
-	u8 byte3 /* byte3 */;
-	__le16 word0 /* conn_dpi */;
-	__le16 word1 /* word1 */;
-	__le32 rx_producers /* reg0 */;
-	__le32 reg1 /* reg1 */;
-	__le32 reg2 /* reg2 */;
-	__le32 reg3 /* reg3 */;
-	__le16 word2 /* word2 */;
-	__le16 word3 /* word3 */;
-};
-
-/*
- * The core storm context for the Mstorm
- */
-struct mstorm_core_conn_st_ctx {
-	__le32 reserved[40];
-};
-
-/*
- * The core storm context for the Ustorm
- */
-struct ustorm_core_conn_st_ctx {
-	__le32 reserved[20];
-};
-
-/*
- * The core storm context for the Tstorm
- */
-struct tstorm_core_conn_st_ctx {
-	__le32 reserved[4];
-};
-
-/*
- * core connection context
- */
-struct core_conn_context {
-/* ystorm storm context */
-	struct ystorm_core_conn_st_ctx ystorm_st_context;
-	struct regpair ystorm_st_padding[2] /* padding */;
-/* pstorm storm context */
-	struct pstorm_core_conn_st_ctx pstorm_st_context;
-	struct regpair pstorm_st_padding[2] /* padding */;
-/* xstorm storm context */
-	struct xstorm_core_conn_st_ctx xstorm_st_context;
-/* xstorm aggregative context */
-	struct xstorm_core_conn_ag_ctx xstorm_ag_context;
-/* tstorm aggregative context */
-	struct tstorm_core_conn_ag_ctx tstorm_ag_context;
-/* ustorm aggregative context */
-	struct ustorm_core_conn_ag_ctx ustorm_ag_context;
-/* mstorm storm context */
-	struct mstorm_core_conn_st_ctx mstorm_st_context;
-/* ustorm storm context */
-	struct ustorm_core_conn_st_ctx ustorm_st_context;
-	struct regpair ustorm_st_padding[2] /* padding */;
-/* tstorm storm context */
-	struct tstorm_core_conn_st_ctx tstorm_st_context;
-	struct regpair tstorm_st_padding[2] /* padding */;
-};
-
-
 /*
  * How ll2 should deal with packet upon errors
  */
@@ -616,20 +99,22 @@ struct core_ll2_port_stats {
  * LL2 TX Per Queue Stats
  */
 struct core_ll2_pstorm_per_queue_stat {
-/* number of total bytes sent without errors */
-	struct regpair sent_ucast_bytes;
-/* number of total bytes sent without errors */
-	struct regpair sent_mcast_bytes;
-/* number of total bytes sent without errors */
-	struct regpair sent_bcast_bytes;
-/* number of total packets sent without errors */
-	struct regpair sent_ucast_pkts;
-/* number of total packets sent without errors */
-	struct regpair sent_mcast_pkts;
-/* number of total packets sent without errors */
-	struct regpair sent_bcast_pkts;
-/* number of total packets dropped due to errors */
-	struct regpair error_drop_pkts;
+	struct regpair sent_ucast_bytes /* number of total bytes sent without errors */;
+	struct regpair sent_mcast_bytes /* number of total bytes sent without errors */;
+	struct regpair sent_bcast_bytes /* number of total bytes sent without errors */;
+	struct regpair sent_ucast_pkts /* number of total packets sent without errors */;
+	struct regpair sent_mcast_pkts /* number of total packets sent without errors */;
+	struct regpair sent_bcast_pkts /* number of total packets sent without errors */;
+	struct regpair error_drop_pkts /* number of total packets dropped due to errors */;
+};
+
+
+/*
+ * Light-L2 context RX Producers. Temporary struct; core_ll2_rx_prod replaces it.
+ */
+struct core_ll2_rx_ctx_prod {
+	__le16 bd_prod /* BD Producer */;
+	__le16 cqe_prod /* CQE Producer */;
 };
 
 
@@ -649,6 +134,11 @@ struct core_ll2_ustorm_per_queue_stat {
 	struct regpair rcv_bcast_pkts;
 };
 
+struct core_ll2_rx_per_queue_stat {
+	struct core_ll2_tstorm_per_queue_stat tstorm_stat /* TSTORM per queue statistics */;
+	struct core_ll2_ustorm_per_queue_stat ustorm_stat /* USTORM per queue statistics */;
+};
+
 
 /*
  * Light-L2 RX Producers
@@ -656,13 +146,13 @@ struct core_ll2_ustorm_per_queue_stat {
 struct core_ll2_rx_prod {
 	__le16 bd_prod /* BD Producer */;
 	__le16 cqe_prod /* CQE Producer */;
+	__le32 reserved;
 };
 
 
 
 struct core_ll2_tx_per_queue_stat {
-/* PSTORM per queue statistics */
-	struct core_ll2_pstorm_per_queue_stat pstorm_stat;
+	struct core_ll2_pstorm_per_queue_stat pstorm_stat /* PSTORM per queue statistics */;
 };
 
 
@@ -674,14 +164,12 @@ struct core_pwm_prod_update_data {
 	__le16 icid /* internal CID */;
 	u8 reserved0;
 	u8 params;
-/* aggregative command. Set DB_AGG_CMD_SET for producer update
- * (use enum db_agg_cmd_sel)
- */
+/* aggregative command. Set DB_AGG_CMD_SET for producer update (use enum db_agg_cmd_sel) */
 #define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK    0x3
 #define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT   0
 #define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK  0x3F /* Set 0. */
 #define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
-	struct core_ll2_rx_prod prod /* Producers. */;
+	struct core_ll2_rx_ctx_prod prod /* Producers. */;
 };
 
 
@@ -692,12 +180,12 @@ struct core_queue_stats_query_ramrod_data {
 	u8 rx_stat /* If set, collect RX queue statistics. */;
 	u8 tx_stat /* If set, collect TX queue statistics. */;
 	__le16 reserved[3];
-/* Address of RX statistic buffer. core_ll2_rx_per_queue_stat struct will be
- * write to this address.
+/* Address of RX statistic buffer. core_ll2_rx_per_queue_stat struct will be write to this address.
+ *
  */
 	struct regpair rx_stat_addr;
-/* Address of TX statistic buffer. core_ll2_tx_per_queue_stat struct will be
- * write to this address.
+/* Address of TX statistic buffer. core_ll2_tx_per_queue_stat struct will be write to this address.
+ *
  */
 	struct regpair tx_stat_addr;
 };
@@ -768,8 +256,7 @@ struct core_rx_bd_with_buff_len {
  */
 union core_rx_bd_union {
 	struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
-/* Core Rx Bd with dynamic buffer length */
-	struct core_rx_bd_with_buff_len rx_bd_with_len;
+	struct core_rx_bd_with_buff_len rx_bd_with_len /* Core Rx Bd with dynamic buffer length */;
 };
 
 
@@ -798,19 +285,18 @@ enum core_rx_cqe_type {
  * Core RX CQE for Light L2 .
  */
 struct core_rx_fast_path_cqe {
-	u8 type /* CQE type */;
-/* Offset (in bytes) of the packet from start of the buffer */
-	u8 placement_offset;
-/* Parsing and error flags from the parser */
-	struct parsing_and_err_flags parse_flags;
+	u8 type /* CQE type (use enum core_rx_cqe_type) */;
+	u8 placement_offset /* Offset (in bytes) of the packet from start of the buffer */;
+	struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
 	__le16 packet_length /* Total packet length (from the parser) */;
 	__le16 vlan /* 802.1q VLAN tag */;
 	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
-/* bit- map: each bit represents a specific error. errors indications are
- * provided by the cracker. see spec for detailed description
+/* Bit-map: each bit represents a specific error. Error indications are provided by the FC. See
+ * the spec for a detailed description.
  */
 	struct parsing_err_flags err_flags;
-	__le16 reserved0;
+	u8 packet_source /* RX packet source. (use enum core_rx_pkt_source) */;
+	u8 reserved0;
 	__le32 reserved1[3];
 };
 
@@ -818,26 +304,25 @@ struct core_rx_fast_path_cqe {
  * Core Rx CM offload CQE .
  */
 struct core_rx_gsi_offload_cqe {
-	u8 type /* CQE type */;
-	u8 data_length_error /* set if gsi data is bigger than buff */;
-/* Parsing and error flags from the parser */
-	struct parsing_and_err_flags parse_flags;
+	u8 type /* CQE type (use enum core_rx_cqe_type) */;
+	u8 data_length_error /* set if GSI data is bigger than the buffer */;
+	struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
 	__le16 data_length /* Total packet length (from the parser) */;
 	__le16 vlan /* 802.1q VLAN tag */;
 	__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
 	__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
-/* These are the lower 16 bit of QP id in RoCE BTH header */
-	__le16 qp_id;
+	__le16 qp_id /* These are the lower 16 bits of the QP id in the RoCE BTH header */;
 	__le32 src_qp /* Source QP from DETH header */;
 	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
-	__le32 reserved;
+	u8 packet_source /* RX packet source. (use enum core_rx_pkt_source) */;
+	u8 reserved[3];
 };
 
 /*
  * Core RX CQE for Light L2 .
  */
 struct core_rx_slow_path_cqe {
-	u8 type /* CQE type */;
+	u8 type /* CQE type (use enum core_rx_cqe_type) */;
 	u8 ramrod_cmd_id;
 	__le16 echo;
 	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
@@ -856,6 +341,17 @@ union core_rx_cqe_union {
 
 
 
+/*
+ * RX packet source, as reported in the packet_source field of RX CQEs.
+ */
+enum core_rx_pkt_source {
+	CORE_RX_PKT_SOURCE_NETWORK = 0 /* Regular RX packet from network port */,
+	CORE_RX_PKT_SOURCE_LB /* Loopback packet */,
+	CORE_RX_PKT_SOURCE_TX /* TX packet duplication. Used for debug. */,
+	MAX_CORE_RX_PKT_SOURCE
+};
+
+
 
 /*
  * Ramrod data for rx queue start ramrod
@@ -870,32 +366,28 @@ struct core_rx_start_ramrod_data {
 	u8 complete_event_flg /* if set - post completion to the event ring */;
 	u8 drop_ttl0_flg /* if set - drop packet with ttl=0 */;
 	__le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
-/* if set - 802.1q tag will be removed and copied to CQE */
+/* if set - 802.1q tag will be removed and copied to CQE. Set only if vport_id_valid flag clear. If
+ * vport_id_valid flag set, VPORT configuration used instead.
+ */
 	u8 inner_vlan_stripping_en;
-/* if set - outer tag wont be stripped, valid only in MF OVLAN mode. */
-	u8 outer_vlan_stripping_dis;
+/* if set and inner vlan does not exist, the outer vlan will copied to CQE as inner vlan. should be
+ * used in MF_OVLAN mode only.
+ */
+	u8 report_outer_vlan;
 	u8 queue_id /* Light L2 RX Queue ID */;
 	u8 main_func_queue /* Set if this is the main PFs LL2 queue */;
-/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
- * main_func_queue is set.
- */
+/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */
 	u8 mf_si_bcast_accept_all;
-/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
- * main_func_queue is set.
- */
+/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */
 	u8 mf_si_mcast_accept_all;
-/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
- * zero out, used for TenantDcb
- */
 /* Specifies how ll2 should deal with RX packets errors */
 	struct core_rx_action_on_error action_on_error;
 	u8 gsi_offload_flag /* set for GSI offload mode */;
-/* If set, queue is subject for RX VFC classification. */
-	u8 vport_id_valid;
+	u8 vport_id_valid /* If set, queue is subject for RX VFC classification. */;
 	u8 vport_id /* Queue VPORT for RX VFC classification. */;
 	u8 zero_prod_flg /* If set, zero RX producers. */;
-/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
- * zero out, used for TenantDcb
+/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be zero out, used for
+ * TenantDcb
  */
 	u8 wipe_inner_vlan_pri_en;
 	u8 reserved[2];
@@ -906,8 +398,8 @@ struct core_rx_start_ramrod_data {
  * Ramrod data for rx queue stop ramrod
  */
 struct core_rx_stop_ramrod_data {
-	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
-	u8 complete_event_flg /* post completion to the event ring if set */;
+	u8 complete_cqe_flg /* if set - post completion to the CQE ring */;
+	u8 complete_event_flg /* if set - post completion to the event ring */;
 	u8 queue_id /* Light L2 RX Queue ID */;
 	u8 reserved1;
 	__le16 reserved2[2];
@@ -922,9 +414,7 @@ struct core_tx_bd_data {
 /* Do not allow additional VLAN manipulations on this packet (DCB) */
 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK         0x1
 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT        0
-/* Insert VLAN into packet. Cannot be set for LB packets
- * (tx_dst == CORE_TX_DEST_LB)
- */
+/* Insert VLAN into packet. Cannot be set for LB packets (tx_dst == CORE_TX_DEST_LB) */
 #define CORE_TX_BD_DATA_VLAN_INSERTION_MASK          0x1
 #define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT         1
 /* This is the first BD of the packet (for debug) */
@@ -936,16 +426,13 @@ struct core_tx_bd_data {
 /* Calculate the L4 checksum for the packet */
 #define CORE_TX_BD_DATA_L4_CSUM_MASK                 0x1
 #define CORE_TX_BD_DATA_L4_CSUM_SHIFT                4
-/* Packet is IPv6 with extensions */
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK                0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK                0x1 /* Packet is IPv6 with extensions */
 #define CORE_TX_BD_DATA_IPV6_EXT_SHIFT               5
-/* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol:
- * 0-TCP, 1-UDP
- */
+/* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol: 0-TCP, 1-UDP */
 #define CORE_TX_BD_DATA_L4_PROTOCOL_MASK             0x1
 #define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT            6
-/* The pseudo checksum mode to place in the L4 checksum field. Required only
- * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+/* The pseudo checksum mode to place in the L4 checksum field. Required only when IPv6+ext and
+ * l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
  */
 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK     0x1
 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT    7
@@ -954,13 +441,12 @@ struct core_tx_bd_data {
  */
 #define CORE_TX_BD_DATA_NBDS_MASK                    0xF
 #define CORE_TX_BD_DATA_NBDS_SHIFT                   8
-/* Use roce_flavor enum - Differentiate between Roce flavors is valid when
- * connType is ROCE (use enum core_roce_flavor_type)
+/* Use roce_flavor enum - Differentiate between Roce flavors is valid when connType is ROCE (use
+ * enum core_roce_flavor_type)
  */
 #define CORE_TX_BD_DATA_ROCE_FLAV_MASK               0x1
 #define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT              12
-/* Calculate ip length */
-#define CORE_TX_BD_DATA_IP_LEN_MASK                  0x1
+#define CORE_TX_BD_DATA_IP_LEN_MASK                  0x1 /* Calculate ip length */
 #define CORE_TX_BD_DATA_IP_LEN_SHIFT                 13
 /* disables the STAG insertion, relevant only in MF OVLAN mode. */
 #define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK  0x1
@@ -975,14 +461,14 @@ struct core_tx_bd_data {
 struct core_tx_bd {
 	struct regpair addr /* Buffer Address */;
 	__le16 nbytes /* Number of Bytes in Buffer */;
-/* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack
- * packets: echo data to pass to Rx
+/* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack packets: echo data to
+ * pass to Rx
  */
 	__le16 nw_vlan_or_lb_echo;
-	struct core_tx_bd_data bd_data /* BD Flags */;
+	struct core_tx_bd_data bd_data /* BD flags */;
 	__le16 bitfield1;
-/* L4 Header Offset from start of packet (in Words). This is needed if both
- * l4_csum and ipv6_ext are set
+/* L4 Header Offset from start of packet (in Words). This is needed if both l4_csum and ipv6_ext are
+ * set
  */
 #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK  0x3FFF
 #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
@@ -1010,54 +496,1061 @@ enum core_tx_dest {
  */
 struct core_tx_start_ramrod_data {
 	struct regpair pbl_base_addr /* Address of the pbl page */;
-	__le16 mtu /* Maximum transmission unit */;
+	__le16 mtu /* MTU */;
 	__le16 sb_id /* Status block ID */;
-	u8 sb_index /* Status block protocol index */;
-	u8 stats_en /* Statistics Enable */;
-	u8 stats_id /* Statistics Counter ID */;
-	u8 conn_type /* connection type that loaded ll2 */;
+	u8 sb_index /* Status block index */;
+	u8 stats_en /* Ram statistics enable */;
+	u8 stats_id /* Ram statistics counter ID */;
+	u8 conn_type /* Parent protocol type (use enum protocol_type) */;
 	__le16 pbl_size /* Number of BD pages pointed by PBL */;
 	__le16 qm_pq_id /* QM PQ ID */;
 	u8 gsi_offload_flag /* set for GSI offload mode */;
 	u8 ctx_stats_en /* Context statistics enable */;
-/* If set, queue is part of VPORT and subject for TX switching. */
-	u8 vport_id_valid;
-/* vport id of the current connection, used to access non_rdma_in_to_in_pri_map
- * which is per vport
+	u8 vport_id_valid /* If set, queue is part of VPORT and subject for TX switching. */;
+/* vport id of the current connection, used to access non_rdma_in_to_in_pri_map which is per vport
+ *
  */
 	u8 vport_id;
+/* If set, security checks are made for this connection; the disable_stag_insertion flag must be
+ * clear in the TX BD. If set and anti-spoofing is enabled for the associated VPORT, only packets
+ * with self destination MAC can be forced to LB.
+ */
+	u8 enforce_security_flag;
+	u8 reserved[7];
+};
+
+
+/*
+ * Ramrod data for the TX queue stop ramrod
+ */
+struct core_tx_stop_ramrod_data {
+	__le32 reserved0[2] /* unused; presumably pads the ramrod to the required size */;
+};
+
+
+/*
+ * Ramrod data for the TX queue update ramrod
+ */
+struct core_tx_update_ramrod_data {
+	u8 update_qm_pq_id_flg /* If set, the qm_pq_id field below is valid and is applied */;
+	u8 reserved0;
+	__le16 qm_pq_id /* Updated QM PQ ID */;
+	__le32 reserved1[1];
+};
+
+
+/*
+ * Enum flag for what type of DCB data to update
+ */
+enum dcb_dscp_update_mode {
+	DONT_UPDATE_DCB_DSCP /* Set when no change should be done to DCB data */,
+	UPDATE_DCB /* Set to update only the L2 (VLAN) priority */,
+	UPDATE_DSCP /* Set to update only the IP DSCP */,
+	UPDATE_DCB_DSCP /* Set to update both VLAN priority and IP DSCP */,
+	MAX_DCB_DSCP_UPDATE_MODE
+};
+
+
+/*
+ * The core storm context for the Ystorm
+ */
+struct ystorm_core_conn_st_ctx {
+	__le32 reserved[4] /* unused; presumably sizes the Ystorm context region */;
+};
+
+/*
+ * The core storm context for the Pstorm
+ */
+struct pstorm_core_conn_st_ctx {
+	__le32 reserved[20] /* unused; presumably sizes the Pstorm context region */;
+};
+
+/*
+ * Core Slowpath Connection storm context of Xstorm
+ */
+struct xstorm_core_conn_st_ctx {
+	__le32 spq_base_lo /* SPQ (slowpath queue) Ring Base Address low dword */;
+	__le32 spq_base_hi /* SPQ (slowpath queue) Ring Base Address high dword */;
+	struct regpair consolid_base_addr /* Consolidation Ring Base Address */;
+	__le16 spq_cons /* SPQ Ring Consumer */;
+	__le16 consolid_cons /* Consolidation Ring Consumer */;
+	__le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct e4_xstorm_core_conn_ag_ctx { /* Xstorm aggregative context; generated layout, FW ABI */
+	u8 reserved0 /* cdu_validation */;
+	u8 state /* state */;
+	u8 flags0;
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1 /* cf_array_active */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
+	u8 flags1;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
+	u8 flags2;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3 /* timer_stop_all */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
+	u8 flags3;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
+	u8 flags4;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
+	u8 flags5;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
+	u8 flags6;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
+	u8 flags7;
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
+	u8 flags8;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
+	u8 flags9;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1 /* cf_array_cf_en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
+	u8 flags10;
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
+	u8 flags11;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
+	u8 flags12;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
+	u8 flags13;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
+	u8 flags14;
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
+	u8 byte2 /* byte2 */;
+	__le16 physical_q0 /* physical_q0 */;
+	__le16 consolid_prod /* physical_q1 */;
+	__le16 reserved16 /* physical_q2 */;
+	__le16 tx_bd_cons /* word3 */;
+	__le16 tx_bd_or_spq_prod /* word4 */;
+	__le16 updated_qm_pq_id /* word5 */;
+	__le16 conn_dpi /* conn_dpi */;
+	u8 byte3 /* byte3 */;
+	u8 byte4 /* byte4 */;
+	u8 byte5 /* byte5 */;
+	u8 byte6 /* byte6 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le32 reg4 /* reg4 */;
+	__le32 reg5 /* cf_array0 */;
+	__le32 reg6 /* cf_array1 */;
+	__le16 word7 /* word7 */;
+	__le16 word8 /* word8 */;
+	__le16 word9 /* word9 */;
+	__le16 word10 /* word10 */;
+	__le32 reg7 /* reg7 */;
+	__le32 reg8 /* reg8 */;
+	__le32 reg9 /* reg9 */;
+	u8 byte7 /* byte7 */;
+	u8 byte8 /* byte8 */;
+	u8 byte9 /* byte9 */;
+	u8 byte10 /* byte10 */;
+	u8 byte11 /* byte11 */;
+	u8 byte12 /* byte12 */;
+	u8 byte13 /* byte13 */;
+	u8 byte14 /* byte14 */;
+	u8 byte15 /* byte15 */;
+	u8 e5_reserved /* e5_reserved */;
+	__le16 word11 /* word11 */;
+	__le32 reg10 /* reg10 */;
+	__le32 reg11 /* reg11 */;
+	__le32 reg12 /* reg12 */;
+	__le32 reg13 /* reg13 */;
+	__le32 reg14 /* reg14 */;
+	__le32 reg15 /* reg15 */;
+	__le32 reg16 /* reg16 */;
+	__le32 reg17 /* reg17 */;
+	__le32 reg18 /* reg18 */;
+	__le32 reg19 /* reg19 */;
+	__le16 word12 /* word12 */;
+	__le16 word13 /* word13 */;
+	__le16 word14 /* word14 */;
+	__le16 word15 /* word15 */;
+};
+
+struct e4_tstorm_core_conn_ag_ctx { /* Tstorm aggregative context; generated layout, FW ABI */
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1 /* bit2 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1 /* bit3 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1 /* bit4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1 /* bit5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
+	u8 flags1;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
+	u8 flags2;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3 /* cf7 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3 /* cf8 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
+	u8 flags3;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3 /* cf9 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3 /* cf10 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
+	u8 flags4;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1 /* cf7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1 /* cf8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1 /* cf9en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1 /* cf10en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags5;
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le32 reg4 /* reg4 */;
+	__le32 reg5 /* reg5 */;
+	__le32 reg6 /* reg6 */;
+	__le32 reg7 /* reg7 */;
+	__le32 reg8 /* reg8 */;
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* word0 */;
+	u8 byte4 /* byte4 */;
+	u8 byte5 /* byte5 */;
+	__le16 word1 /* word1 */;
+	__le16 word2 /* conn_dpi */;
+	__le16 word3 /* word3 */;
+	__le32 ll2_rx_prod /* reg9 */;
+	__le32 reg10 /* reg10 */;
+};
+
+struct e4_ustorm_core_conn_ag_ctx {
+	u8 reserved /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
+	u8 flags2;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags3;
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* conn_dpi */;
+	__le16 word1 /* word1 */;
+	__le32 rx_producers /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
+};
+
+/*
+ * The core storm context for the Mstorm
+ */
+struct mstorm_core_conn_st_ctx {
+	__le32 reserved[40];
+};
+
+/*
+ * The core storm context for the Ustorm
+ */
+struct ustorm_core_conn_st_ctx {
+	__le32 reserved[20];
+};
+
+/*
+ * The core storm context for the Tstorm
+ */
+struct tstorm_core_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/*
+ * core connection context
+ */
+struct e4_core_conn_context {
+	struct ystorm_core_conn_st_ctx ystorm_st_context /* ystorm storm context */;
+	struct regpair ystorm_st_padding[2] /* padding */;
+	struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
+	struct regpair pstorm_st_padding[2] /* padding */;
+	struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
+	struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
+	struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
+	struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
+	struct mstorm_core_conn_st_ctx mstorm_st_context /* mstorm storm context */;
+	struct ustorm_core_conn_st_ctx ustorm_st_context /* ustorm storm context */;
+	struct regpair ustorm_st_padding[2] /* padding */;
+	struct tstorm_core_conn_st_ctx tstorm_st_context /* tstorm storm context */;
+	struct regpair tstorm_st_padding[2] /* padding */;
+};
+
+
+struct e5_xstorm_core_conn_ag_ctx {
+	u8 reserved0 /* cdu_validation */;
+	u8 state_and_core_id /* state_and_core_id */;
+	u8 flags0;
+#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
+#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1 /* cf_array_active */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
+	u8 flags1;
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
+#define E5_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
+#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
+	u8 flags2;
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3 /* timer_stop_all */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
+	u8 flags3;
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
+	u8 flags4;
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
+	u8 flags5;
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
+	u8 flags6;
+#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
+#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
+#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
+	u8 flags7;
+#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
+#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
+	u8 flags8;
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
+	u8 flags9;
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
+#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1 /* cf_array_cf_en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
+	u8 flags10;
+#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
+#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
+#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
+#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
+	u8 flags11;
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
+#define E5_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
+	u8 flags12;
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
+	u8 flags13;
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
+	u8 flags14;
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
+	u8 byte2 /* byte2 */;
+	__le16 physical_q0 /* physical_q0_and_vf_id_lo */;
+	__le16 consolid_prod /* physical_q1_and_vf_id_hi */;
+	__le16 reserved16 /* physical_q2 */;
+	__le16 tx_bd_cons /* word3 */;
+	__le16 tx_bd_or_spq_prod /* word4 */;
+	__le16 updated_qm_pq_id /* word5 */;
+	__le16 conn_dpi /* conn_dpi */;
+	u8 byte3 /* byte3 */;
+	u8 byte4 /* byte4 */;
+	u8 byte5 /* byte5 */;
+	u8 byte6 /* byte6 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le32 reg4 /* reg4 */;
+	__le32 reg5 /* cf_array0 */;
+	__le32 reg6 /* cf_array1 */;
+	u8 flags15;
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK         0x1 /* bit22 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT        0
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK         0x1 /* bit23 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT        1
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK         0x1 /* bit24 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT        2
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK         0x3 /* cf24 */
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT        3
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK         0x1 /* cf24en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT        5
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK         0x1 /* rule26en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT        6
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_MASK         0x1 /* rule27en */
+#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_SHIFT        7
+	u8 byte7 /* byte7 */;
+	__le16 word7 /* word7 */;
+	__le16 word8 /* word8 */;
+	__le16 word9 /* word9 */;
+	__le16 word10 /* word10 */;
+	__le16 word11 /* word11 */;
+	__le32 reg7 /* reg7 */;
+	__le32 reg8 /* reg8 */;
+	__le32 reg9 /* reg9 */;
+	u8 byte8 /* byte8 */;
+	u8 byte9 /* byte9 */;
+	u8 byte10 /* byte10 */;
+	u8 byte11 /* byte11 */;
+	u8 byte12 /* byte12 */;
+	u8 byte13 /* byte13 */;
+	u8 byte14 /* byte14 */;
+	u8 byte15 /* byte15 */;
+	__le32 reg10 /* reg10 */;
+	__le32 reg11 /* reg11 */;
+	__le32 reg12 /* reg12 */;
+	__le32 reg13 /* reg13 */;
+	__le32 reg14 /* reg14 */;
+	__le32 reg15 /* reg15 */;
+	__le32 reg16 /* reg16 */;
+	__le32 reg17 /* reg17 */;
+	__le32 reg18 /* reg18 */;
+	__le32 reg19 /* reg19 */;
+	__le16 word12 /* word12 */;
+	__le16 word13 /* word13 */;
+	__le16 word14 /* word14 */;
+	__le16 word15 /* word15 */;
 };
 
-
-/*
- * Ramrod data for tx queue stop ramrod
- */
-struct core_tx_stop_ramrod_data {
-	__le32 reserved0[2];
+struct e5_tstorm_core_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	u8 flags0;
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT         0
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT         1
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK          0x1 /* bit2 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT         2
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK          0x1 /* bit3 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT         3
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK          0x1 /* bit4 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT         4
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK          0x1 /* bit5 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT         5
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT          6
+	u8 flags1;
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT          0
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT          2
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT          4
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT          6
+	u8 flags2;
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT          0
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT          2
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF7_MASK           0x3 /* cf7 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT          4
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF8_MASK           0x3 /* cf8 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT          6
+	u8 flags3;
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF9_MASK           0x3 /* cf9 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT          0
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF10_MASK          0x3 /* cf10 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT         2
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT        4
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT        5
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT        6
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT        7
+	u8 flags4;
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT        0
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT        1
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT        2
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK         0x1 /* cf7en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT        3
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK         0x1 /* cf8en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT        4
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK         0x1 /* cf9en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT        5
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK        0x1 /* cf10en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT       6
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT      7
+	u8 flags5;
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT      0
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT      1
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT      2
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT      3
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT      4
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT      5
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT      6
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT      7
+	u8 flags6;
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit6 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit7 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK  0x1 /* bit8 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf11 */
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf11en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* rule9en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_MASK  0x1 /* rule10en */
+#define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
+	u8 byte2 /* byte2 */;
+	__le16 word0 /* word0 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le32 reg4 /* reg4 */;
+	__le32 reg5 /* reg5 */;
+	__le32 reg6 /* reg6 */;
+	__le32 reg7 /* reg7 */;
+	__le32 reg8 /* reg8 */;
+	u8 byte3 /* byte3 */;
+	u8 byte4 /* byte4 */;
+	u8 byte5 /* byte5 */;
+	u8 e4_reserved8 /* vf_id */;
+	__le16 word1 /* word1 */;
+	__le16 word2 /* conn_dpi */;
+	__le32 ll2_rx_prod /* reg9 */;
+	__le16 word3 /* word3 */;
+	__le16 e4_reserved9 /* word4 */;
 };
 
-
-/*
- * Ramrod data for tx queue update ramrod
- */
-struct core_tx_update_ramrod_data {
-	u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
-	u8 reserved0;
-	__le16 qm_pq_id /* Updated QM PQ ID */;
-	__le32 reserved1[1];
+struct e5_ustorm_core_conn_ag_ctx {
+	u8 reserved /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	u8 flags0;
+#define E5_USTORM_CORE_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
+#define E5_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT         0
+#define E5_USTORM_CORE_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
+#define E5_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT         1
+#define E5_USTORM_CORE_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT          2
+#define E5_USTORM_CORE_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT          4
+#define E5_USTORM_CORE_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT          6
+	u8 flags1;
+#define E5_USTORM_CORE_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT          0
+#define E5_USTORM_CORE_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT          2
+#define E5_USTORM_CORE_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT          4
+#define E5_USTORM_CORE_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT          6
+	u8 flags2;
+#define E5_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT        0
+#define E5_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT        1
+#define E5_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT        2
+#define E5_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT        3
+#define E5_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT        4
+#define E5_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT        5
+#define E5_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
+#define E5_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT        6
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT      7
+	u8 flags3;
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT      0
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT      1
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT      2
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT      3
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT      4
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT      5
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT      6
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
+#define E5_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT      7
+	u8 flags4;
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit2 */
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit3 */
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK  0x3 /* cf7 */
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf8 */
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT 4
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf7en */
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT 6
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* cf8en */
+#define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT 7
+	u8 byte2 /* byte2 */;
+	__le16 word0 /* conn_dpi */;
+	__le16 word1 /* word1 */;
+	__le32 rx_producers /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
 };
 
-
 /*
- * Enum flag for what type of dcb data to update
+ * core connection context
  */
-enum dcb_dscp_update_mode {
-/* use when no change should be done to DCB data */
-	DONT_UPDATE_DCB_DSCP,
-	UPDATE_DCB /* use to update only L2 (vlan) priority */,
-	UPDATE_DSCP /* use to update only IP DSCP */,
-	UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
-	MAX_DCB_DSCP_UPDATE_FLAG
+struct e5_core_conn_context {
+	struct ystorm_core_conn_st_ctx ystorm_st_context /* ystorm storm context */;
+	struct regpair ystorm_st_padding[2] /* padding */;
+	struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
+	struct regpair pstorm_st_padding[2] /* padding */;
+	struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
+	struct regpair xstorm_st_padding[2] /* padding */;
+	struct e5_xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
+	struct regpair xstorm_ag_padding[2] /* padding */;
+	struct e5_tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
+	struct e5_ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
+	struct mstorm_core_conn_st_ctx mstorm_st_context /* mstorm storm context */;
+	struct ustorm_core_conn_st_ctx ustorm_st_context /* ustorm storm context */;
+	struct regpair ustorm_st_padding[2] /* padding */;
+	struct tstorm_core_conn_st_ctx tstorm_st_context /* tstorm storm context */;
+	struct regpair tstorm_st_padding[2] /* padding */;
 };
 
 
@@ -1070,7 +1563,7 @@ struct eth_mstorm_per_pf_stat {
 
 
 struct eth_mstorm_per_queue_stat {
-/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
+/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (in IPv6) */
 	struct regpair ttl0_discard;
 /* Number of packets discarded because they are bigger than MTU */
 	struct regpair packet_too_big_discard;
@@ -1078,14 +1571,13 @@ struct eth_mstorm_per_queue_stat {
 	struct regpair no_buff_discard;
 /* Number of packets discarded because of no active Rx connection */
 	struct regpair not_active_discard;
-/* number of coalesced packets in all TPA aggregations */
-	struct regpair tpa_coalesced_pkts;
-/* total number of TPA aggregations */
-	struct regpair tpa_coalesced_events;
-/* number of aggregations, which abnormally ended */
-	struct regpair tpa_aborts_num;
-/* total TCP payload length in all TPA aggregations */
-	struct regpair tpa_coalesced_bytes;
+/* number of coalesced packets in all TPA aggregations / Discarded Hairpin Packet received from Tx
+ * Loopback
+ */
+	struct regpair tpa_coalesced_pkts_lb_hairpin_discard;
+	struct regpair tpa_coalesced_events /* total number of TPA aggregations */;
+	struct regpair tpa_aborts_num /* number of aggregations, which abnormally ended */;
+	struct regpair tpa_coalesced_bytes /* total TCP payload length in all TPA aggregations */;
 };
 
 
@@ -1121,10 +1613,8 @@ struct eth_pstorm_per_pf_stat {
 	struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
 	struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
 	struct regpair mpls_drop_pkts /* Dropped MPLS TX packets (E5 Only) */;
-/* Dropped GRE MPLS TX packets (E5 Only) */
-	struct regpair gre_mpls_drop_pkts;
-/* Dropped UDP MPLS TX packets (E5 Only) */
-	struct regpair udp_mpls_drop_pkts;
+	struct regpair gre_mpls_drop_pkts /* Dropped GRE MPLS TX packets (E5 Only) */;
+	struct regpair udp_mpls_drop_pkts /* Dropped UDP MPLS TX packets (E5 Only) */;
 };
 
 
@@ -1132,20 +1622,13 @@ struct eth_pstorm_per_pf_stat {
  * Ethernet TX Per Queue Stats
  */
 struct eth_pstorm_per_queue_stat {
-/* number of total bytes sent without errors */
-	struct regpair sent_ucast_bytes;
-/* number of total bytes sent without errors */
-	struct regpair sent_mcast_bytes;
-/* number of total bytes sent without errors */
-	struct regpair sent_bcast_bytes;
-/* number of total packets sent without errors */
-	struct regpair sent_ucast_pkts;
-/* number of total packets sent without errors */
-	struct regpair sent_mcast_pkts;
-/* number of total packets sent without errors */
-	struct regpair sent_bcast_pkts;
-/* number of total packets dropped due to errors */
-	struct regpair error_drop_pkts;
+	struct regpair sent_ucast_bytes /* number of total bytes sent without errors */;
+	struct regpair sent_mcast_bytes /* number of total bytes sent without errors */;
+	struct regpair sent_bcast_bytes /* number of total bytes sent without errors */;
+	struct regpair sent_ucast_pkts /* number of total packets sent without errors */;
+	struct regpair sent_mcast_pkts /* number of total packets sent without errors */;
+	struct regpair sent_bcast_pkts /* number of total packets sent without errors */;
+	struct regpair error_drop_pkts /* number of total packets dropped due to errors */;
 };
 
 
@@ -1155,25 +1638,22 @@ struct eth_pstorm_per_queue_stat {
 struct eth_rx_rate_limit {
 /* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
 	__le16 mult;
-/* Constant term to add (or subtract from number of cycles) */
-	__le16 cnst;
+	__le16 cnst /* Constant term to add (or subtract from number of cycles) */;
 	u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
 	u8 reserved0;
 	__le16 reserved1;
 };
 
 
-/* Update RSS indirection table entry command. One outstanding command supported
- * per PF.
+/*
+ * Update RSS indirection table entry command. One outstanding command supported per PF.
  */
 struct eth_tstorm_rss_update_data {
-/* Valid flag. Driver must set this flag, FW clear valid flag when ready for new
- * RSS update command.
+/* Valid flag. Driver must set this flag, FW clear valid flag when ready for new RSS update command.
+ *
  */
 	u8 valid;
-/* Global VPORT ID. If RSS is disable for VPORT, RSS update command will be
- * ignored.
- */
+/* Global VPORT ID. If RSS is disable for VPORT, RSS update command will be ignored. */
 	u8 vport_id;
 	u8 ind_table_index /* RSS indirect table index that will be updated. */;
 	u8 reserved;
@@ -1250,8 +1730,7 @@ union event_ring_data {
 	union rdma_eqe_data rdma_data /* Dedicated field for RDMA data */;
 	struct nvmf_eqe_data nvmf_data /* Dedicated field for NVMf data */;
 	struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
-/* VF Initial Cleanup data */
-	struct initial_cleanup_eqe_data vf_init_cleanup;
+	struct initial_cleanup_eqe_data vf_init_cleanup /* VF Initial Cleanup data */;
 };
 
 
@@ -1262,10 +1741,10 @@ struct event_ring_entry {
 	u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
 	u8 opcode /* Event Opcode (Per Protocol Type) */;
 	u8 reserved0 /* Reserved */;
-	u8 vfId /* vfId for this event, 0xFF if this is a PF event */;
+	u8 vf_id /* VF ID for this event, 0xFF if this is a PF event */;
 	__le16 echo /* Echo value from ramrod data on the host */;
-/* FW return code for SP ramrods. Use (according to protocol) eth_return_code,
- * or rdma_fw_return_code, or fcoe_completion_status
+/* FW return code for SP ramrods. Use (according to protocol) eth_return_code, or
+ * rdma_fw_return_code, or fcoe_completion_status, or LL2: 0-succeeded 1-failed
  */
 	u8 fw_return_code;
 	u8 flags;
@@ -1290,12 +1769,12 @@ struct event_ring_next_addr {
  */
 union event_ring_element {
 	struct event_ring_entry entry /* Event Ring Entry */;
-/* Event Ring Next Page Address */
-	struct event_ring_next_addr next_addr;
+	struct event_ring_next_addr next_addr /* Event Ring Next Page Address */;
 };
 
 
 
+
 /*
  * Ports mode
  */
@@ -1310,14 +1789,12 @@ enum fw_flow_ctrl_mode {
  * GFT profile type.
  */
 enum gft_profile_type {
-/* tunnel type, inner 4 tuple, IP type and L4 type match. */
-	GFT_PROFILE_TYPE_4_TUPLE,
+	GFT_PROFILE_TYPE_4_TUPLE /* tunnel type, inner 4 tuple, IP type and L4 type match. */,
 /* tunnel type, inner L4 destination port, IP type and L4 type match. */
 	GFT_PROFILE_TYPE_L4_DST_PORT,
 /* tunnel type, inner IP destination address and IP type match. */
 	GFT_PROFILE_TYPE_IP_DST_ADDR,
-/* tunnel type, inner IP source address and IP type match. */
-	GFT_PROFILE_TYPE_IP_SRC_ADDR,
+	GFT_PROFILE_TYPE_IP_SRC_ADDR /* tunnel type, inner IP source address and IP type match. */,
 	GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
 	MAX_GFT_PROFILE_TYPE
 };
@@ -1332,6 +1809,21 @@ struct hsi_fp_ver_struct {
 };
 
 
+/*
+ * TID HSI Structure
+ */
+struct hsi_tid {
+	__le32 bitfields;
+#define HSI_TID_ITID_MASK        0x3FFFF /* iTID. */
+#define HSI_TID_ITID_SHIFT       0
+#define HSI_TID_SEGMENT_ID_MASK  0x3 /* Segment ID (Set by FW / HSI function). */
+#define HSI_TID_SEGMENT_ID_SHIFT 18
+#define HSI_TID_OPAQUE_FID_MASK  0xFFF /* Opaque FID. */
+#define HSI_TID_OPAQUE_FID_SHIFT 20
+};
+
+
+
 /*
  * Integration Phase
  */
@@ -1339,6 +1831,12 @@ enum integ_phase {
 	INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
 	INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
 	INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
+/* E5 A0 - phase 1, ETH VFC bypass. Force RX vportId = portId */
+	INTEG_PHASE_E5_A0_VFC_BYPASS = 50,
+	INTEG_PHASE_E5_A0_PHASE_1 = 51 /* E5 A0 - phase 1 */,
+	INTEG_PHASE_E5_A0_PHASE_2 = 52 /* E5 A0 - phase 2 */,
+	INTEG_PHASE_E5_A0_PHASE_3_NO_RGFS = 53 /* E5 A0 - phase 3. RGFS not used. */,
+	INTEG_PHASE_E5_A0_PHASE_3 = 54 /* E5 A0 - phase 3 */,
 	MAX_INTEG_PHASE
 };
 
@@ -1347,72 +1845,57 @@ enum integ_phase {
  * Ports mode
  */
 enum iwarp_ll2_tx_queues {
-/* LL2 queue for OOO packets sent in-order by the driver */
-	IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
-/* LL2 queue for unaligned packets sent aligned by the driver */
-	IWARP_LL2_ALIGNED_TX_QUEUE,
-/* LL2 queue for unaligned packets sent aligned and was right-trimmed by the
- * driver
- */
+	IWARP_LL2_IN_ORDER_TX_QUEUE = 1 /* LL2 queue for OOO packets sent in-order by the driver */,
+	IWARP_LL2_ALIGNED_TX_QUEUE /* LL2 queue for unaligned packets sent aligned by the driver */,
+/* LL2 queue for unaligned packets sent aligned and was right-trimmed by the driver */
 	IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
 	IWARP_LL2_ERROR /* Error indication */,
 	MAX_IWARP_LL2_TX_QUEUES
 };
 
 
+
 /*
  * Malicious VF error ID
  */
 enum malicious_vf_error_id {
-	MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
-/* Writing to VF/PF channel when it is not ready */
-	VF_PF_CHANNEL_NOT_READY,
+	MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
+	VF_PF_CHANNEL_NOT_READY /* Writing to VF/PF channel when it is not ready */,
 	VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
 	VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
-/* TX packet is shorter then reported on BDs or from minimal size */
-	ETH_PACKET_TOO_SMALL,
-/* Tx packet with marked as insert VLAN when its illegal */
-	ETH_ILLEGAL_VLAN_MODE,
+	ETH_PACKET_TOO_SMALL /* TX packet is shorter than reported on BDs or from minimal size */,
+	ETH_ILLEGAL_VLAN_MODE /* Tx packet with marked as insert VLAN when its illegal */,
 	ETH_MTU_VIOLATION /* TX packet is greater then MTU */,
-/* TX packet has illegal inband tags marked */
-	ETH_ILLEGAL_INBAND_TAGS,
-/* Vlan cant be added to inband tag */
-	ETH_VLAN_INSERT_AND_INBAND_VLAN,
-/* indicated number of BDs for the packet is illegal */
-	ETH_ILLEGAL_NBDS,
+	ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
+	ETH_VLAN_INSERT_AND_INBAND_VLAN /* Vlan cant be added to inband tag */,
+	ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */,
 	ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
-/* There are not enough BDs for transmission of even one packet */
-	ETH_INSUFFICIENT_BDS,
+	ETH_INSUFFICIENT_BDS /* There are not enough BDs for transmission of even one packet */,
 	ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
 	ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
-/* empty BD (which not contains control flags) is illegal  */
-	ETH_ZERO_SIZE_BD,
+	ETH_ZERO_SIZE_BD /* empty BD (which not contains control flags) is illegal  */,
 	ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit  */,
-/* In LSO its expected that on the local BD ring there will be at least MSS
- * bytes of data
- */
+/* In LSO it's expected that on the local BD ring there will be at least MSS bytes of data */
 	ETH_INSUFFICIENT_PAYLOAD,
 	ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
 /* Tunneled packet with IPv6+Ext without a proper number of BDs */
 	ETH_TUNN_IPV6_EXT_NBD_ERR,
 	ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
 	ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
-/* packet scanned is too large (can be 9700 at most) */
-	ETH_PACKET_SIZE_TOO_LARGE,
-/* Tx packet with marked as insert VLAN when its illegal */
-	CORE_ILLEGAL_VLAN_MODE,
-/* indicated number of BDs for the packet is illegal */
-	CORE_ILLEGAL_NBDS,
+	ETH_PACKET_SIZE_TOO_LARGE /* packet scanned is too large (can be 9700 at most) */,
+	CORE_ILLEGAL_VLAN_MODE /* Tx packet with marked as insert VLAN when its illegal */,
+	CORE_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */,
 	CORE_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
-/* There are not enough BDs for transmission of even one packet */
-	CORE_INSUFFICIENT_BDS,
-/* TX packet is shorter then reported on BDs or from minimal size */
-	CORE_PACKET_TOO_SMALL,
+	CORE_INSUFFICIENT_BDS /* There are not enough BDs for transmission of even one packet */,
+	CORE_PACKET_TOO_SMALL /* TX packet is shorter than reported on BDs or from minimal size */,
 	CORE_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
 	CORE_VLAN_INSERT_AND_INBAND_VLAN /* Vlan cant be added to inband tag */,
 	CORE_MTU_VIOLATION /* TX packet is greater then MTU */,
 	CORE_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
 	CORE_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
+	CORE_PACKET_SIZE_TOO_LARGE /* packet scanned is too large (can be 9700 at most) */,
+	CORE_ILLEGAL_BD_FLAGS /* TX packet has illegal BD flags. */,
+	CORE_GSI_PACKET_VIOLATION /* TX packet GSI validation fail */,
 	MAX_MALICIOUS_VF_ERROR_ID
 };
 
@@ -1422,11 +1905,9 @@ enum malicious_vf_error_id {
  * Mstorm non-triggering VF zone
  */
 struct mstorm_non_trigger_vf_zone {
-/* VF statistic bucket */
-	struct eth_mstorm_per_queue_stat eth_queue_stat;
+	struct eth_mstorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
 /* VF RX queues producers */
-	struct eth_rx_prod_data
-		eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
+	struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
 };
 
 
@@ -1434,8 +1915,7 @@ struct mstorm_non_trigger_vf_zone {
  * Mstorm VF zone
  */
 struct mstorm_vf_zone {
-/* non-interrupt-triggering zone */
-	struct mstorm_non_trigger_vf_zone non_trigger;
+	struct mstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
 };
 
 
@@ -1451,35 +1931,43 @@ struct vlan_header {
  * outer tag configurations
  */
 struct outer_tag_config_struct {
-/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
- * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
- * else - 0.
+/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette Davis, UFP with Host
+ * Control mode, and UFP with DCB over base interface. Else - 0.
  */
 	u8 enable_stag_pri_change;
-/* If inner_to_outer_pri_map is initialize then set pri_map_valid */
-	u8 pri_map_valid;
+	u8 pri_map_valid /* When set, inner_to_outer_pri_map will be used */;
 	u8 reserved[2];
-/* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol
- * identifier and outer tag control information
+/* In case mf_mode is MF_OVLAN, this field specifies the outer Tag Protocol Identifier and outer Tag
+ * Control Information
  */
 	struct vlan_header outer_tag;
-/* Map from inner to outer priority. Set pri_map_valid when init map */
+/* Map from inner to outer priority. Used if pri_map_valid is set */
 	u8 inner_to_outer_pri_map[8];
 };
 
 
+/*
+ * Integration Phase
+ */
+enum path_init_actions {
+	PATH_INIT_NONE = 0 /* No Action */,
+	PATH_INIT_PBF_HALT_FIX = 1 /* CQ 103263 - PBF Halts under Heavy traffic load */,
+	MAX_PATH_INIT_ACTIONS
+};
+
+
 /*
  * personality per PF
  */
 enum personality_type {
 	BAD_PERSONALITY_TYP,
 	PERSONALITY_ISCSI /* iSCSI and LL2 */,
-	PERSONALITY_FCOE /* Fcoe and LL2 */,
-	PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
-	PERSONALITY_RDMA /* Roce and LL2 */,
-	PERSONALITY_CORE /* CORE(LL2) */,
+	PERSONALITY_FCOE /* FCoE and LL2 */,
+	PERSONALITY_RDMA_AND_ETH /* RoCE or IWARP, Ethernet and LL2 */,
+	PERSONALITY_RDMA /* RoCE and LL2 */,
+	PERSONALITY_CORE /* Core (LL2) */,
 	PERSONALITY_ETH /* Ethernet */,
-	PERSONALITY_TOE /* Toe and LL2 */,
+	PERSONALITY_TOE /* TOE and LL2 */,
 	MAX_PERSONALITY_TYPE
 };
 
@@ -1488,32 +1976,33 @@ enum personality_type {
  * tunnel configuration
  */
 struct pf_start_tunnel_config {
-/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
- * FW will use a default port
+/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set - FW will use a default port
+ *
  */
 	u8 set_vxlan_udp_port_flg;
-/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
- * FW will use a default port
+/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set - FW will use a default
+ * port
  */
 	u8 set_geneve_udp_port_flg;
-/* Set no-innet-L2 VXLAN tunnel UDP destination port to
- * no_inner_l2_vxlan_udp_port. If not set - FW will use a default port
+/* Set no-inner-L2 VXLAN tunnel UDP destination port to no_inner_l2_vxlan_udp_port. If not set - FW
+ * will use a default port
  */
 	u8 set_no_inner_l2_vxlan_udp_port_flg;
-	u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
-/* Rx classification scheme for l2 GENEVE tunnel. */
+/* Rx classification scheme for VXLAN tunnel. (use enum tunnel_clss) */
+	u8 tunnel_clss_vxlan;
+/* Rx classification scheme for L2 GENEVE tunnel. (use enum tunnel_clss) */
 	u8 tunnel_clss_l2geneve;
-/* Rx classification scheme for ip GENEVE tunnel. */
+/* Rx classification scheme for IP GENEVE tunnel. (use enum tunnel_clss) */
 	u8 tunnel_clss_ipgeneve;
-	u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
-	u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
+/* Rx classification scheme for L2 GRE tunnel. (use enum tunnel_clss) */
+	u8 tunnel_clss_l2gre;
+/* Rx classification scheme for IP GRE tunnel. (use enum tunnel_clss) */
+	u8 tunnel_clss_ipgre;
 /* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
 	__le16 vxlan_udp_port;
 /* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
 	__le16 geneve_udp_port;
-/* no-innet-L2 VXLAN  tunnel UDP destination port. Valid if
- * set_no_inner_l2_vxlan_udp_port_flg=1
- */
+/* no-inner-L2 VXLAN  tunnel UDP destination port. Valid if set_no_inner_l2_vxlan_udp_port_flg=1 */
 	__le16 no_inner_l2_vxlan_udp_port;
 	__le16 reserved[3];
 };
@@ -1523,34 +2012,27 @@ struct pf_start_tunnel_config {
  */
 struct pf_start_ramrod_data {
 	struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
-/* PBL address of consolidation queue */
-	struct regpair consolid_q_pbl_addr;
-/* tunnel configuration. */
-	struct pf_start_tunnel_config tunnel_config;
+	struct regpair consolid_q_pbl_addr /* PBL address of consolidation queue */;
+	struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */;
 	__le16 event_ring_sb_id /* Status block ID */;
-/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
-	u8 base_vf_id;
-	u8 num_vfs /* Amount of vfs owned by PF */;
+	u8 base_vf_id /* All Vf IDs owned by PF will start from baseVfId till baseVfId+numVfs */;
+	u8 num_vfs /* Number of VFs owned by PF */;
 	u8 event_ring_num_pages /* Number of PBL pages in event ring */;
 	u8 event_ring_sb_index /* Status block index */;
 	u8 path_id /* HW path ID (engine ID) */;
 	u8 warning_as_error /* In FW asserts, treat warning as error */;
-/* If not set - throw a warning for each ramrod (for debug) */
-	u8 dont_log_ramrods;
-	u8 personality /* define what type of personality is new PF */;
-/* Log type mask. Each bit set enables a corresponding event type logging.
- * Event types are defined as ASSERT_LOG_TYPE_xxx
+	u8 dont_log_ramrods /* If set, FW will not log ramrods */;
+	u8 personality /* PFs personality (use enum personality_type) */;
+/* Log type mask. Each bit set enables a corresponding event type logging. Event types are defined
+ * as ASSERT_LOG_TYPE_xxx
  */
 	__le16 log_type_mask;
-	u8 mf_mode /* Multi function mode */;
-	u8 integ_phase /* Integration phase */;
-/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
-	u8 allow_npar_tx_switching;
-	u8 reserved0;
-/* FP HSI version to be used by FW */
-	struct hsi_fp_ver_struct hsi_fp_ver;
-/* Outer tag configurations */
-	struct outer_tag_config_struct outer_tag_config;
+	u8 mf_mode /* Multi function mode (use enum mf_mode) */;
+	u8 integ_phase /* Integration phase (use enum integ_phase) */;
+	u8 allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in NPAR mode */;
+	u8 disable_path_init /* Disable Path Initialization bitmap (use enum path_init_actions) */;
+	struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
+	struct outer_tag_config_struct outer_tag_config /* Outer tag configurations */;
 };
 
 
@@ -1564,9 +2046,7 @@ struct protocol_dcb_data {
 	u8 dcb_priority /* DCB priority */;
 	u8 dcb_tc /* DCB TC */;
 	u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
-/* When DCB is enabled - if this flag is set, dont add VLAN 0 tag to untagged
- * frames
- */
+/* When DCB is enabled - if this flag is set, don't add VLAN 0 tag to untagged frames */
 	u8 dcb_dont_add_vlan0;
 };
 
@@ -1574,34 +2054,30 @@ struct protocol_dcb_data {
  * Update tunnel configuration
  */
 struct pf_update_tunnel_config {
-/* Update RX per PF tunnel classification scheme. */
-	u8 update_rx_pf_clss;
-/* Update per PORT default tunnel RX classification scheme for traffic with
- * unknown unicast outer MAC in NPAR mode.
+	u8 update_rx_pf_clss /* Update per-PF RX tunnel classification scheme. */;
+/* Update per-PORT default tunnel RX classification scheme for traffic with unknown unicast outer
+ * MAC in NPAR mode.
  */
 	u8 update_rx_def_ucast_clss;
-/* Update per PORT default tunnel RX classification scheme for traffic with non
- * unicast outer MAC in NPAR mode.
+/* Update per-PORT default tunnel RX classification scheme for traffic with non unicast outer MAC in
+ * NPAR mode.
  */
 	u8 update_rx_def_non_ucast_clss;
-/* Update VXLAN tunnel UDP destination port. */
-	u8 set_vxlan_udp_port_flg;
-/* Update GENEVE tunnel UDP destination port. */
-	u8 set_geneve_udp_port_flg;
-/* Update no-innet-L2 VXLAN  tunnel UDP destination port. */
+	u8 set_vxlan_udp_port_flg /* Update VXLAN tunnel UDP destination port. */;
+	u8 set_geneve_udp_port_flg /* Update GENEVE tunnel UDP destination port. */;
+/* Update no-inner-L2 VXLAN  tunnel UDP destination port. */
 	u8 set_no_inner_l2_vxlan_udp_port_flg;
-	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
-/* Classification scheme for l2 GENEVE tunnel. */
+	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. (use enum tunnel_clss) */;
+/* Classification scheme for L2 GENEVE tunnel. (use enum tunnel_clss) */
 	u8 tunnel_clss_l2geneve;
-/* Classification scheme for ip GENEVE tunnel. */
+/* Classification scheme for IP GENEVE tunnel. (use enum tunnel_clss) */
 	u8 tunnel_clss_ipgeneve;
-	u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
-	u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+	u8 tunnel_clss_l2gre /* Classification scheme for L2 GRE tunnel. (use enum tunnel_clss) */;
+	u8 tunnel_clss_ipgre /* Classification scheme for IP GRE tunnel. (use enum tunnel_clss) */;
 	u8 reserved;
 	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
 	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
-/* no-innet-L2 VXLAN  tunnel UDP destination port. */
-	__le16 no_inner_l2_vxlan_udp_port;
+	__le16 no_inner_l2_vxlan_udp_port /* no-inner-L2 VXLAN  tunnel UDP destination port. */;
 	__le16 reserved1[3];
 };
 
@@ -1609,37 +2085,33 @@ struct pf_update_tunnel_config {
  * Data for port update ramrod
  */
 struct pf_update_ramrod_data {
-/* Update Eth DCB  data indication (use enum dcb_dscp_update_mode) */
+/* If set - Update Eth DCB data (use enum dcb_dscp_update_mode) */
 	u8 update_eth_dcb_data_mode;
-/* Update FCOE DCB  data indication (use enum dcb_dscp_update_mode) */
+/* If set - Update FCOE DCB data (use enum dcb_dscp_update_mode) */
 	u8 update_fcoe_dcb_data_mode;
-/* Update iSCSI DCB  data indication (use enum dcb_dscp_update_mode) */
+/* If set - Update iSCSI DCB data (use enum dcb_dscp_update_mode) */
 	u8 update_iscsi_dcb_data_mode;
-	u8 update_roce_dcb_data_mode /* Update ROCE DCB  data indication */;
-/* Update RROCE (RoceV2) DCB  data indication */
+/* If set - Update ROCE DCB data (use enum dcb_dscp_update_mode) */
+	u8 update_roce_dcb_data_mode;
+/* If set - Update RROCE (RoceV2) DCB data (use enum dcb_dscp_update_mode) */
 	u8 update_rroce_dcb_data_mode;
-	u8 update_iwarp_dcb_data_mode /* Update IWARP DCB  data indication */;
-	u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
-/* Update Enable STAG Priority Change indication */
-	u8 update_enable_stag_pri_change;
-	struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
-	struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
-/* core iscsi related fields */
-	struct protocol_dcb_data iscsi_dcb_data;
-	struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
-/* core roce related fields */
-	struct protocol_dcb_data rroce_dcb_data;
-/* core iwarp related fields */
-	struct protocol_dcb_data iwarp_dcb_data;
-	__le16 mf_vlan /* new outer vlan id value */;
-/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
- * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
- * else - 0
+/* If set - Update IWARP DCB data (use enum dcb_dscp_update_mode) */
+	u8 update_iwarp_dcb_data_mode;
+	u8 update_mf_vlan_flag /* If set - Update MF Tag TCI */;
+	u8 update_enable_stag_pri_change /* If set - Update Enable STAG Priority Change */;
+	struct protocol_dcb_data eth_dcb_data /* eth  DCB data */;
+	struct protocol_dcb_data fcoe_dcb_data /* fcoe DCB data */;
+	struct protocol_dcb_data iscsi_dcb_data /* iscsi DCB data */;
+	struct protocol_dcb_data roce_dcb_data /* roce DCB data */;
+	struct protocol_dcb_data rroce_dcb_data /* roce DCB data */;
+	struct protocol_dcb_data iwarp_dcb_data /* iwarp DCB data */;
+	__le16 mf_vlan /* MF Tag TCI */;
+/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette Davis, UFP with Host
+ * Control mode, and UFP with DCB over base interface. else - 0.
  */
 	u8 enable_stag_pri_change;
 	u8 reserved;
-/* tunnel configuration. */
-	struct pf_update_tunnel_config tunnel_config;
+	struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */;
 };
 
 
@@ -1681,8 +2153,7 @@ struct rdma_sent_stats {
  * Pstorm non-triggering VF zone
  */
 struct pstorm_non_trigger_vf_zone {
-/* VF statistic bucket */
-	struct eth_pstorm_per_queue_stat eth_queue_stat;
+	struct eth_pstorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
 	struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
 };
 
@@ -1691,8 +2162,7 @@ struct pstorm_non_trigger_vf_zone {
  * Pstorm VF zone
  */
 struct pstorm_vf_zone {
-/* non-interrupt-triggering zone */
-	struct pstorm_non_trigger_vf_zone non_trigger;
+	struct pstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
 	struct regpair reserved[7] /* vf_zone size mus be power of 2 */;
 };
 
@@ -1703,7 +2173,7 @@ struct pstorm_vf_zone {
 struct ramrod_header {
 	__le32 cid /* Slowpath Connection CID */;
 	u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
-	u8 protocol_id /* Ramrod Protocol ID */;
+	u8 protocol_id /* Ramrod Protocol ID (use enum protocol_type) */;
 	__le16 echo /* Ramrod echo */;
 };
 
@@ -1723,22 +2193,20 @@ struct rdma_rcv_stats {
  */
 struct rl_update_ramrod_data {
 	u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
-/* Update DCQCN global params: timeout, g, k. */
-	u8 dcqcn_update_param_flg;
+	u8 dcqcn_update_param_flg /* Update DCQCN global params: timeout, g, k. */;
 	u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
 	u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
 	u8 rl_stop_flg /* Stop RL. */;
-	u8 rl_id_first /* ID of first or single RL, that will be updated. */;
-/* ID of last RL, that will be updated. If clear, single RL will updated. */
-	u8 rl_id_last;
 	u8 rl_dc_qcn_flg /* If set, RL will used for DCQCN. */;
 /* If set, alpha will be reset to 1 when the state machine is idle. */
 	u8 dcqcn_reset_alpha_on_idle;
-/* Byte counter threshold to change rate increase stage. */
-	u8 rl_bc_stage_th;
-/* Timer threshold to change rate increase stage. */
-	u8 rl_timer_stage_th;
+	u8 rl_bc_stage_th /* Byte counter threshold to change rate increase stage. */;
+	u8 rl_timer_stage_th /* Timer threshold to change rate increase stage. */;
 	u8 reserved1;
+	__le16 rl_id_first /* ID of first or single RL, that will be updated. */;
+/* ID of last RL, that will be updated. If clear, single RL will updated. */
+	__le16 rl_id_last;
+	__le16 reserved2;
 	__le32 rl_bc_rate /* Byte Counter Limit. */;
 	__le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
 	__le16 rl_r_ai /* Active increase rate. */;
@@ -1747,7 +2215,6 @@ struct rl_update_ramrod_data {
 	__le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
 	__le32 dcqcn_timeuot_us /* DCQCN timeout. */;
 	__le32 qcn_timeuot_us /* QCN timeout. */;
-	__le32 reserved2;
 };
 
 
@@ -1760,6 +2227,7 @@ struct slow_path_element {
 };
 
 
+
 /*
  * Tstorm non-triggering VF zone
  */
@@ -1769,41 +2237,26 @@ struct tstorm_non_trigger_vf_zone {
 
 
 struct tstorm_per_port_stat {
-/* packet is dropped because it was truncated in NIG */
-	struct regpair trunc_error_discard;
-/* packet is dropped because of Ethernet FCS error */
-	struct regpair mac_error_discard;
+	struct regpair trunc_error_discard /* packet is dropped because it was truncated in NIG */;
+	struct regpair mac_error_discard /* packet is dropped because of Ethernet FCS error */;
 /* packet is dropped because classification was unsuccessful */
 	struct regpair mftag_filter_discard;
 /* packet was passed to Ethernet and dropped because of no mac filter match */
 	struct regpair eth_mac_filter_discard;
-/* packet passed to Light L2 and dropped because Light L2 is not configured for
- * this PF
- */
+/* packet passed to Light L2 and dropped because Light L2 is not configured for this PF */
 	struct regpair ll2_mac_filter_discard;
-/* packet passed to Light L2 and dropped because Light L2 is not configured for
- * this PF
- */
+/* packet passed to Light L2 and dropped because Light L2 is not configured for this PF */
 	struct regpair ll2_conn_disabled_discard;
-/* packet is an ISCSI irregular packet */
-	struct regpair iscsi_irregular_pkt;
-/* packet is an FCOE irregular packet */
-	struct regpair fcoe_irregular_pkt;
-/* packet is an ROCE irregular packet */
-	struct regpair roce_irregular_pkt;
-/* packet is an IWARP irregular packet */
-	struct regpair iwarp_irregular_pkt;
-/* packet is an ETH irregular packet */
-	struct regpair eth_irregular_pkt;
-/* packet is an TOE irregular packet */
-	struct regpair toe_irregular_pkt;
-/* packet is an PREROCE irregular packet */
-	struct regpair preroce_irregular_pkt;
+	struct regpair iscsi_irregular_pkt /* packet is an ISCSI irregular packet */;
+	struct regpair fcoe_irregular_pkt /* packet is an FCOE irregular packet */;
+	struct regpair roce_irregular_pkt /* packet is an ROCE irregular packet */;
+	struct regpair iwarp_irregular_pkt /* packet is an IWARP irregular packet */;
+	struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */;
+	struct regpair toe_irregular_pkt /* packet is an TOE irregular packet */;
+	struct regpair preroce_irregular_pkt /* packet is an PREROCE irregular packet */;
 	struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
-/* VXLAN dropped packets */
-	struct regpair eth_vxlan_tunn_filter_discard;
-/* GENEVE dropped packets */
-	struct regpair eth_geneve_tunn_filter_discard;
+	struct regpair eth_vxlan_tunn_filter_discard /* VXLAN dropped packets */;
+	struct regpair eth_geneve_tunn_filter_discard /* GENEVE dropped packets */;
 	struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
 };
 
@@ -1812,8 +2265,7 @@ struct tstorm_per_port_stat {
  * Tstorm VF zone
  */
 struct tstorm_vf_zone {
-/* non-interrupt-triggering zone */
-	struct tstorm_non_trigger_vf_zone non_trigger;
+	struct tstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
 };
 
 
@@ -1821,20 +2273,16 @@ struct tstorm_vf_zone {
  * Tunnel classification scheme
  */
 enum tunnel_clss {
-/* Use MAC and VLAN from first L2 header for vport classification. */
+/* Use MAC and VLAN from outermost L2 header for vport classification. */
 	TUNNEL_CLSS_MAC_VLAN = 0,
-/* Use MAC from first L2 header and VNI from tunnel header for vport
- * classification
- */
+/* Use MAC from outermost L2 header and VNI from tunnel header for vport classification */
 	TUNNEL_CLSS_MAC_VNI,
-/* Use MAC and VLAN from last L2 header for vport classification */
+/* Use MAC and VLAN from inner L2 header for vport classification */
 	TUNNEL_CLSS_INNER_MAC_VLAN,
-/* Use MAC from last L2 header and VNI from tunnel header for vport
- * classification
- */
+/* Use MAC from inner L2 header and VNI from tunnel header for vport classification */
 	TUNNEL_CLSS_INNER_MAC_VNI,
-/* Use MAC and VLAN from last L2 header for vport classification. If no exact
- * match, use MAC and VLAN from first L2 header for classification.
+/* Use MAC and VLAN from inner L2 header for vport classification. If no exact match, use MAC and
+ * VLAN from outermost L2 header for vport classification.
  */
 	TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
 	MAX_TUNNEL_CLSS
@@ -1846,8 +2294,7 @@ enum tunnel_clss {
  * Ustorm non-triggering VF zone
  */
 struct ustorm_non_trigger_vf_zone {
-/* VF statistic bucket */
-	struct eth_ustorm_per_queue_stat eth_queue_stat;
+	struct eth_ustorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
 	struct regpair vf_pf_msg_addr /* VF-PF message address */;
 };
 
@@ -1865,8 +2312,7 @@ struct ustorm_trigger_vf_zone {
  * Ustorm VF zone
  */
 struct ustorm_vf_zone {
-/* non-interrupt-triggering zone */
-	struct ustorm_non_trigger_vf_zone non_trigger;
+	struct ustorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
 	struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
 };
 
@@ -1875,19 +2321,18 @@ struct ustorm_vf_zone {
  * VF-PF channel data
  */
 struct vf_pf_channel_data {
-/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel
- * is ready for a new transaction.
+/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel is ready for a new
+ * transaction.
  */
 	__le32 ready;
-/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is
- * valid.
- */
+/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is valid. */
 	u8 valid;
 	u8 reserved0;
 	__le16 reserved1;
 };
 
 
+
 /*
  * Ramrod data for VF start ramrod
  */
@@ -1896,10 +2341,9 @@ struct vf_start_ramrod_data {
 /* If set, initial cleanup ack will be sent to parent PF SP event queue */
 	u8 enable_flr_ack;
 	__le16 opaque_fid /* VF opaque FID */;
-	u8 personality /* define what type of personality is new VF */;
+	u8 personality /* VFs personality (use enum personality_type) */;
 	u8 reserved[7];
-/* FP HSI version to be used by FW */
-	struct hsi_fp_ver_struct hsi_fp_ver;
+	struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
 };
 
 
@@ -1918,12 +2362,9 @@ struct vf_stop_ramrod_data {
  * VF zone size mode.
  */
 enum vf_zone_size_mode {
-/* Default VF zone size. Up to 192 VF supported. */
-	VF_ZONE_SIZE_MODE_DEFAULT,
-/* Doubled VF zone size. Up to 96 VF supported. */
-	VF_ZONE_SIZE_MODE_DOUBLE,
-/* Quad VF zone size. Up to 48 VF supported. */
-	VF_ZONE_SIZE_MODE_QUAD,
+	VF_ZONE_SIZE_MODE_DEFAULT /* Default VF zone size. Up to 192 VF supported. */,
+	VF_ZONE_SIZE_MODE_DOUBLE /* Doubled VF zone size. Up to 96 VF supported. */,
+	VF_ZONE_SIZE_MODE_QUAD /* Quad VF zone size. Up to 48 VF supported. */,
 	MAX_VF_ZONE_SIZE_MODE
 };
 
@@ -1942,8 +2383,7 @@ struct xstorm_non_trigger_vf_zone {
  * Tstorm VF zone
  */
 struct xstorm_vf_zone {
-/* non-interrupt-triggering zone */
-	struct xstorm_non_trigger_vf_zone non_trigger;
+	struct xstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
 };
 
 
@@ -1968,9 +2408,7 @@ struct dmae_cmd {
 /* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
 #define DMAE_CMD_SRC_MASK              0x1
 #define DMAE_CMD_SRC_SHIFT             0
-/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
- * (use enum dmae_cmd_dst_enum)
- */
+/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None (use enum dmae_cmd_dst_enum) */
 #define DMAE_CMD_DST_MASK              0x3
 #define DMAE_CMD_DST_SHIFT             1
 /* Completion destination. 0 - PCie, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
@@ -1979,33 +2417,29 @@ struct dmae_cmd {
 /* Reset the CRC result (do not use the previous result as the seed) */
 #define DMAE_CMD_CRC_RESET_MASK        0x1
 #define DMAE_CMD_CRC_RESET_SHIFT       4
-/* Reset the source address in the next go to the same source address of the
- * previous go
- */
+/* Reset the source address in the next go to the same source address of the previous go */
 #define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1
 #define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
-/* Reset the destination address in the next go to the same destination address
- * of the previous go
+/* Reset the destination address in the next go to the same destination address of the previous go
+ *
  */
 #define DMAE_CMD_DST_ADDR_RESET_MASK   0x1
 #define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
-/* 0   completion function is the same as src function, 1 - 0 completion
- * function is the same as dst function (use enum dmae_cmd_comp_func_enum)
+/* 0 - completion function is the same as src function, 1 - completion function is the same as
+ * dst function (use enum dmae_cmd_comp_func_enum)
  */
 #define DMAE_CMD_COMP_FUNC_MASK        0x1
 #define DMAE_CMD_COMP_FUNC_SHIFT       7
-/* 0 - Do not write a completion word, 1 - Write a completion word
- * (use enum dmae_cmd_comp_word_en_enum)
+/* 0 - Do not write a completion word, 1 - Write a completion word (use enum
+ * dmae_cmd_comp_word_en_enum)
  */
 #define DMAE_CMD_COMP_WORD_EN_MASK     0x1
 #define DMAE_CMD_COMP_WORD_EN_SHIFT    8
-/* 0 - Do not write a CRC word, 1 - Write a CRC word
- * (use enum dmae_cmd_comp_crc_en_enum)
- */
+/* 0 - Do not write a CRC word, 1 - Write a CRC word (use enum dmae_cmd_comp_crc_en_enum) */
 #define DMAE_CMD_COMP_CRC_EN_MASK      0x1
 #define DMAE_CMD_COMP_CRC_EN_SHIFT     9
-/* The CRC word should be taken from the DMAE address space from address 9+X,
- * where X is the value in these bits.
+/* The CRC word should be taken from the DMAE address space from address 9+X, where X is the value
+ * in these bits.
  */
 #define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7
 #define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
@@ -2013,23 +2447,20 @@ struct dmae_cmd {
 #define DMAE_CMD_RESERVED1_SHIFT       13
 #define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
 #define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
-/* The field specifies how the completion word is affected by PCIe read error. 0
- * Send a regular completion, 1 - Send a completion with an error indication,
- * 2 do not send a completion (use enum dmae_cmd_error_handling_enum)
+/* The field specifies how the completion word is affected by PCIe read error. 0 - Send a regular
+ * completion, 1 - Send a completion with an error indication, 2 - do not send a completion (use
+ * enum dmae_cmd_error_handling_enum)
  */
 #define DMAE_CMD_ERR_HANDLING_MASK     0x3
 #define DMAE_CMD_ERR_HANDLING_SHIFT    16
-/* The port ID to be placed on the  RF FID  field of the GRC bus. this field is
- * used both when GRC is the destination and when it is the source of the DMAE
- * transaction.
+/* The port ID to be placed on the 'RF FID' field of the GRC bus. This field is used both when GRC
+ * is the destination and when it is the source of the DMAE transaction.
  */
 #define DMAE_CMD_PORT_ID_MASK          0x3
 #define DMAE_CMD_PORT_ID_SHIFT         18
-/* Source PCI function number [3:0] */
-#define DMAE_CMD_SRC_PF_ID_MASK        0xF
+#define DMAE_CMD_SRC_PF_ID_MASK        0xF /* Source PCI function number [3:0] */
 #define DMAE_CMD_SRC_PF_ID_SHIFT       20
-/* Destination PCI function number [3:0] */
-#define DMAE_CMD_DST_PF_ID_MASK        0xF
+#define DMAE_CMD_DST_PF_ID_MASK        0xF /* Destination PCI function number [3:0] */
 #define DMAE_CMD_DST_PF_ID_SHIFT       24
 #define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1 /* Source VFID valid */
 #define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
@@ -2037,10 +2468,8 @@ struct dmae_cmd {
 #define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
 #define DMAE_CMD_RESERVED2_MASK        0x3
 #define DMAE_CMD_RESERVED2_SHIFT       30
-/* PCIe source address low in bytes or GRC source address in DW */
-	__le32 src_addr_lo;
-/* PCIe source address high in bytes or reserved (if source is GRC) */
-	__le32 src_addr_hi;
+	__le32 src_addr_lo /* PCIe source address low in bytes or GRC source address in DW */;
+	__le32 src_addr_hi /* PCIe source address high in bytes or reserved (if source is GRC) */;
 /* PCIe destination address low in bytes or GRC destination address in DW */
 	__le32 dst_addr_lo;
 /* PCIe destination address high in bytes or reserved (if destination is GRC) */
@@ -2053,9 +2482,7 @@ struct dmae_cmd {
 #define DMAE_CMD_DST_VF_ID_SHIFT       8
 /* PCIe completion address low in bytes or GRC completion address in DW */
 	__le32 comp_addr_lo;
-/* PCIe completion address high in bytes or reserved (if completion address is
- * GRC)
- */
+/* PCIe completion address high in bytes or reserved (if completion address is GRC) */
 	__le32 comp_addr_hi;
 	__le32 comp_val /* Value to write to completion address */;
 	__le32 crc32 /* crc16 result */;
@@ -2115,9 +2542,7 @@ enum dmae_cmd_dst_enum {
 enum dmae_cmd_error_handling_enum {
 /* Send a regular completion (with no error indication) */
 	dmae_cmd_error_handling_send_regular_comp,
-/* Send a completion with an error indication (i.e. set bit 31 of the completion
- * word)
- */
+/* Send a completion with an error indication (i.e. set bit 31 of the completion word) */
 	dmae_cmd_error_handling_send_comp_with_err,
 	dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
 	MAX_DMAE_CMD_ERROR_HANDLING_ENUM
@@ -2136,40 +2561,37 @@ enum dmae_cmd_src_enum {
  */
 struct dmae_params {
 	__le32 flags;
-/* If set and the source is a block of length DMAE_MAX_RW_SIZE and the
- * destination is larger, the source block will be duplicated as many
- * times as required to fill the destination block. This is used mostly
- * to write a zeroed buffer to destination address using DMA
+/* If set and the source is a block of length DMAE_MAX_RW_SIZE and the destination is larger, the
+ * source block will be duplicated as many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address using DMA
  */
 #define DMAE_PARAMS_RW_REPL_SRC_MASK     0x1
 #define DMAE_PARAMS_RW_REPL_SRC_SHIFT    0
-/* If set, the source is a VF, and the source VF ID is taken from the
- * src_vf_id parameter.
- */
+/* If set, the source is a VF, and the source VF ID is taken from the src_vf_id parameter. */
 #define DMAE_PARAMS_SRC_VF_VALID_MASK    0x1
 #define DMAE_PARAMS_SRC_VF_VALID_SHIFT   1
-/* If set, the destination is a VF, and the destination VF ID is taken
- * from the dst_vf_id parameter.
+/* If set, the destination is a VF, and the destination VF ID is taken from the dst_vf_id parameter.
+ *
  */
 #define DMAE_PARAMS_DST_VF_VALID_MASK    0x1
 #define DMAE_PARAMS_DST_VF_VALID_SHIFT   2
-/* If set, a completion is sent to the destination function.
- * Otherwise its sent to the source function.
+/* If set, a completion is sent to the destination function. Otherwise it's sent to the source
+ * function.
  */
 #define DMAE_PARAMS_COMPLETION_DST_MASK  0x1
 #define DMAE_PARAMS_COMPLETION_DST_SHIFT 3
-/* If set, the port ID is taken from the port_id parameter.
- * Otherwise, the current port ID is used.
+/* If set, the port ID is taken from the port_id parameter. Otherwise, the current port ID is used.
+ *
  */
 #define DMAE_PARAMS_PORT_VALID_MASK      0x1
 #define DMAE_PARAMS_PORT_VALID_SHIFT     4
-/* If set, the source PF ID is taken from the src_pf_id parameter.
- * Otherwise, the current PF ID is used.
+/* If set, the source PF ID is taken from the src_pf_id parameter. Otherwise, the current PF ID is
+ * used.
  */
 #define DMAE_PARAMS_SRC_PF_VALID_MASK    0x1
 #define DMAE_PARAMS_SRC_PF_VALID_SHIFT   5
-/* If set, the destination PF ID is taken from the dst_pf_id parameter.
- * Otherwise, the current PF ID is used
+/* If set, the destination PF ID is taken from the dst_pf_id parameter. Otherwise, the current PF ID
+ * is used.
  */
 #define DMAE_PARAMS_DST_PF_VALID_MASK    0x1
 #define DMAE_PARAMS_DST_PF_VALID_SHIFT   6
@@ -2185,15 +2607,184 @@ struct dmae_params {
 };
 
 
+struct e4_mstorm_core_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+	__le16 word0 /* word0 */;
+	__le16 word1 /* word1 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_ystorm_core_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* word0 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le16 word1 /* word1 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
+	__le16 word4 /* word4 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+};
+
+
+struct e5_mstorm_core_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	u8 flags0;
+#define E5_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
+#define E5_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define E5_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
+#define E5_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
+#define E5_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
+#define E5_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+	__le16 word0 /* word0 */;
+	__le16 word1 /* word1 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e5_ystorm_core_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	u8 flags0;
+#define E5_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
+#define E5_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define E5_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
+#define E5_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
+#define E5_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
+#define E5_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* word0 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le16 word1 /* word1 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
+	__le16 word4 /* word4 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+};
+
+
 struct fw_asserts_ram_section {
 /* The offset of the section in the RAM in RAM lines (64-bit units) */
 	__le16 section_ram_line_offset;
-/* The size of the section in RAM lines (64-bit units) */
-	__le16 section_ram_line_size;
-/* The offset of the asserts list within the section in dwords */
-	u8 list_dword_offset;
-/* The size of an assert list element in dwords */
-	u8 list_element_dword_size;
+	__le16 section_ram_line_size /* The size of the section in RAM lines (64-bit units) */;
+	u8 list_dword_offset /* The offset of the asserts list within the section in dwords */;
+	u8 list_element_dword_size /* The size of an assert list element in dwords */;
 	u8 list_num_elements /* The number of elements in the asserts list */;
 /* The offset of the next list index field within the section in dwords */
 	u8 list_next_index_dword_offset;
@@ -2225,46 +2816,12 @@ struct fw_info {
 
 struct fw_info_location {
 	__le32 grc_addr /* GRC address where the fw_info struct is located. */;
-/* Size of the fw_info structure (thats located at the grc_addr). */
-	__le32 size;
-};
-
-
-/* DMAE parameters */
-struct ecore_dmae_params {
-	u32 flags;
-/* If QED_DMAE_PARAMS_RW_REPL_SRC flag is set and the
- * source is a block of length DMAE_MAX_RW_SIZE and the
- * destination is larger, the source block will be duplicated as
- * many times as required to fill the destination block. This is
- * used mostly to write a zeroed buffer to destination address
- * using DMA
- */
-#define ECORE_DMAE_PARAMS_RW_REPL_SRC_MASK        0x1
-#define ECORE_DMAE_PARAMS_RW_REPL_SRC_SHIFT       0
-#define ECORE_DMAE_PARAMS_SRC_VF_VALID_MASK       0x1
-#define ECORE_DMAE_PARAMS_SRC_VF_VALID_SHIFT      1
-#define ECORE_DMAE_PARAMS_DST_VF_VALID_MASK       0x1
-#define ECORE_DMAE_PARAMS_DST_VF_VALID_SHIFT      2
-#define ECORE_DMAE_PARAMS_COMPLETION_DST_MASK     0x1
-#define ECORE_DMAE_PARAMS_COMPLETION_DST_SHIFT    3
-#define ECORE_DMAE_PARAMS_PORT_VALID_MASK         0x1
-#define ECORE_DMAE_PARAMS_PORT_VALID_SHIFT        4
-#define ECORE_DMAE_PARAMS_SRC_PF_VALID_MASK       0x1
-#define ECORE_DMAE_PARAMS_SRC_PF_VALID_SHIFT      5
-#define ECORE_DMAE_PARAMS_DST_PF_VALID_MASK       0x1
-#define ECORE_DMAE_PARAMS_DST_PF_VALID_SHIFT      6
-#define ECORE_DMAE_PARAMS_RESERVED_MASK           0x1FFFFFF
-#define ECORE_DMAE_PARAMS_RESERVED_SHIFT          7
-	u8 src_vfid;
-	u8 dst_vfid;
-	u8 port_id;
-	u8 src_pfid;
-	u8 dst_pfid;
-	u8 reserved1;
-	__le16 reserved2;
+	__le32 size /* Size of the fw_info structure (that's located at the grc_addr). */;
 };
 
+
+
+
 /*
  * IGU cleanup command
  */
@@ -2272,13 +2829,11 @@ struct igu_cleanup {
 	__le32 sb_id_and_flags;
 #define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
 #define IGU_CLEANUP_RESERVED0_SHIFT    0
-/* cleanup clear - 0, set - 1 */
-#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1
+#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1 /* cleanup clear - 0, set - 1 */
 #define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
 #define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
 #define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
-/* must always be set (use enum command_type_bit) */
-#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1U
+#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1U /* must always be set (use enum command_type_bit) */
 #define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
 	__le32 reserved1;
 };
@@ -2303,8 +2858,7 @@ struct igu_command_reg_ctrl {
 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
 #define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
 #define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
-/* command typ: 0 - read, 1 - write */
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1 /* command typ: 0 - read, 1 - write */
 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
 };
 
@@ -2348,80 +2902,39 @@ struct igu_msix_vector {
 };
 
 
-struct mstorm_core_conn_ag_ctx {
-	u8 byte0 /* cdu_validation */;
-	u8 byte1 /* state */;
-	u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
-	u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
-	__le16 word0 /* word0 */;
-	__le16 word1 /* word1 */;
-	__le32 reg0 /* reg0 */;
-	__le32 reg1 /* reg1 */;
-};
-
-
 /*
  * per encapsulation type enabling flags
  */
-struct prs_reg_encapsulation_type_en {
+struct prs_encapsulation_type_en_flags {
 	u8 flags;
 /* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_ETH_OVER_GRE_ENABLE_MASK     0x1
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_ETH_OVER_GRE_ENABLE_SHIFT    0
 /* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_IP_OVER_GRE_ENABLE_MASK      0x1
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_IP_OVER_GRE_ENABLE_SHIFT     1
 /* Enable bit for VXLAN encapsulation. */
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_VXLAN_ENABLE_MASK            0x1
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_VXLAN_ENABLE_SHIFT           2
 /* Enable bit for T-Tag encapsulation. */
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_T_TAG_ENABLE_MASK            0x1
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_T_TAG_ENABLE_SHIFT           3
 /* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_ETH_OVER_GENEVE_ENABLE_MASK  0x1
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_ETH_OVER_GENEVE_ENABLE_SHIFT 4
 /* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_IP_OVER_GENEVE_ENABLE_MASK   0x1
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_IP_OVER_GENEVE_ENABLE_SHIFT  5
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_RESERVED_MASK                0x3
+#define PRS_ENCAPSULATION_TYPE_EN_FLAGS_RESERVED_SHIFT               6
 };
 
 
 enum pxp_tph_st_hint {
 	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
 	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
-/* Device Write and Host Read, or Host Write and Device Read */
-	TPH_ST_HINT_TARGET,
-/* Device Write and Host Read, or Host Write and Device Read - with temporal
- * reuse
- */
+	TPH_ST_HINT_TARGET /* Device Write and Host Read, or Host Write and Device Read */,
+/* Device Write and Host Read, or Host Write and Device Read - with temporal reuse */
 	TPH_ST_HINT_TARGET_PRIO,
 	MAX_PXP_TPH_ST_HINT
 };
@@ -2480,25 +2993,48 @@ struct qm_rf_opportunistic_mask {
 
 
 /*
- * QM hardware structure of QM map memory
+ * E4 QM hardware structure of QM map memory
  */
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
 	__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK          0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT         0
-#define QM_RF_PQ_MAP_RL_ID_MASK             0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT            1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK          0x1 /* PQ active */
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT         0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK             0xFF /* RL ID */
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT            1
 /* the first PQ associated with the VPORT and VOQ of this PQ */
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK          0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT         9
-#define QM_RF_PQ_MAP_VOQ_MASK               0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT              18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK          0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT         25
-#define QM_RF_PQ_MAP_RESERVED_MASK          0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT         26
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK          0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT         9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK               0x1F /* VOQ */
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT              18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK          0x1 /* RL active */
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT         25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK          0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT         26
+};
+
+
+/*
+ * E5 QM hardware structure of QM map memory
+ */
+struct qm_rf_pq_map_e5 {
+	__le32 reg;
+#define QM_RF_PQ_MAP_E5_PQ_VALID_MASK          0x1 /* PQ active */
+#define QM_RF_PQ_MAP_E5_PQ_VALID_SHIFT         0
+#define QM_RF_PQ_MAP_E5_RL_ID_MASK             0x1FF /* RL ID */
+#define QM_RF_PQ_MAP_E5_RL_ID_SHIFT            1
+/* the first PQ associated with the VPORT and VOQ of this PQ */
+#define QM_RF_PQ_MAP_E5_VP_PQ_ID_MASK          0x1FF
+#define QM_RF_PQ_MAP_E5_VP_PQ_ID_SHIFT         10
+#define QM_RF_PQ_MAP_E5_VOQ_MASK               0x3F /* VOQ */
+#define QM_RF_PQ_MAP_E5_VOQ_SHIFT              19
+#define QM_RF_PQ_MAP_E5_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_E5_WRR_WEIGHT_GROUP_SHIFT 25
+#define QM_RF_PQ_MAP_E5_RL_VALID_MASK          0x1 /* RL active */
+#define QM_RF_PQ_MAP_E5_RL_VALID_SHIFT         27
+#define QM_RF_PQ_MAP_E5_RESERVED_MASK          0xF
+#define QM_RF_PQ_MAP_E5_RESERVED_SHIFT         28
 };
 
 
@@ -2524,8 +3060,7 @@ struct sdm_agg_int_comp_params {
  */
 struct sdm_op_gen {
 	__le32 command;
-/* completion parameters 0-15 */
-#define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF /* completion parameters 0-15 */
 #define SDM_OP_GEN_COMP_PARAM_SHIFT 0
 #define SDM_OP_GEN_COMP_TYPE_MASK   0xF /* completion type 16-19 */
 #define SDM_OP_GEN_COMP_TYPE_SHIFT  16
@@ -2533,50 +3068,6 @@ struct sdm_op_gen {
 #define SDM_OP_GEN_RESERVED_SHIFT   20
 };
 
-struct ystorm_core_conn_ag_ctx {
-	u8 byte0 /* cdu_validation */;
-	u8 byte1 /* state */;
-	u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
-	u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
-	u8 byte2 /* byte2 */;
-	u8 byte3 /* byte3 */;
-	__le16 word0 /* word0 */;
-	__le32 reg0 /* reg0 */;
-	__le32 reg1 /* reg1 */;
-	__le16 word1 /* word1 */;
-	__le16 word2 /* word2 */;
-	__le16 word3 /* word3 */;
-	__le16 word4 /* word4 */;
-	__le32 reg2 /* reg2 */;
-	__le32 reg3 /* reg3 */;
-};
-
 /*********/
 /* DEBUG */
 /*********/
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index c5ef67f84..e18595d62 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_HSI_DEBUG_TOOLS__
 #define __ECORE_HSI_DEBUG_TOOLS__
 /****************************************/
@@ -16,6 +16,9 @@ enum block_id {
 	BLOCK_MISCS,
 	BLOCK_MISC,
 	BLOCK_DBU,
+	BLOCK_PCIE_CTL,
+	BLOCK_PCIE_PHY_TOP,
+	BLOCK_PGL2PEM,
 	BLOCK_PGLUE_B,
 	BLOCK_CNIG,
 	BLOCK_CPMU,
@@ -65,6 +68,8 @@ enum block_id {
 	BLOCK_MULD,
 	BLOCK_YULD,
 	BLOCK_XYLD,
+	BLOCK_PTLD,
+	BLOCK_YPLD,
 	BLOCK_PRM,
 	BLOCK_PBF_PB1,
 	BLOCK_PBF_PB2,
@@ -78,6 +83,12 @@ enum block_id {
 	BLOCK_TCFC,
 	BLOCK_IGU,
 	BLOCK_CAU,
+	BLOCK_RGFS,
+	BLOCK_RGFC,
+	BLOCK_RGSRC,
+	BLOCK_TGFS,
+	BLOCK_TGFC,
+	BLOCK_TGSRC,
 	BLOCK_UMAC,
 	BLOCK_XMAC,
 	BLOCK_MSTAT,
@@ -93,17 +104,45 @@ enum block_id {
 	BLOCK_LED,
 	BLOCK_AVS_WRAP,
 	BLOCK_PXPREQBUS,
+	BLOCK_CFC_PD,
+	BLOCK_QM_PD,
+	BLOCK_XS_PD,
+	BLOCK_YS_PD,
+	BLOCK_PS_PD,
+	BLOCK_US_PD,
+	BLOCK_TS_PD,
+	BLOCK_MS_PD,
+	BLOCK_RX_PD,
+	BLOCK_TX_PD,
+	BLOCK_PXP_PD,
+	BLOCK_HOST_PD,
+	BLOCK_NM_PD,
+	BLOCK_NMC_PD,
+	BLOCK_NW_PD,
+	BLOCK_BMB_PD,
 	BLOCK_BAR0_MAP,
 	BLOCK_MCP_FIO,
 	BLOCK_LAST_INIT,
+	BLOCK_PRS_FC_B,
+	BLOCK_PRS_FC_A,
 	BLOCK_PRS_FC,
+	BLOCK_TSEM_HC,
+	BLOCK_MSEM_HC,
+	BLOCK_USEM_HC,
+	BLOCK_XSEM_HC,
+	BLOCK_YSEM_HC,
+	BLOCK_PSEM_HC,
+	BLOCK_PBF_FC_B,
 	BLOCK_PBF_FC,
 	BLOCK_NIG_LB_FC,
+	BLOCK_NIG_RX,
 	BLOCK_NIG_LB_FC_PLLH,
 	BLOCK_NIG_TX_FC_PLLH,
 	BLOCK_NIG_TX_FC,
 	BLOCK_NIG_RX_FC_PLLH,
 	BLOCK_NIG_RX_FC,
+	BLOCK_NIG_LB,
+	BLOCK_NIG_TX,
 	MAX_BLOCK_ID
 };
 
@@ -140,15 +179,12 @@ enum bin_dbg_buffer_type {
  */
 struct dbg_attn_bit_mapping {
 	u16 data;
-/* The index of an attention in the blocks attentions list
- * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits
- * (if is_unused_bit_cnt=1)
+/* The index of an attention in the blocks attentions list (if is_unused_bit_cnt=0), or a number of
+ * consecutive unused attention bits (if is_unused_bit_cnt=1)
  */
 #define DBG_ATTN_BIT_MAPPING_VAL_MASK                0x7FFF
 #define DBG_ATTN_BIT_MAPPING_VAL_SHIFT               0
-/* if set, the val field indicates the number of consecutive unused attention
- * bits
- */
+/* if set, the val field indicates the number of consecutive unused attention bits */
 #define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK  0x1
 #define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
 };
@@ -158,15 +194,13 @@ struct dbg_attn_bit_mapping {
  * Attention block per-type data
  */
 struct dbg_attn_block_type_data {
-/* Offset of this block attention names in the debug attention name offsets
- * array
- */
+/* Offset of this block attention names in the debug attention name offsets array */
 	u16 names_offset;
 	u16 reserved1;
 	u8 num_regs /* Number of attention registers in this block */;
 	u8 reserved2;
-/* Offset of this blocks attention registers in the attention registers array
- * (in dbg_attn_reg units)
+/* Offset of this blocks attention registers in the attention registers array (in dbg_attn_reg
+ * units)
  */
 	u16 regs_offset;
 };
@@ -175,9 +209,7 @@ struct dbg_attn_block_type_data {
  * Block attentions
  */
 struct dbg_attn_block {
-/* attention block per-type data. Count must match the number of elements in
- * dbg_attn_type.
- */
+/* attention block per-type data. Count must match the number of elements in dbg_attn_type. */
 	struct dbg_attn_block_type_data per_type_data[2];
 };
 
@@ -193,8 +225,8 @@ struct dbg_attn_reg_result {
 /* Number of attention indexes in this register */
 #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK  0xFF
 #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
-/* The offset of this registers attentions within the blocks attentions list
- * (a value in the range 0..number of block attentions-1)
+/* The offset of this registers attentions within the blocks attentions list (a value in the range
+ * 0..number of block attentions-1)
  */
 	u16 block_attn_offset;
 	u16 reserved;
@@ -208,19 +240,14 @@ struct dbg_attn_reg_result {
 struct dbg_attn_block_result {
 	u8 block_id /* Registers block ID */;
 	u8 data;
-/* Value from dbg_attn_type enum */
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK  0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK  0x3 /* Value from dbg_attn_type enum */
 #define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
-/* Number of registers in block in which at least one attention bit is set */
+/* Number of registers in the block in which at least one attention bit is set */
 #define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK   0x3F
 #define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT  2
-/* Offset of this registers block attention names in the attention name offsets
- * array
- */
+/* Offset of this registers block attention names in the attention name offsets array */
 	u16 names_offset;
-/* result data for each register in the block in which at least one attention
- * bit is set
- */
+/* result data for each register in the block in which at least one attention bit is set */
 	struct dbg_attn_reg_result reg_results[15];
 };
 
@@ -234,9 +261,7 @@ struct dbg_mode_hdr {
 /* indicates if a mode expression should be evaluated (0/1) */
 #define DBG_MODE_HDR_EVAL_MODE_MASK         0x1
 #define DBG_MODE_HDR_EVAL_MODE_SHIFT        0
-/* offset (in bytes) in modes expression buffer. valid only if eval_mode is
- * set.
- */
+/* offset (in bytes) in modes expression buffer. valid only if eval_mode is set. */
 #define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK  0x7FFF
 #define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
 };
@@ -246,19 +271,17 @@ struct dbg_mode_hdr {
  */
 struct dbg_attn_reg {
 	struct dbg_mode_hdr mode /* Mode header */;
-/* The offset of this registers attentions within the blocks attentions list
- * (a value in the range 0..number of block attentions-1)
+/* The offset of this registers attentions within the blocks attentions list (a value in the range
+ * 0..number of block attentions-1)
  */
 	u16 block_attn_offset;
 	u32 data;
 /* STS attention register GRC address (in dwords) */
 #define DBG_ATTN_REG_STS_ADDRESS_MASK   0xFFFFFF
 #define DBG_ATTN_REG_STS_ADDRESS_SHIFT  0
-/* Number of attention in this register */
-#define DBG_ATTN_REG_NUM_REG_ATTN_MASK  0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK  0xFF /* Number of attention in this register */
 #define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
-/* STS_CLR attention register GRC address (in dwords) */
-	u32 sts_clr_address;
+	u32 sts_clr_address /* STS_CLR attention register GRC address (in dwords) */;
 	u32 mask_address /* MASK attention register GRC address (in dwords) */;
 };
 
@@ -278,9 +301,15 @@ enum dbg_attn_type {
  * Block debug data
  */
 struct dbg_block {
-	u8 name[15] /* Block name */;
+	u8 name[16] /* Block name */;
+	u8 flags;
+#define DBG_BLOCK_IS_PD_BLOCK_MASK  0x1 /* Indicates if this block is a PD block (0/1). */
+#define DBG_BLOCK_IS_PD_BLOCK_SHIFT 0
+#define DBG_BLOCK_RESERVED0_MASK    0x7F
+#define DBG_BLOCK_RESERVED0_SHIFT   1
 /* The letter (char) of the associated Storm, or 0 if no associated Storm. */
 	u8 associated_storm_letter;
+	u16 reserved1;
 };
 
 
@@ -295,64 +324,65 @@ struct dbg_block_chip {
 /* Indicates if this block has a reset register (0/1). */
 #define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK        0x1
 #define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT       1
-/* Indicates if this block should be taken out of reset before GRC Dump (0/1).
- * Valid only if has_reset_reg is set.
+/* Indicates if this block should be taken out of reset before GRC Dump (0/1). Valid only if
+ * has_reset_reg is set.
  */
 #define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK  0x1
 #define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
 /* Indicates if this block has a debug bus (0/1). */
 #define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK          0x1
 #define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT         3
-/* Indicates if this block has a latency events debug line (0/1). Valid only
- * if has_dbg_bus is set.
+/* Indicates if this block has a latency events debug line (0/1). Valid only if has_dbg_bus is set.
+ *
  */
 #define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK   0x1
 #define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT  4
 #define DBG_BLOCK_CHIP_RESERVED0_MASK            0x7
 #define DBG_BLOCK_CHIP_RESERVED0_SHIFT           5
-/* The DBG block client ID of this block/chip. Valid only if has_dbg_bus is
- * set.
- */
+/* The DBG block client ID of this block/chip. Valid only if has_dbg_bus is set. */
 	u8 dbg_client_id;
-/* The ID of the reset register of this block/chip in the dbg_reset_reg
- * array.
- */
+/* The ID of the reset register of this block/chip in the dbg_reset_reg array. */
 	u8 reset_reg_id;
-/* The bit offset of this block/chip in the reset register. Valid only if
- * has_reset_reg is set.
- */
+/* The bit offset of this block/chip in the reset register. Valid only if has_reset_reg is set. */
 	u8 reset_reg_bit_offset;
+/* The ID of the PD block that this block is connected to. Valid only if has_dbg_bus is set. */
+	u8 pd_block_id;
+/* The bit offset of this block/chip in the shift_dbg_high register of this blocks PD block. Valid
+ * only if has_dbg_bus is set.
+ */
+	u8 pd_shift_reg_bit_offset;
 	struct dbg_mode_hdr dbg_bus_mode /* Mode header */;
-	u16 reserved1;
-	u8 reserved2;
-/* Number of Debug Bus lines in this block/chip (excluding signature and latency
- * events). Valid only if has_dbg_bus is set.
+	u8 reserved1;
+/* Number of Debug Bus lines in this block/chip (excluding signature and latency events). Valid only
+ * if has_dbg_bus is set.
  */
 	u8 num_of_dbg_bus_lines;
-/* Offset of this block/chip Debug Bus lines in the Debug Bus lines array. Valid
- * only if has_dbg_bus is set.
+/* Offset of this block/chip Debug Bus lines in the Debug Bus lines array. Valid only if has_dbg_bus
+ * is set.
  */
 	u16 dbg_bus_lines_offset;
-/* GRC address of the Debug Bus dbg_select register (in dwords). Valid only if
- * has_dbg_bus is set.
+/* GRC address of the Debug Bus dbg_select register (in dwords). Valid only if has_dbg_bus is set.
+ *
  */
 	u32 dbg_select_reg_addr;
-/* GRC address of the Debug Bus dbg_dword_enable register (in dwords). Valid
- * only if has_dbg_bus is set.
+/* GRC address of the Debug Bus dbg_dword_enable register (in dwords). Valid only if has_dbg_bus is
+ * set.
  */
 	u32 dbg_dword_enable_reg_addr;
-/* GRC address of the Debug Bus dbg_shift register (in dwords). Valid only if
- * has_dbg_bus is set.
- */
+/* GRC address of the Debug Bus dbg_shift register (in dwords). Valid only if has_dbg_bus is set. */
 	u32 dbg_shift_reg_addr;
-/* GRC address of the Debug Bus dbg_force_valid register (in dwords). Valid only
- * if has_dbg_bus is set.
+/* GRC address of the Debug Bus dbg_force_valid register (in dwords). Valid only if has_dbg_bus is
+ * set.
  */
 	u32 dbg_force_valid_reg_addr;
-/* GRC address of the Debug Bus dbg_force_frame register (in dwords). Valid only
- * if has_dbg_bus is set.
+/* GRC address of the Debug Bus dbg_force_frame register (in dwords). Valid only if has_dbg_bus is
+ * set.
  */
 	u32 dbg_force_frame_reg_addr;
+/* GRC address of the Debug Bus shift_dbg_high register of the PD block of this block/chip (in
+ * dwords). Valid only if is_pd_block is set.
+ */
+	u32 pd_shift_reg_addr;
 };
 
 
@@ -360,12 +390,9 @@ struct dbg_block_chip {
  * Chip-specific block user debug data
  */
 struct dbg_block_chip_user {
-/* Number of debug bus lines in this block (excluding signature and latency
- * events).
- */
+/* Number of debug bus lines in this block (excluding signature and latency events). */
 	u8 num_of_dbg_bus_lines;
-/* Indicates if this block has a latency events debug line (0/1). */
-	u8 has_latency_events;
+	u8 has_latency_events /* Indicates if this block has a latency events debug line (0/1). */;
 /* Offset of this blocks lines in the debug bus line name offsets array. */
 	u16 names_offset;
 };
@@ -384,17 +411,16 @@ struct dbg_block_user {
  */
 struct dbg_bus_line {
 	u8 data;
-/* Number of groups in the line (0-3) */
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK  0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK  0xF /* Number of groups in the line (0-3) */
 #define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
 /* Indicates if this is a 128b line (0) or a 256b line (1). */
 #define DBG_BUS_LINE_IS_256B_MASK        0x1
 #define DBG_BUS_LINE_IS_256B_SHIFT       4
 #define DBG_BUS_LINE_RESERVED_MASK       0x7
 #define DBG_BUS_LINE_RESERVED_SHIFT      5
-/* Four 2-bit values, indicating the size of each group minus 1 (i.e.
- * value=0 means size=1, value=1 means size=2, etc), starting from lsb.
- * The sizes are in dwords (if is_256b=0) or in qwords (if is_256b=1).
+/* Four 2-bit values, indicating the size of each group minus 1 (i.e. value=0 means size=1, value=1
+ * means size=2, etc), starting from lsb. The sizes are in dwords (if is_256b=0) or in qwords (if
+ * is_256b=1).
  */
 	u8 group_sizes;
 };
@@ -415,17 +441,14 @@ struct dbg_dump_cond_hdr {
  */
 struct dbg_dump_mem {
 	u32 dword0;
-/* register address (in dwords) */
-#define DBG_DUMP_MEM_ADDRESS_MASK       0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_MASK       0xFFFFFF /* register address (in dwords) */
 #define DBG_DUMP_MEM_ADDRESS_SHIFT      0
 #define DBG_DUMP_MEM_MEM_GROUP_ID_MASK  0xFF /* memory group ID */
 #define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
 	u32 dword1;
-/* register size (in dwords) */
-#define DBG_DUMP_MEM_LENGTH_MASK        0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_MASK        0xFFFFFF /* register size (in dwords) */
 #define DBG_DUMP_MEM_LENGTH_SHIFT       0
-/* indicates if the register is wide-bus */
-#define DBG_DUMP_MEM_WIDE_BUS_MASK      0x1
+#define DBG_DUMP_MEM_WIDE_BUS_MASK      0x1 /* indicates if the register is wide-bus */
 #define DBG_DUMP_MEM_WIDE_BUS_SHIFT     24
 #define DBG_DUMP_MEM_RESERVED_MASK      0x7F
 #define DBG_DUMP_MEM_RESERVED_SHIFT     25
@@ -437,11 +460,9 @@ struct dbg_dump_mem {
  */
 struct dbg_dump_reg {
 	u32 data;
-/* register address (in dwords) */
 #define DBG_DUMP_REG_ADDRESS_MASK   0x7FFFFF /* register address (in dwords) */
 #define DBG_DUMP_REG_ADDRESS_SHIFT  0
-/* indicates if the register is wide-bus */
-#define DBG_DUMP_REG_WIDE_BUS_MASK  0x1
+#define DBG_DUMP_REG_WIDE_BUS_MASK  0x1 /* indicates if the register is wide-bus */
 #define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
 #define DBG_DUMP_REG_LENGTH_MASK    0xFF /* register size (in dwords) */
 #define DBG_DUMP_REG_LENGTH_SHIFT   24
@@ -475,14 +496,11 @@ struct dbg_idle_chk_cond_hdr {
  */
 struct dbg_idle_chk_cond_reg {
 	u32 data;
-/* Register GRC address (in dwords) */
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK   0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK   0x7FFFFF /* Register GRC address (in dwords) */
 #define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT  0
-/* indicates if the register is wide-bus */
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK  0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK  0x1 /* indicates if the register is wide-bus */
 #define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-/* value from block_id enum */
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK  0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK  0xFF /* value from block_id enum */
 #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
 	u16 num_entries /* number of registers entries to check */;
 	u8 entry_size /* size of registers entry (in dwords) */;
@@ -495,14 +513,11 @@ struct dbg_idle_chk_cond_reg {
  */
 struct dbg_idle_chk_info_reg {
 	u32 data;
-/* Register GRC address (in dwords) */
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK   0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK   0x7FFFFF /* Register GRC address (in dwords) */
 #define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT  0
-/* indicates if the register is wide-bus */
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK  0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK  0x1 /* indicates if the register is wide-bus */
 #define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-/* value from block_id enum */
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK  0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK  0xFF /* value from block_id enum */
 #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
 	u16 size /* register size in dwords */;
 	struct dbg_mode_hdr mode /* Mode header */;
@@ -536,11 +551,9 @@ struct dbg_idle_chk_result_hdr {
  */
 struct dbg_idle_chk_result_reg_hdr {
 	u8 data;
-/* indicates if this register is a memory */
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK  0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK  0x1 /* indicates if this register is a memory */
 #define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
-/* register index within the failing rule */
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK  0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK  0x7F /* register index within the failing rule */
 #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
 	u8 start_entry /* index of the first checked entry */;
 	u16 size /* register size in dwords */;
@@ -558,13 +571,9 @@ struct dbg_idle_chk_rule {
 	u8 num_info_regs /* number of info registers */;
 	u8 num_imms /* number of immediates in the condition */;
 	u8 reserved1;
-/* offset of this rules registers in the idle check register array
- * (in dbg_idle_chk_reg units)
- */
+/* offset of this rules registers in the idle check register array (in dbg_idle_chk_reg units) */
 	u16 reg_offset;
-/* offset of this rules immediate values in the immediate values array
- * (in dwords)
- */
+/* offset of this rules immediate values in the immediate values array (in dwords) */
 	u16 imm_offset;
 };
 
@@ -587,12 +596,10 @@ struct dbg_idle_chk_rule_parsing_data {
  * idle check severity types
  */
 enum dbg_idle_chk_severity_types {
-/* idle check failure should cause an error */
-	IDLE_CHK_SEVERITY_ERROR,
-/* idle check failure should cause an error only if theres no traffic */
+	IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+/* idle check failure should cause an error only if there's no traffic */
 	IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
-/* idle check failure should cause a warning */
-	IDLE_CHK_SEVERITY_WARNING,
+	IDLE_CHK_SEVERITY_WARNING /* idle check failure should cause a warning */,
 	MAX_DBG_IDLE_CHK_SEVERITY_TYPES
 };
 
@@ -605,8 +612,7 @@ struct dbg_reset_reg {
 	u32 data;
 #define DBG_RESET_REG_ADDR_MASK        0xFFFFFF /* GRC address (in dwords) */
 #define DBG_RESET_REG_ADDR_SHIFT       0
-/* indicates if this register is removed (0/1). */
-#define DBG_RESET_REG_IS_REMOVED_MASK  0x1
+#define DBG_RESET_REG_IS_REMOVED_MASK  0x1 /* indicates if this register is removed (0/1). */
 #define DBG_RESET_REG_IS_REMOVED_SHIFT 24
 #define DBG_RESET_REG_RESERVED_MASK    0x7F
 #define DBG_RESET_REG_RESERVED_SHIFT   25
@@ -617,24 +623,25 @@ struct dbg_reset_reg {
  * Debug Bus block data
  */
 struct dbg_bus_block_data {
-/* 4 bit value, bit i set -> dword/qword i is enabled in block. */
-	u8 enable_mask;
-/* Number of dwords/qwords to cyclically  right the blocks output (0-3). */
-	u8 right_shift;
-/* 4 bit value, bit i set -> dword/qword i is forced valid in block. */
-	u8 force_valid_mask;
+	u8 enable_mask /* 4 bit value, bit i set -> dword/qword i is enabled in block. */;
+	u8 right_shift /* Number of dwords/qwords to cyclically  right the blocks output (0-3). */;
+	u8 force_valid_mask /* 4 bit value, bit i set -> dword/qword i is forced valid in block. */;
 /* 4 bit value, bit i set -> dword/qword i frame bit is forced in block. */
 	u8 force_frame_mask;
-/* bit i set -> dword i contains this blocks data (after shifting). */
-	u8 dword_mask;
+	u8 dword_mask /* bit i set -> dword i contains this blocks data (after shifting). */;
 	u8 line_num /* Debug line number to select */;
 	u8 hw_id /* HW ID associated with the block */;
 	u8 flags;
+/* 0/1. If 1, the debug line (after right shift) is shifted left by 4 dwords, to the upper 128 bits
+ * of the debug bus. Valid only for 128b debug lines.
+ */
+#define DBG_BUS_BLOCK_DATA_SHIFT_LEFT_4_MASK  0x1
+#define DBG_BUS_BLOCK_DATA_SHIFT_LEFT_4_SHIFT 0
 /* 0/1. If 1, the debug line is 256b, otherwise its 128b. */
 #define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK  0x1
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RESERVED_MASK      0x7F
-#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT     1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 1
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK      0x3F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT     2
 };
 
 
@@ -660,17 +667,15 @@ enum dbg_bus_constraint_ops {
  * Debug Bus trigger state data
  */
 struct dbg_bus_trigger_state_data {
-/* Message length (in cycles) to be used for message-based trigger constraints.
- * If set to 0, message length is based only on frame bit received from HW.
+/* Message length (in cycles) to be used for message-based trigger constraints. If set to 0, message
+ * length is based only on frame bit received from HW.
  */
 	u8 msg_len;
-/* A bit for each dword in the debug bus cycle, indicating if this dword appears
- * in a trigger constraint (1) or not (0)
+/* A bit for each dword in the debug bus cycle, indicating if this dword appears in a trigger
+ * constraint (1) or not (0)
  */
 	u8 constraint_dword_mask;
-/* Storm ID to trigger on. Valid only when triggering on Storm data.
- * (use enum dbg_storms)
- */
+/* Storm ID to trigger on. Valid only when triggering on Storm data. (use enum dbg_storms) */
 	u8 storm_id;
 	u8 reserved;
 };
@@ -712,10 +717,8 @@ struct dbg_bus_storm_eid_mask_params {
  * Debug Bus Storm EID filter params
  */
 union dbg_bus_storm_eid_params {
-/* EID range filter params */
-	struct dbg_bus_storm_eid_range_params range;
-/* EID mask filter params */
-	struct dbg_bus_storm_eid_mask_params mask;
+	struct dbg_bus_storm_eid_range_params range /* EID range filter params */;
+	struct dbg_bus_storm_eid_mask_params mask /* EID mask filter params */;
 };
 
 /*
@@ -723,12 +726,11 @@ union dbg_bus_storm_eid_params {
  */
 struct dbg_bus_storm_data {
 	u8 enabled /* indicates if the Storm is enabled for recording */;
-	u8 mode /* Storm debug mode, valid only if the Storm is enabled */;
+/* Storm debug mode, valid only if the Storm is enabled (use enum dbg_bus_storm_modes) */
+	u8 mode;
 	u8 hw_id /* HW ID associated with the Storm */;
 	u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
-/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is
- * set,
- */
+/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is set. */
 	u8 eid_range_not_mask;
 	u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
 /* EID filter params to filter on. Valid only if eid_filter_en is set. */
@@ -741,76 +743,66 @@ struct dbg_bus_storm_data {
  */
 struct dbg_bus_data {
 	u32 app_version /* The tools version number of the application */;
-	u8 state /* The current debug bus state */;
-	u8 mode_256b_en /* Indicates if the 256 bit mode is enabled */;
+	u8 state /* The current debug bus state (use enum dbg_bus_states) */;
+	u8 e4_256b_mode_en /* Indicates if the E4 256 bit mode is enabled */;
 	u8 num_enabled_blocks /* Number of blocks enabled for recording */;
 	u8 num_enabled_storms /* Number of Storms enabled for recording */;
-	u8 target /* Output target */;
+	u8 target /* Output target (use enum dbg_bus_targets) */;
 	u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
 	u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
-/* Indicates if timestamp recording is enabled (0/1) */
-	u8 timestamp_input_en;
+	u8 timestamp_input_en /* Indicates if timestamp recording is enabled (0/1) */;
 	u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
-/* If true, the next added constraint belong to the filter. Otherwise,
- * it belongs to the last added trigger state. Valid only if either filter or
- * triggers are enabled.
+/* If true, the next added constraint belong to the filter. Otherwise, it belongs to the last added
+ * trigger state. Valid only if either filter or triggers are enabled.
  */
 	u8 adding_filter;
-/* Indicates if the recording filter should be applied before the trigger.
- * Valid only if both filter and trigger are enabled (0/1)
+/* Indicates if the recording filter should be applied before the trigger. Valid only if both filter
+ * and trigger are enabled (0/1)
  */
 	u8 filter_pre_trigger;
-/* Indicates if the recording filter should be applied after the trigger.
- * Valid only if both filter and trigger are enabled (0/1)
+/* Indicates if the recording filter should be applied after the trigger. Valid only if both filter
+ * and trigger are enabled (0/1)
  */
 	u8 filter_post_trigger;
-/* Indicates if the recording trigger is enabled (0/1) */
-	u8 trigger_en;
-/* A bit for each dword in the debug bus cycle, indicating if this dword
- * appears in a filter constraint (1) or not (0)
+	u8 trigger_en /* Indicates if the recording trigger is enabled (0/1) */;
+/* A bit for each dword in the debug bus cycle, indicating if this dword appears in a filter
+ * constraint (1) or not (0)
  */
 	u8 filter_constraint_dword_mask;
 	u8 next_trigger_state /* ID of next trigger state to be added */;
-/* ID of next filter/trigger constraint to be added */
-	u8 next_constraint_id;
-/* trigger states data */
-	struct dbg_bus_trigger_state_data trigger_states[3];
-/* Message length (in cycles) to be used for message-based filter constraints.
- * If set to 0 message length is based only on frame bit received from HW.
+	u8 next_constraint_id /* ID of next filter/trigger constraint to be added */;
+	struct dbg_bus_trigger_state_data trigger_states[3] /* trigger states data */;
+/* Message length (in cycles) to be used for message-based filter constraints. If set to 0, message
+ * length is based only on frame bit received from HW.
  */
 	u8 filter_msg_len;
 /* Indicates if the other engine sends it NW recording to this engine (0/1) */
 	u8 rcv_from_other_engine;
-/* A bit for each dword in the debug bus cycle, indicating if this dword is
- * recorded (1) or not (0)
+/* A bit for each dword in the debug bus cycle, indicating if this dword is recorded (1) or not (0)
+ *
  */
 	u8 blocks_dword_mask;
-/* Indicates if there are dwords in the debug bus cycle which are recorded
- * by more tan one block (0/1)
+/* Indicates if there are dwords in the debug bus cycle which are recorded by more than one block
+ * (0/1)
  */
 	u8 blocks_dword_overlap;
-/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
- * HW ID of dword/qword i
- */
+/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the HW ID of dword/qword i */
 	u32 hw_id_mask;
-/* Debug Bus PCI buffer data. Valid only when the target is
- * DBG_BUS_TARGET_ID_PCI.
- */
+/* Debug Bus PCI buffer data. Valid only when the target is DBG_BUS_TARGET_ID_PCI. */
 	struct dbg_bus_pci_buf_data pci_buf;
-/* Debug Bus data for each block */
-	struct dbg_bus_block_data blocks[132];
-/* Debug Bus data for each block */
-	struct dbg_bus_storm_data storms[6];
+	struct dbg_bus_block_data blocks[132] /* Debug Bus data for each block */;
+	struct dbg_bus_storm_data storms[6] /* Debug Bus data for each Storm */;
 };
 
 
+
+
 /*
  * Debug bus states
  */
 enum dbg_bus_states {
 	DBG_BUS_STATE_IDLE /* debug bus idle state (not recording) */,
-/* debug bus is ready for configuration and recording */
-	DBG_BUS_STATE_READY,
+	DBG_BUS_STATE_READY /* debug bus is ready for configuration and recording */,
 	DBG_BUS_STATE_RECORDING /* debug bus is currently recording */,
 	DBG_BUS_STATE_STOPPED /* debug bus recording has stopped */,
 	MAX_DBG_BUS_STATES
@@ -831,11 +823,13 @@ enum dbg_bus_storm_modes {
 	DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */,
 	DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */,
 	DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */,
+	DBG_BUS_STORM_MODE_FAST_DBGMUX /* fast DBGMUX (fast debug - E5 only) */,
 	DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */,
-/* recording handlers with store messages (fast debug) */
-	DBG_BUS_STORM_MODE_RH_WITH_STORE,
-	DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */,
-	DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */,
+/* recording handlers with no compression (fast debug - E5 only) */
+	DBG_BUS_STORM_MODE_RH_NO_COMPRESS,
+	DBG_BUS_STORM_MODE_RH_WITH_STORE /* recording handlers with store messages (fast debug) */,
+	DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug - E4 only) */,
+	DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow debug - E4 only) */,
 	MAX_DBG_BUS_STORM_MODES
 };
 
@@ -844,8 +838,7 @@ enum dbg_bus_storm_modes {
  * Debug bus target IDs
  */
 enum dbg_bus_targets {
-/* records debug bus to DBG block internal buffer */
-	DBG_BUS_TARGET_ID_INT_BUF,
+	DBG_BUS_TARGET_ID_INT_BUF /* records debug bus to DBG block internal buffer */,
 	DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */,
 	DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */,
 	MAX_DBG_BUS_TARGETS
@@ -857,12 +850,10 @@ enum dbg_bus_targets {
  * GRC Dump data
  */
 struct dbg_grc_data {
-/* Indicates if the GRC parameters were initialized */
-	u8 params_initialized;
+	u8 params_initialized /* Indicates if the GRC parameters were initialized */;
 	u8 reserved1;
 	u16 reserved2;
-/* Value of each GRC parameter. Array size must match the enum dbg_grc_params.
- */
+/* Value of each GRC parameter. Array size must match the enum dbg_grc_params. */
 	u32 param_val[48];
 };
 
@@ -894,7 +885,7 @@ enum dbg_grc_params {
 	DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */,
-	DBG_GRC_PARAM_RESERVD1 /* reserved */,
+	DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_DMAE /* dump PRS memories (0/1) */,
@@ -903,20 +894,16 @@ enum dbg_grc_params {
 	DBG_GRC_PARAM_DUMP_DIF /* dump DIF memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */,
 	DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */,
-	DBG_GRC_PARAM_RESERVED2 /* reserved */,
-/* MCP Trace meta data size in bytes */
-	DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
-/* preset: exclude all memories from dump (1 only) */
-	DBG_GRC_PARAM_EXCLUDE_ALL,
-/* preset: include memories for crash dump (1 only) */
-	DBG_GRC_PARAM_CRASH,
-/* perform dump only if MFW is responding (0/1) */
-	DBG_GRC_PARAM_PARITY_SAFE,
+	DBG_GRC_PARAM_DUMP_SEM /* dump SEM memories (0/1) */,
+	DBG_GRC_PARAM_MCP_TRACE_META_SIZE /* MCP Trace meta data size in bytes */,
+	DBG_GRC_PARAM_EXCLUDE_ALL /* preset: exclude all memories from dump (1 only) */,
+	DBG_GRC_PARAM_CRASH /* preset: include memories for crash dump (1 only) */,
+	DBG_GRC_PARAM_PARITY_SAFE /* perform dump only if MFW is responding (0/1) */,
 	DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
 	DBG_GRC_PARAM_NO_MCP /* dont perform MCP commands (0/1) */,
 	DBG_GRC_PARAM_NO_FW_VER /* dont read FW/MFW version (0/1) */,
-	DBG_GRC_PARAM_RESERVED3 /* reserved */,
+	DBG_GRC_PARAM_DUMP_GFS /* dump GFS memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_MCP_HW_DUMP /* dump MCP HW Dump (0/1) */,
 	DBG_GRC_PARAM_DUMP_ILT_CDUC /* dump ILT CDUC client (0/1) */,
 	DBG_GRC_PARAM_DUMP_ILT_CDUT /* dump ILT CDUT client (0/1) */,
@@ -988,6 +975,19 @@ enum dbg_status {
 	DBG_STATUS_FILTER_SINGLE_HW_ID,
 	DBG_STATUS_TRIGGER_SINGLE_HW_ID,
 	DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
+	DBG_STATUS_MDUMP2_INVALID_LOG_DATA,
+	DBG_STATUS_MDUMP2_INVALID_LOG_SIZE,
+	DBG_STATUS_MDUMP2_INVALID_SIGNATURE,
+	DBG_STATUS_MDUMP2_INVALID_LOG_HDR,
+	DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS,
+	DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE,
+	DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC,
+	DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF,
+	DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS,
+	DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS,
+	DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP,
+	DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG,
+	DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE,
 	MAX_DBG_STATUS
 };
 
@@ -1011,8 +1011,7 @@ enum dbg_storms {
  */
 struct idle_chk_data {
 	u32 buf_size /* Idle check buffer size in dwords */;
-/* Indicates if the idle check buffer size was set (0/1) */
-	u8 buf_size_set;
+	u8 buf_size_set /* Indicates if the idle check buffer size was set (0/1) */;
 	u8 reserved1;
 	u16 reserved2;
 };
@@ -1034,8 +1033,7 @@ struct dbg_tools_data {
 	struct dbg_bus_data bus /* Debug Bus data */;
 	struct idle_chk_data idle_chk /* Idle Check data */;
 	u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
-/* Indicates if a block is in reset state (0/1) */
-	u8 block_in_reset[132];
+	u8 block_in_reset[132] /* Indicates if a block is in reset state (0/1) */;
 	u8 chip_id /* Chip ID (from enum chip_ids) */;
 	u8 hw_type /* HW Type */;
 	u8 num_ports /* Number of ports in the chip */;
@@ -1045,8 +1043,24 @@ struct dbg_tools_data {
 	u8 use_dmae /* Indicates if DMAE should be used */;
 	u8 reserved;
 	struct pretend_params pretend /* Current pretend parameters */;
-/* Numbers of registers that were read since last log */
-	u32 num_regs_read;
+	u32 num_regs_read /* Numbers of registers that were read since last log */;
+};
+
+
+
+/*
+ * ILT Clients
+ */
+enum ilt_clients {
+	ILT_CLI_CDUC,
+	ILT_CLI_CDUT,
+	ILT_CLI_QM,
+	ILT_CLI_TM,
+	ILT_CLI_SRC,
+	ILT_CLI_TSDM,
+	ILT_CLI_RGFS,
+	ILT_CLI_TGFS,
+	MAX_ILT_CLIENTS
 };
 
 
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index bd7bd8658..b49de54ae 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_HSI_ETH__
 #define __ECORE_HSI_ETH__
 /************************************************************************/
@@ -14,242 +14,234 @@
 /*
  * The eth storm context for the Tstorm
  */
-struct tstorm_eth_conn_st_ctx {
+struct e4_tstorm_eth_conn_st_ctx {
 	__le32 reserved[4];
 };
 
 /*
  * The eth storm context for the Pstorm
  */
-struct pstorm_eth_conn_st_ctx {
+struct e4_pstorm_eth_conn_st_ctx {
 	__le32 reserved[8];
 };
 
 /*
  * The eth storm context for the Xstorm
  */
-struct xstorm_eth_conn_st_ctx {
+struct e4_xstorm_eth_conn_st_ctx {
 	__le32 reserved[60];
 };
 
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
 	u8 reserved0 /* cdu_validation */;
 	u8 state /* state */;
 	u8 flags0;
-/* exist_in_qm0 */
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
-/* exist_in_qm1 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK               0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT              1
-/* exist_in_qm2 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK               0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT              2
-/* exist_in_qm3 */
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK               0x1 /* bit4 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT              4
-/* cf_array_active */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK               0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT              5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK               0x1 /* bit6 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT              6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK               0x1 /* bit7 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT              7
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1 /* exist_in_qm0 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK               0x1 /* exist_in_qm1 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK               0x1 /* exist_in_qm2 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1 /* exist_in_qm3 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK               0x1 /* bit4 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK               0x1 /* cf_array_active */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK               0x1 /* bit6 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK               0x1 /* bit7 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT              7
 	u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK               0x1 /* bit8 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT              0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK               0x1 /* bit9 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT              1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK               0x1 /* bit10 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT              2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK                   0x1 /* bit11 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT                  3
-#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK            0x1 /* bit12 */
-#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT           4
-#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK            0x1 /* bit13 */
-#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT           5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1 /* bit14 */
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1 /* bit15 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK               0x1 /* bit8 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK               0x1 /* bit9 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK               0x1 /* bit10 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK                   0x1 /* bit11 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT                  3
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK            0x1 /* bit12 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT           4
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK            0x1 /* bit13 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT           5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1 /* bit14 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1 /* bit15 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
 	u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK                     0x3 /* timer0cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT                    0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK                     0x3 /* timer1cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT                    2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    4
-/* timer_stop_all */
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK                     0x3 /* timer0cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT                    0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK                     0x3 /* timer1cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT                    2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3 /* timer_stop_all */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    6
 	u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK                     0x3 /* cf4 */
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT                    0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK                     0x3 /* cf5 */
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT                    2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK                     0x3 /* cf6 */
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT                    4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK                     0x3 /* cf7 */
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT                    6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK                     0x3 /* cf4 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT                    0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK                     0x3 /* cf5 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT                    2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK                     0x3 /* cf6 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT                    4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK                     0x3 /* cf7 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT                    6
 	u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK                     0x3 /* cf8 */
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT                    0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK                     0x3 /* cf9 */
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT                    2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK                    0x3 /* cf10 */
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT                   4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK                    0x3 /* cf11 */
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT                   6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK                     0x3 /* cf8 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT                    0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK                     0x3 /* cf9 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT                    2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK                    0x3 /* cf10 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT                   4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK                    0x3 /* cf11 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT                   6
 	u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK                    0x3 /* cf12 */
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT                   0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK                    0x3 /* cf13 */
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT                   2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK                    0x3 /* cf14 */
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT                   4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK                    0x3 /* cf15 */
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT                   6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK                    0x3 /* cf12 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT                   0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK                    0x3 /* cf13 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT                   2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK                    0x3 /* cf14 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT                   4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK                    0x3 /* cf15 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT                   6
 	u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3 /* cf16 */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
-/* cf_array_cf */
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK                   0x3 /* cf18 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT                  4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK            0x3 /* cf19 */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3 /* cf16 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3 /* cf_array_cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK                   0x3 /* cf18 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK            0x3 /* cf19 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
 	u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK                0x3 /* cf20 */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK              0x3 /* cf21 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT             2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK               0x3 /* cf22 */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT              4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK                   0x1 /* cf0en */
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                  6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK                   0x1 /* cf1en */
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                  7
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK                0x3 /* cf20 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK              0x3 /* cf21 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK               0x3 /* cf22 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK                   0x1 /* cf0en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK                   0x1 /* cf1en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                  7
 	u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK                   0x1 /* cf4en */
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT                  2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK                   0x1 /* cf5en */
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT                  3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK                   0x1 /* cf6en */
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT                  4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK                   0x1 /* cf7en */
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT                  5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK                   0x1 /* cf8en */
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT                  6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK                   0x1 /* cf9en */
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT                  7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK                   0x1 /* cf4en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK                   0x1 /* cf5en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK                   0x1 /* cf6en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK                   0x1 /* cf7en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK                   0x1 /* cf8en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK                   0x1 /* cf9en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT                  7
 	u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK                  0x1 /* cf10en */
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT                 0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK                  0x1 /* cf11en */
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT                 1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK                  0x1 /* cf12en */
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT                 2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK                  0x1 /* cf13en */
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT                 3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK                  0x1 /* cf14en */
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT                 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK                  0x1 /* cf15en */
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT                 5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1 /* cf16en */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
-/* cf_array_cf_en */
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK                  0x1 /* cf10en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK                  0x1 /* cf11en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK                  0x1 /* cf12en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK                  0x1 /* cf13en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK                  0x1 /* cf14en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK                  0x1 /* cf15en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1 /* cf16en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1 /* cf_array_cf_en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
 	u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK                0x1 /* cf18en */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1 /* cf19en */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1 /* cf20en */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK              0x1 /* cf21en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT             3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1 /* cf22en */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1 /* cf23en */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK              0x1 /* rule0en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT             6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK              0x1 /* rule1en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT             7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK                0x1 /* cf18en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1 /* cf19en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1 /* cf20en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK              0x1 /* cf21en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1 /* cf22en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1 /* cf23en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK              0x1 /* rule0en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK              0x1 /* rule1en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT             7
 	u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK              0x1 /* rule2en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT             0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK              0x1 /* rule3en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT             1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1 /* rule4en */
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK            0x1 /* rule8en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK                 0x1 /* rule9en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT                7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK              0x1 /* rule2en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK              0x1 /* rule3en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1 /* rule4en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK            0x1 /* rule8en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK                 0x1 /* rule9en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT                7
 	u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK                0x1 /* rule10en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT               0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK                0x1 /* rule11en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT               1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK            0x1 /* rule12en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK            0x1 /* rule13en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK                0x1 /* rule14en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT               4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK                0x1 /* rule15en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT               5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK                0x1 /* rule16en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT               6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK                0x1 /* rule17en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT               7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK                0x1 /* rule10en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK                0x1 /* rule11en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK            0x1 /* rule12en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK            0x1 /* rule13en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK                0x1 /* rule14en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK                0x1 /* rule15en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK                0x1 /* rule16en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK                0x1 /* rule17en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT               7
 	u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK                0x1 /* rule18en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT               0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK                0x1 /* rule19en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT               1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK            0x1 /* rule20en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK            0x1 /* rule21en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK            0x1 /* rule22en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK            0x1 /* rule23en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK            0x1 /* rule24en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK            0x1 /* rule25en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK                0x1 /* rule18en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK                0x1 /* rule19en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK            0x1 /* rule20en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK            0x1 /* rule21en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK            0x1 /* rule22en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK            0x1 /* rule23en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK            0x1 /* rule24en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK            0x1 /* rule25en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
 	u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1 /* bit16 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1 /* bit17 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1 /* bit18 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1 /* bit19 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1 /* bit20 */
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1 /* bit21 */
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK              0x3 /* cf23 */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1 /* bit16 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1 /* bit17 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1 /* bit18 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1 /* bit19 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1 /* bit20 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1 /* bit21 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK              0x3 /* cf23 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
 	u8 edpm_event_id /* byte2 */;
 	__le16 physical_q0 /* physical_q0 */;
 	__le16 e5_reserved1 /* physical_q1 */;
@@ -303,89 +295,89 @@ struct xstorm_eth_conn_ag_ctx {
 	__le16 word15 /* word15 */;
 };
 
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
 	u8 byte0 /* cdu_validation */;
 	u8 byte1 /* state */;
 	u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK      0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT     0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK      0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT     1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK      0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT     2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK      0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT     3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK      0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT     4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK      0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT     5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK       0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT      6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK      0x1 /* exist_in_qm0 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT     0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK      0x1 /* exist_in_qm1 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT     1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK      0x1 /* bit2 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT     2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK      0x1 /* bit3 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT     3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK      0x1 /* bit4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT     4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK      0x1 /* bit5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT     5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK       0x3 /* timer0cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT      6
 	u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK       0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT      0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK       0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT      2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK       0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT      4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK       0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT      6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK       0x3 /* timer1cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT      0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK       0x3 /* timer2cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT      2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK       0x3 /* timer_stop_all */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT      4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK       0x3 /* cf4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT      6
 	u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK       0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT      0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK       0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT      2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK       0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT      4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK       0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT      6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK       0x3 /* cf5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT      0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK       0x3 /* cf6 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT      2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK       0x3 /* cf7 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT      4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK       0x3 /* cf8 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT      6
 	u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK       0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT      0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK      0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT     2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK     0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT    4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK     0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT    5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK     0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT    6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK     0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT    7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK       0x3 /* cf9 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT      0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK      0x3 /* cf10 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT     2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK     0x1 /* cf0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT    4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK     0x1 /* cf1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT    5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK     0x1 /* cf2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT    6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK     0x1 /* cf3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT    7
 	u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK     0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT    0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK     0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT    1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK     0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT    2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK     0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT    3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK     0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT    4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK     0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT    5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK    0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT   6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK   0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT  7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK     0x1 /* cf4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT    0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK     0x1 /* cf5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT    1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK     0x1 /* cf6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT    2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK     0x1 /* cf7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT    3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK     0x1 /* cf8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT    4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK     0x1 /* cf9en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT    5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK    0x1 /* cf10en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT   6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK   0x1 /* rule0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT  7
 	u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK   0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT  0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK   0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT  1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK   0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT  2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK   0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT  3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK   0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT  4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK  0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK   0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT  6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK   0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT  7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK   0x1 /* rule1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT  0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK   0x1 /* rule2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT  1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK   0x1 /* rule3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT  2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK   0x1 /* rule4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT  3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK   0x1 /* rule5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT  4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK  0x1 /* rule6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK   0x1 /* rule7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT  6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK   0x1 /* rule8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT  7
 	__le32 reg0 /* reg0 */;
 	__le32 reg1 /* reg1 */;
 	__le32 reg2 /* reg2 */;
@@ -410,41 +402,41 @@ struct tstorm_eth_conn_ag_ctx {
 /*
  * The eth storm context for the Ystorm
  */
-struct ystorm_eth_conn_st_ctx {
+struct e4_ystorm_eth_conn_st_ctx {
 	__le32 reserved[8];
 };
 
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
 	u8 byte0 /* cdu_validation */;
 	u8 state /* state */;
 	u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1 /* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1 /* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK      0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT     4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1 /* exist_in_qm0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1 /* exist_in_qm1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3 /* cf0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK      0x3 /* cf1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT     4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3 /* cf2 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
 	u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1 /* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK   0x1 /* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT  1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1 /* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1 /* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1 /* rule1en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1 /* rule2en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1 /* rule3en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1 /* rule4en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              7
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1 /* cf0en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK   0x1 /* cf1en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT  1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1 /* cf2en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1 /* rule0en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1 /* rule1en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1 /* rule2en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1 /* rule3en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1 /* rule4en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              7
 	u8 tx_q0_int_coallecing_timeset /* byte2 */;
 	u8 byte3 /* byte3 */;
 	__le16 word0 /* word0 */;
@@ -458,66 +450,63 @@ struct ystorm_eth_conn_ag_ctx {
 	__le32 reg3 /* reg3 */;
 };
 
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
 	u8 byte0 /* cdu_validation */;
 	u8 byte1 /* state */;
 	u8 flags0;
-/* exist_in_qm0 */
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                   0
-/* exist_in_qm1 */
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                   1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK     0x3 /* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT    2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK     0x3 /* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT    4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    6
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK                    0x1 /* exist_in_qm0 */
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                   0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK                    0x1 /* exist_in_qm1 */
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                   1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK     0x3 /* timer0cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT    2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK     0x3 /* timer1cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT    4
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    6
 	u8 flags1;
-/* timer_stop_all */
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK               0x3 /* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT              2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK               0x3 /* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT              4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK       0x3 /* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT      6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3 /* timer_stop_all */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK               0x3 /* cf4 */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT              2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK               0x3 /* cf5 */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT              4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK       0x3 /* cf6 */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT      6
 	u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK  0x1 /* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK  0x1 /* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK            0x1 /* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT           4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK            0x1 /* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT           5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK    0x1 /* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT   6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                 0x1 /* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                7
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK  0x1 /* cf0en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK  0x1 /* cf1en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK            0x1 /* cf4en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT           4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK            0x1 /* cf5en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT           5
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK    0x1 /* cf6en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT   6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                 0x1 /* rule0en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                7
 	u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                 0x1 /* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                 0x1 /* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                 0x1 /* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                 0x1 /* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK                 0x1 /* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT                7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                 0x1 /* rule1en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                 0x1 /* rule2en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                 0x1 /* rule3en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                 0x1 /* rule4en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK                 0x1 /* rule8en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT                7
 	u8 byte2 /* byte2 */;
 	u8 byte3 /* byte3 */;
 	__le16 word0 /* conn_dpi */;
@@ -533,68 +522,801 @@ struct ustorm_eth_conn_ag_ctx {
 /*
  * The eth storm context for the Ustorm
  */
-struct ustorm_eth_conn_st_ctx {
+struct e4_ustorm_eth_conn_st_ctx {
 	__le32 reserved[40];
 };
 
 /*
  * The eth storm context for the Mstorm
  */
-struct mstorm_eth_conn_st_ctx {
+struct e4_mstorm_eth_conn_st_ctx {
 	__le32 reserved[8];
 };
 
 /*
  * eth connection context
  */
-struct eth_conn_context {
-/* tstorm storm context */
-	struct tstorm_eth_conn_st_ctx tstorm_st_context;
+struct e4_eth_conn_context {
+	struct e4_tstorm_eth_conn_st_ctx tstorm_st_context /* tstorm storm context */;
 	struct regpair tstorm_st_padding[2] /* padding */;
-/* pstorm storm context */
-	struct pstorm_eth_conn_st_ctx pstorm_st_context;
-/* xstorm storm context */
-	struct xstorm_eth_conn_st_ctx xstorm_st_context;
-/* xstorm aggregative context */
-	struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
-/* tstorm aggregative context */
-	struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
-/* ystorm storm context */
-	struct ystorm_eth_conn_st_ctx ystorm_st_context;
-/* ystorm aggregative context */
-	struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
-/* ustorm aggregative context */
-	struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
-/* ustorm storm context */
-	struct ustorm_eth_conn_st_ctx ustorm_st_context;
-/* mstorm storm context */
-	struct mstorm_eth_conn_st_ctx mstorm_st_context;
+	struct e4_pstorm_eth_conn_st_ctx pstorm_st_context /* pstorm storm context */;
+	struct e4_xstorm_eth_conn_st_ctx xstorm_st_context /* xstorm storm context */;
+	struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
+	struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
+	struct e4_ystorm_eth_conn_st_ctx ystorm_st_context /* ystorm storm context */;
+	struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context /* ystorm aggregative context */;
+	struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
+	struct e4_ustorm_eth_conn_st_ctx ustorm_st_context /* ustorm storm context */;
+	struct e4_mstorm_eth_conn_st_ctx mstorm_st_context /* mstorm storm context */;
+};
+
+
+
+
+
+
+
+
+/*
+ * The eth storm context for the Tstorm
+ */
+struct e5_tstorm_eth_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/*
+ * The eth storm context for the Pstorm
+ */
+struct e5_pstorm_eth_conn_st_ctx {
+	__le32 reserved[8];
+};
+
+/*
+ * The eth storm context for the Xstorm
+ */
+struct e5_xstorm_eth_conn_st_ctx {
+	__le32 reserved[72];
+};
+
+struct e5_xstorm_eth_conn_ag_ctx {
+	u8 reserved0 /* cdu_validation */;
+	u8 state_and_core_id /* state_and_core_id */;
+	u8 flags0;
+#define E5_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK                   0x1 /* exist_in_qm0 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT                  0
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK                      0x1 /* exist_in_qm1 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT                     1
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK                      0x1 /* exist_in_qm2 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT                     2
+#define E5_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK                   0x1 /* exist_in_qm3 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT                  3
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK                      0x1 /* bit4 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT                     4
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK                      0x1 /* cf_array_active */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT                     5
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK                      0x1 /* bit6 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT                     6
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK                      0x1 /* bit7 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT                     7
+	u8 flags1;
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK                      0x1 /* bit8 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT                     0
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK                      0x1 /* bit9 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT                     1
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK                      0x1 /* bit10 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT                     2
+#define E5_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK                          0x1 /* bit11 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT                         3
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_COPY_CONDITION_LO_MASK         0x1 /* bit12 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_COPY_CONDITION_LO_SHIFT        4
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_COPY_CONDITION_HI_MASK         0x1 /* bit13 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_COPY_CONDITION_HI_SHIFT        5
+#define E5_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK                 0x1 /* bit14 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT                6
+#define E5_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK                   0x1 /* bit15 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT                  7
+	u8 flags2;
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF0_MASK                            0x3 /* timer0cf */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT                           0
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF1_MASK                            0x3 /* timer1cf */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT                           2
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF2_MASK                            0x3 /* timer2cf */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                           4
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF3_MASK                            0x3 /* timer_stop_all */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT                           6
+	u8 flags3;
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF4_MASK                            0x3 /* cf4 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT                           0
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF5_MASK                            0x3 /* cf5 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT                           2
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF6_MASK                            0x3 /* cf6 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT                           4
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF7_MASK                            0x3 /* cf7 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT                           6
+	u8 flags4;
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF8_MASK                            0x3 /* cf8 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT                           0
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF9_MASK                            0x3 /* cf9 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT                           2
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF10_MASK                           0x3 /* cf10 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT                          4
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF11_MASK                           0x3 /* cf11 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT                          6
+	u8 flags5;
+#define E5_XSTORM_ETH_CONN_AG_CTX_HP_CF_MASK                          0x3 /* cf12 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_HP_CF_SHIFT                         0
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF13_MASK                           0x3 /* cf13 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT                          2
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF14_MASK                           0x3 /* cf14 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT                          4
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF15_MASK                           0x3 /* cf15 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT                          6
+	u8 flags6;
+#define E5_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK               0x3 /* cf16 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT              0
+#define E5_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK               0x3 /* cf_array_cf */
+#define E5_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT              2
+#define E5_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK                          0x3 /* cf18 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT                         4
+#define E5_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK                   0x3 /* cf19 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT                  6
+	u8 flags7;
+#define E5_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK                       0x3 /* cf20 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT                      0
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK                     0x3 /* cf21 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT                    2
+#define E5_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK                      0x3 /* cf22 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT                     4
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK                          0x1 /* cf0en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                         6
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK                          0x1 /* cf1en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                         7
+	u8 flags8;
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                          0x1 /* cf2en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                         0
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK                          0x1 /* cf3en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                         1
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK                          0x1 /* cf4en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT                         2
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK                          0x1 /* cf5en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT                         3
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK                          0x1 /* cf6en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT                         4
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK                          0x1 /* cf7en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT                         5
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK                          0x1 /* cf8en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT                         6
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK                          0x1 /* cf9en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT                         7
+	u8 flags9;
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK                         0x1 /* cf10en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT                        0
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK                         0x1 /* cf11en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT                        1
+#define E5_XSTORM_ETH_CONN_AG_CTX_HP_CF_EN_MASK                       0x1 /* cf12en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_HP_CF_EN_SHIFT                      2
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK                         0x1 /* cf13en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT                        3
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK                         0x1 /* cf14en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT                        4
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK                         0x1 /* cf15en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT                        5
+#define E5_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK            0x1 /* cf16en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT           6
+#define E5_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK            0x1 /* cf_array_cf_en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT           7
+	u8 flags10;
+#define E5_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK                       0x1 /* cf18en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT                      0
+#define E5_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK                0x1 /* cf19en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT               1
+#define E5_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK                    0x1 /* cf20en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT                   2
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK                     0x1 /* cf21en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT                    3
+#define E5_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK                   0x1 /* cf22en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT                  4
+#define E5_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK         0x1 /* cf23en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT        5
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK                     0x1 /* rule0en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT                    6
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK                     0x1 /* rule1en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT                    7
+	u8 flags11;
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK                     0x1 /* rule2en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT                    0
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK                     0x1 /* rule3en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT                    1
+#define E5_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK                 0x1 /* rule4en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT                2
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                        0x1 /* rule5en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                       3
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                        0x1 /* rule6en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                       4
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                        0x1 /* rule7en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                       5
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK                   0x1 /* rule8en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT                  6
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK                        0x1 /* rule9en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT                       7
+	u8 flags12;
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK                       0x1 /* rule10en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT                      0
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK                       0x1 /* rule11en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT                      1
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK                   0x1 /* rule12en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT                  2
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK                   0x1 /* rule13en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT                  3
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK                       0x1 /* rule14en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT                      4
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK                       0x1 /* rule15en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT                      5
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK                       0x1 /* rule16en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT                      6
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK                       0x1 /* rule17en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT                      7
+	u8 flags13;
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK                       0x1 /* rule18en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT                      0
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK                       0x1 /* rule19en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT                      1
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK                   0x1 /* rule20en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT                  2
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK                   0x1 /* rule21en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT                  3
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK                   0x1 /* rule22en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT                  4
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK                   0x1 /* rule23en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT                  5
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK                   0x1 /* rule24en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT                  6
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK                   0x1 /* rule25en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT                  7
+	u8 flags14;
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK               0x1 /* bit16 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT              0
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK             0x1 /* bit17 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT            1
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK           0x1 /* bit18 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT          2
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK           0x1 /* bit19 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT          3
+#define E5_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK                 0x1 /* bit20 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT                4
+#define E5_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK               0x1 /* bit21 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT              5
+#define E5_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK                     0x3 /* cf23 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT                    6
+	u8 edpm_vport /* byte2 */;
+	__le16 physical_q0 /* physical_q0_and_vf_id_lo */;
+	__le16 tx_l2_edpm_usg_cnt_and_vf_id_hi /* physical_q1_and_vf_id_hi */;
+	__le16 edpm_num_bds /* physical_q2 */;
+	__le16 tx_bd_cons /* word3 */;
+	__le16 tx_bd_prod /* word4 */;
+	__le16 updated_qm_pq_id /* word5 */;
+	__le16 conn_dpi /* conn_dpi */;
+	u8 fw_spare_data0 /* byte3 */;
+	u8 fw_spare_data1 /* byte4 */;
+	u8 fw_spare_data2 /* byte5 */;
+	u8 fw_spare_data3 /* byte6 */;
+	__le32 fw_spare_data4 /* reg0 */;
+	__le32 fw_spare_data5 /* reg1 */;
+	__le32 fw_spare_data6 /* reg2 */;
+	__le32 fw_spare_data7 /* reg3 */;
+	__le32 fw_spare_data8 /* reg4 */;
+	__le32 fw_spare_data9 /* cf_array0 */;
+	__le32 fw_spare_data10 /* cf_array1 */;
+	u8 flags15;
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_REDIRECTION_CONDITION_LO_MASK  0x1 /* bit22 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_REDIRECTION_CONDITION_LO_SHIFT 0
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_REDIRECTION_CONDITION_HI_MASK  0x1 /* bit23 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_EDPM_REDIRECTION_CONDITION_HI_SHIFT 1
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED3_MASK                   0x1 /* bit24 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED3_SHIFT                  2
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED4_MASK                   0x3 /* cf24 */
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED4_SHIFT                  3
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED5_MASK                   0x1 /* cf24en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED5_SHIFT                  5
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED6_MASK                   0x1 /* rule26en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED6_SHIFT                  6
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED7_MASK                   0x1 /* rule27en */
+#define E5_XSTORM_ETH_CONN_AG_CTX_E4_RESERVED7_SHIFT                  7
+	u8 fw_spare_data11 /* byte7 */;
+	__le16 fw_spare_data12 /* word7 */;
+	__le16 fw_spare_data13 /* word8 */;
+	__le16 fw_spare_data14 /* word9 */;
+	__le16 fw_spare_data15 /* word10 */;
+	__le16 fw_spare_data16 /* word11 */;
+	__le32 fw_spare_data17 /* reg7 */;
+	__le32 fw_data0 /* reg8 */;
+	__le32 fw_data1 /* reg9 */;
+	u8 fw_data2 /* byte8 */;
+	u8 fw_data3 /* byte9 */;
+	u8 fw_data4 /* byte10 */;
+	u8 fw_data5 /* byte11 */;
+	u8 fw_data6 /* byte12 */;
+	u8 fw_data7 /* byte13 */;
+	u8 fw_data8 /* byte14 */;
+	u8 fw_data9 /* byte15 */;
+	__le32 fw_data10 /* reg10 */;
+	__le32 fw_data11 /* reg11 */;
+	__le32 fw_data12 /* reg12 */;
+	__le32 fw_data13 /* reg13 */;
+	__le32 fw_data14 /* reg14 */;
+	__le32 fw_data15 /* reg15 */;
+	__le32 fw_data16 /* reg16 */;
+	__le32 fw_data17 /* reg17 */;
+	__le32 fw_data18 /* reg18 */;
+	__le32 fw_data19 /* reg19 */;
+	__le16 fw_data20 /* word12 */;
+	__le16 fw_data21 /* word13 */;
+	__le16 fw_data22 /* word14 */;
+	__le16 fw_data23 /* word15 */;
+};
+
+struct e5_tstorm_eth_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	u8 flags0;
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT         0
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK          0x1 /* bit2 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT         2
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK          0x1 /* bit3 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT         3
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK          0x1 /* bit4 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT         4
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK          0x1 /* bit5 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT         5
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          6
+	u8 flags1;
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          0
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          2
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT          4
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT          6
+	u8 flags2;
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT          0
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT          2
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF7_MASK           0x3 /* cf7 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT          4
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF8_MASK           0x3 /* cf8 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT          6
+	u8 flags3;
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF9_MASK           0x3 /* cf9 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT          0
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF10_MASK          0x3 /* cf10 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT         2
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        4
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        5
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        6
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT        7
+	u8 flags4;
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT        0
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT        1
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT        2
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK         0x1 /* cf7en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT        3
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK         0x1 /* cf8en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT        4
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK         0x1 /* cf9en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT        5
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK        0x1 /* cf10en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT       6
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      7
+	u8 flags5;
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      0
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      1
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      2
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      3
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT      4
+#define E5_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK      0x1 /* rule6en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT     5
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT      6
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT      7
+	u8 flags6;
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit6 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit7 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED3_MASK  0x1 /* bit8 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf11 */
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf11en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* rule9en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED7_MASK  0x1 /* rule10en */
+#define E5_TSTORM_ETH_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
+	u8 byte2 /* byte2 */;
+	__le16 rx_bd_cons /* word0 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le32 reg4 /* reg4 */;
+	__le32 reg5 /* reg5 */;
+	__le32 reg6 /* reg6 */;
+	__le32 reg7 /* reg7 */;
+	__le32 reg8 /* reg8 */;
+	u8 byte3 /* byte3 */;
+	u8 byte4 /* byte4 */;
+	u8 byte5 /* byte5 */;
+	u8 e4_reserved8 /* vf_id */;
+	__le16 rx_bd_prod /* word1 */;
+	__le16 word2 /* conn_dpi */;
+	__le32 reg9 /* reg9 */;
+	__le16 word3 /* word3 */;
+	__le16 e4_reserved9 /* word4 */;
+};
+
+/*
+ * The eth storm context for the Ystorm
+ */
+struct e5_ystorm_eth_conn_st_ctx {
+	__le32 reserved[8];
+};
+
+struct e5_ystorm_eth_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 state_and_core_id /* state_and_core_id */;
+	u8 flags0;
+#define E5_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1 /* exist_in_qm0 */
+#define E5_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
+#define E5_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1 /* exist_in_qm1 */
+#define E5_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
+#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3 /* cf0 */
+#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    2
+#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK      0x3 /* cf1 */
+#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT     4
+#define E5_YSTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3 /* cf2 */
+#define E5_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
+	u8 flags1;
+#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1 /* cf0en */
+#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK   0x1 /* cf1en */
+#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT  1
+#define E5_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1 /* cf2en */
+#define E5_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1 /* rule0en */
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              3
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1 /* rule1en */
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              4
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1 /* rule2en */
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              5
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1 /* rule3en */
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              6
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1 /* rule4en */
+#define E5_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              7
+	u8 tx_q0_int_coallecing_timeset /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* word0 */;
+	__le32 terminate_spqe /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le16 tx_bd_cons_upd /* word1 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
+	__le16 word4 /* word4 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+};
+
+struct e5_ustorm_eth_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	u8 flags0;
+#define E5_USTORM_ETH_CONN_AG_CTX_BIT0_MASK                    0x1 /* exist_in_qm0 */
+#define E5_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                   0
+#define E5_USTORM_ETH_CONN_AG_CTX_BIT1_MASK                    0x1 /* exist_in_qm1 */
+#define E5_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                   1
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK     0x3 /* timer0cf */
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT    2
+#define E5_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK     0x3 /* timer1cf */
+#define E5_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT    4
+#define E5_USTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
+#define E5_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    6
+	u8 flags1;
+#define E5_USTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3 /* timer_stop_all */
+#define E5_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    0
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK               0x3 /* cf4 */
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT              2
+#define E5_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK               0x3 /* cf5 */
+#define E5_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT              4
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK       0x3 /* cf6 */
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT      6
+	u8 flags2;
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK  0x1 /* cf0en */
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E5_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK  0x1 /* cf1en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E5_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
+#define E5_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  2
+#define E5_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
+#define E5_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  3
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK            0x1 /* cf4en */
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT           4
+#define E5_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK            0x1 /* cf5en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT           5
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK    0x1 /* cf6en */
+#define E5_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT   6
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                 0x1 /* rule0en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                7
+	u8 flags3;
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                 0x1 /* rule1en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                0
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                 0x1 /* rule2en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                1
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                 0x1 /* rule3en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                2
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                 0x1 /* rule4en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                3
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                4
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                5
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                6
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK                 0x1 /* rule8en */
+#define E5_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT                7
+	u8 flags4;
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED1_MASK            0x1 /* bit2 */
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED1_SHIFT           0
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED2_MASK            0x1 /* bit3 */
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED2_SHIFT           1
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED3_MASK            0x3 /* cf7 */
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED3_SHIFT           2
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED4_MASK            0x3 /* cf8 */
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED4_SHIFT           4
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED5_MASK            0x1 /* cf7en */
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED5_SHIFT           6
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED6_MASK            0x1 /* cf8en */
+#define E5_USTORM_ETH_CONN_AG_CTX_E4_RESERVED6_SHIFT           7
+	u8 byte2 /* byte2 */;
+	__le16 word0 /* conn_dpi */;
+	__le16 tx_bd_cons /* word1 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 tx_int_coallecing_timeset /* reg3 */;
+	__le16 tx_drv_bd_cons /* word2 */;
+	__le16 rx_drv_cqe_cons /* word3 */;
 };
 
+/*
+ * The eth storm context for the Ustorm
+ */
+struct e5_ustorm_eth_conn_st_ctx {
+	__le32 reserved[52];
+};
+
+/*
+ * The eth storm context for the Mstorm
+ */
+struct e5_mstorm_eth_conn_st_ctx {
+	__le32 reserved[8];
+};
+
+/*
+ * eth connection context
+ */
+struct e5_eth_conn_context {
+	struct e5_tstorm_eth_conn_st_ctx tstorm_st_context /* tstorm storm context */;
+	struct regpair tstorm_st_padding[2] /* padding */;
+	struct e5_pstorm_eth_conn_st_ctx pstorm_st_context /* pstorm storm context */;
+	struct e5_xstorm_eth_conn_st_ctx xstorm_st_context /* xstorm storm context */;
+	struct e5_xstorm_eth_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
+	struct regpair xstorm_ag_padding[2] /* padding */;
+	struct e5_tstorm_eth_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
+	struct e5_ystorm_eth_conn_st_ctx ystorm_st_context /* ystorm storm context */;
+	struct e5_ystorm_eth_conn_ag_ctx ystorm_ag_context /* ystorm aggregative context */;
+	struct e5_ustorm_eth_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
+	struct e5_ustorm_eth_conn_st_ctx ustorm_st_context /* ustorm storm context */;
+	struct regpair ustorm_st_padding[2] /* padding */;
+	struct e5_mstorm_eth_conn_st_ctx mstorm_st_context /* mstorm storm context */;
+};
+
+
+struct e5_tstorm_eth_task_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	__le16 word0 /* icid */;
+	u8 flags0;
+#define E5_TSTORM_ETH_TASK_AG_CTX_NIBBLE0_MASK  0xF /* connection_type */
+#define E5_TSTORM_ETH_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT0_SHIFT    4
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT1_SHIFT    5
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT2_MASK     0x1 /* bit2 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT2_SHIFT    6
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT3_MASK     0x1 /* bit3 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT3_SHIFT    7
+	u8 flags1;
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT4_MASK     0x1 /* bit4 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT4_SHIFT    0
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT5_MASK     0x1 /* bit5 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_BIT5_SHIFT    1
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF0_MASK      0x3 /* timer0cf */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF0_SHIFT     2
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF1_MASK      0x3 /* timer1cf */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF1_SHIFT     4
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF2_MASK      0x3 /* timer2cf */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF2_SHIFT     6
+	u8 flags2;
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF3_SHIFT     0
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF4_MASK      0x3 /* cf4 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF4_SHIFT     2
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF5_MASK      0x3 /* cf5 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF5_SHIFT     4
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF6_MASK      0x3 /* cf6 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF6_SHIFT     6
+	u8 flags3;
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF7_MASK      0x3 /* cf7 */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF7_SHIFT     0
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF0EN_SHIFT   2
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF1EN_SHIFT   3
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF2EN_SHIFT   4
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF3EN_SHIFT   5
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF4EN_SHIFT   6
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF5EN_SHIFT   7
+	u8 flags4;
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF6EN_SHIFT   0
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF7EN_MASK    0x1 /* cf7en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_CF7EN_SHIFT   1
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
+#define E5_TSTORM_ETH_TASK_AG_CTX_RULE5EN_SHIFT 7
+	u8 timestamp /* byte2 */;
+	__le16 word1 /* word1 */;
+	__le32 reg0 /* reg0 */;
+	struct regpair packets64 /* regpair0 */;
+	struct regpair bytes64 /* regpair1 */;
+};
+
+struct e5_mstorm_eth_task_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	__le16 word0 /* icid */;
+	u8 flags0;
+#define E5_MSTORM_ETH_TASK_AG_CTX_NIBBLE0_MASK       0xF /* connection_type */
+#define E5_MSTORM_ETH_TASK_AG_CTX_NIBBLE0_SHIFT      0
+#define E5_MSTORM_ETH_TASK_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_BIT0_SHIFT         4
+#define E5_MSTORM_ETH_TASK_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_BIT1_SHIFT         5
+#define E5_MSTORM_ETH_TASK_AG_CTX_BIT2_MASK          0x1 /* bit2 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_BIT2_SHIFT         6
+#define E5_MSTORM_ETH_TASK_AG_CTX_BIT3_MASK          0x1 /* bit3 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_BIT3_SHIFT         7
+	u8 flags1;
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF0_MASK           0x3 /* cf0 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF0_SHIFT          0
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF1_MASK           0x3 /* cf1 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF1_SHIFT          2
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF2_MASK           0x3 /* cf2 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF2_SHIFT          4
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF0EN_SHIFT        6
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF1EN_SHIFT        7
+	u8 flags2;
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_CF2EN_SHIFT        0
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE0EN_SHIFT      1
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE1EN_SHIFT      2
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE2EN_SHIFT      3
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE3EN_SHIFT      4
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE4EN_SHIFT      5
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE5EN_SHIFT      6
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_RULE6EN_SHIFT      7
+	u8 flags3;
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit4 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED2_MASK  0x3 /* cf3 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED3_MASK  0x3 /* cf4 */
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED4_MASK  0x1 /* cf3en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf4en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED6_MASK  0x1 /* rule7en */
+#define E5_MSTORM_ETH_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
+	__le32 reg0 /* reg0 */;
+	u8 timestamp /* byte2 */;
+	u8 byte3 /* byte3 */;
+	u8 byte4 /* byte4 */;
+	u8 e4_reserved7 /* icid_ext */;
+	struct regpair packets64 /* regpair0 */;
+	struct regpair bytes64 /* regpair1 */;
+};
+
+/*
+ * eth task context
+ */
+struct e5_eth_task_context {
+	struct e5_tstorm_eth_task_ag_ctx tstorm_ag_context /* tstorm task aggregative context */;
+	struct e5_mstorm_eth_task_ag_ctx mstorm_ag_context /* mstorm task aggregative context */;
+};
+
+
+
+
+
+
+
+
+
 
 /*
  * Ethernet filter types: mac/vlan/pair
  */
 enum eth_error_code {
 	ETH_OK = 0x00 /* command succeeded */,
-/* mac add filters command failed due to cam full state */
-	ETH_FILTERS_MAC_ADD_FAIL_FULL,
+	ETH_FILTERS_MAC_ADD_FAIL_FULL /* mac add filters command failed due to cam full state */,
 /* mac add filters command failed due to mtt2 full state */
 	ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
-/* mac add filters command failed due to duplicate mac address */
+/* mac add filters command failed due to duplicate mac address */
 	ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
-/* mac add filters command failed due to duplicate mac address */
+/* mac add filters command failed due to duplicate mac address */
 	ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
-/* mac delete filters command failed due to not found state */
-	ETH_FILTERS_MAC_DEL_FAIL_NOF,
+	ETH_FILTERS_MAC_DEL_FAIL_NOF /* mac delete filters command failed due to not found state */,
 /* mac delete filters command failed due to not found state */
 	ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
 /* mac delete filters command failed due to not found state */
 	ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
-/* mac add filters command failed due to MAC Address of 00:00:00:00:00:00 */
+/* mac add filters command failed due to MAC Address of 00:00:00:00:00:00 */
 	ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
-/* vlan add filters command failed due to cam full state */
-	ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+	ETH_FILTERS_VLAN_ADD_FAIL_FULL /* vlan add filters command failed due to cam full state */,
 /* vlan add filters command failed due to duplicate VLAN filter */
 	ETH_FILTERS_VLAN_ADD_FAIL_DUP,
 /* vlan delete filters command failed due to not found state */
@@ -603,21 +1325,33 @@ enum eth_error_code {
 	ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
 /* pair add filters command failed due to duplicate request */
 	ETH_FILTERS_PAIR_ADD_FAIL_DUP,
-/* pair add filters command failed due to full state */
-	ETH_FILTERS_PAIR_ADD_FAIL_FULL,
-/* pair add filters command failed due to full state */
-	ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
-/* pair add filters command failed due not found state */
-	ETH_FILTERS_PAIR_DEL_FAIL_NOF,
-/* pair add filters command failed due not found state */
-	ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
-/* pair add filters command failed due to MAC Address of 00:00:00:00:00:00 */
+	ETH_FILTERS_PAIR_ADD_FAIL_FULL /* pair add filters command failed due to full state */,
+	ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC /* pair add filters command failed due to full state */,
+	ETH_FILTERS_PAIR_DEL_FAIL_NOF /* pair add filters command failed due not found state */,
+	ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1 /* pair add filters command failed due not found state */,
+/* pair add filters command failed due to MAC Address of 00:00:00:00:00:00 */
 	ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
-/* vni add filters command failed due to cam full state */
-	ETH_FILTERS_VNI_ADD_FAIL_FULL,
+	ETH_FILTERS_VNI_ADD_FAIL_FULL /* vni add filters command failed due to cam full state */,
 /* vni add filters command failed due to duplicate VNI filter */
 	ETH_FILTERS_VNI_ADD_FAIL_DUP,
 	ETH_FILTERS_GFT_UPDATE_FAIL /* Fail update GFT filter. */,
+/* Fail load VF data like BD or CQE ring next page address. */
+	ETH_RX_QUEUE_FAIL_LOAD_VF_DATA,
+	ETH_FILTERS_GFS_ADD_FILTER_FAIL_MAX_HOPS /* Fail to add a GFS filter - max hops reached. */,
+/* Fail to add a GFS filter - no free entry to add, database full. */
+	ETH_FILTERS_GFS_ADD_FILTER_FAIL_NO_FREE_ENRTY,
+/* Fail to add a GFS filter  - entry already added. */
+	ETH_FILTERS_GFS_ADD_FILTER_FAIL_ALREADY_EXISTS,
+	ETH_FILTERS_GFS_ADD_FILTER_FAIL_PCI_ERROR /* Fail to add a GFS filter  - PCI error. */,
+/* Fail to add a GFS filter  - magic number error. */
+	ETH_FILTERS_GFS_ADD_FILTER_FAIL_MAGIC_NUM_ERROR,
+/* Fail to delete GFS filter - max hops reached. */
+	ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAX_HOPS,
+/* Fail to delete GFS filter - no matched entry to delete. */
+	ETH_FILTERS_GFS_DEL_FILTER_FAIL_NO_MATCH_ENRTY,
+	ETH_FILTERS_GFS_DEL_FILTER_FAIL_PCI_ERROR /* Fail to delete GFS filter - PCI error. */,
+/* Fail to delete GFS filter - magic number error. */
+	ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAGIC_NUM_ERROR,
 	MAX_ETH_ERROR_CODE
 };
 
@@ -644,6 +1378,12 @@ enum eth_event_opcode {
 	ETH_EVENT_RX_CREATE_GFT_ACTION,
 	ETH_EVENT_RX_GFT_UPDATE_FILTER,
 	ETH_EVENT_TX_QUEUE_UPDATE,
+	ETH_EVENT_RGFS_ADD_FILTER,
+	ETH_EVENT_RGFS_DEL_FILTER,
+	ETH_EVENT_TGFS_ADD_FILTER,
+	ETH_EVENT_TGFS_DEL_FILTER,
+	ETH_EVENT_GFS_COUNTERS_REPORT,
+	ETH_EVENT_HAIRPIN_QUEUE_STOP,
 	MAX_ETH_EVENT_OPCODE
 };
 
@@ -655,8 +1395,7 @@ enum eth_filter_action {
 	ETH_FILTER_ACTION_UNUSED,
 	ETH_FILTER_ACTION_REMOVE,
 	ETH_FILTER_ACTION_ADD,
-/* Remove all filters of given type and vport ID. */
-	ETH_FILTER_ACTION_REMOVE_ALL,
+	ETH_FILTER_ACTION_REMOVE_ALL /* Remove all filters of given type and vport ID. */,
 	MAX_ETH_FILTER_ACTION
 };
 
@@ -665,9 +1404,9 @@ enum eth_filter_action {
  * Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$
  */
 struct eth_filter_cmd {
-	u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+	u8 type /* Filter Type (MAC/VLAN/Pair/VNI) (use enum eth_filter_type) */;
 	u8 vport_id /* the vport id */;
-	u8 action /* filter command action: add/remove/replace */;
+	u8 action /* filter command action: add/remove/replace (use enum eth_filter_action) */;
 	u8 reserved0;
 	__le32 vni;
 	__le16 mac_lsb;
@@ -684,8 +1423,8 @@ struct eth_filter_cmd_header {
 	u8 rx /* If set, apply these commands to the RX path */;
 	u8 tx /* If set, apply these commands to the TX path */;
 	u8 cmd_cnt /* Number of filter commands */;
-/* 0 - dont assert in case of filter configuration error. Just return an error
- * code. 1 - assert in case of filter configuration error.
+/* 0 - dont assert in case of filter configuration error. Just return an error code. 1 - assert in
+ * case of filter configuration error.
  */
 	u8 assert_on_error;
 	u8 reserved1[4];
@@ -703,29 +1442,136 @@ enum eth_filter_type {
 	ETH_FILTER_TYPE_INNER_MAC /* Add/remove a inner MAC address */,
 	ETH_FILTER_TYPE_INNER_VLAN /* Add/remove a inner VLAN */,
 	ETH_FILTER_TYPE_INNER_PAIR /* Add/remove a inner MAC-VLAN pair */,
-/* Add/remove a inner MAC-VNI pair */
-	ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+	ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR /* Add/remove a inner MAC-VNI pair */,
 	ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */,
 	ETH_FILTER_TYPE_VNI /* Add/remove a VNI */,
 	MAX_ETH_FILTER_TYPE
 };
 
 
+/*
+ * GFS copy condition.
+ */
+enum eth_gfs_copy_condition {
+	ETH_GFS_COPY_ALWAYS /* Copy always. */,
+	ETH_GFS_COPY_TTL0 /* Copy if TTL=0. */,
+	ETH_GFS_COPY_TTL1 /* Copy if TTL=1. */,
+	ETH_GFS_COPY_TTL0_TTL1 /* Copy if TTL=0 or TTL=1. */,
+	ETH_GFS_COPY_TCP_FLAGS /* Copy if one of TCP flags from tcp_flags set in packet. */,
+	MAX_ETH_GFS_COPY_CONDITION
+};
+
+
+/*
+ * GFS Flow Counters Report Table Header
+ */
+struct eth_gfs_counters_report_header {
+	__le16 num_valid_entries;
+	u8 reserved[2];
+	__le32 timestamp /* table report timestamp */;
+};
+
+/*
+ * GFS Flow Counters Report Entry
+ */
+struct eth_gfs_counters_report_entry {
+	struct regpair packets /* Packets Counter */;
+	struct regpair bytes /* Bytes Counter */;
+/* TID of GFS Counter. Shall be Reported by FW with zeroised segment_id bits (as received from host
+ * upon counter action configuration)
+ */
+	struct hsi_tid tid;
+	u8 timestamp /* Last-Updated timestamp with timestamp_log2_factor applied  */;
+	u8 reserved[3];
+};
+
+/*
+ * GFS Flow Counters Report Table.
+ */
+struct eth_gfs_counters_report {
+	struct eth_gfs_counters_report_header table_header;
+	struct eth_gfs_counters_report_entry table_entries[NUM_OF_LTIDS_E5];
+};
+
+
+
+
+/*
+ * GFS packet destination type.
+ */
+enum eth_gfs_dest_type {
+	ETH_GFS_DEST_DROP /* Drop TX or RX packet. */,
+	ETH_GFS_DEST_RX_VPORT /* Forward RX or TX packet to RX VPORT. */,
+	ETH_GFS_DEST_RX_QID /* Forward RX packet to RX queue. */,
+	ETH_GFS_DEST_TX_CID /* Forward RX packet to TX queue given by CID (hairpinning). */,
+/* Bypass TX switching for TX packet. TX destination given by tx_dest value. */
+	ETH_GFS_DEST_TXSW_BYPASS,
+	MAX_ETH_GFS_DEST_TYPE
+};
+
+
+/*
+ * GFS packet modified header position.
+ */
+enum eth_gfs_modified_header_position {
+	ETH_GFS_MODIFY_HDR_SINGLE /* Modify single header in no tunnel packet. */,
+	ETH_GFS_MODIFY_HDR_TUNNEL /* Modify tunnel header. */,
+	ETH_GFS_MODIFY_HDR_INNER /* Modify inner header in tunnel packet. */,
+	MAX_ETH_GFS_MODIFIED_HEADER_POSITION
+};
+
+
+enum eth_gfs_pop_hdr_type {
+	ETH_GFS_POP_UNUSED /* Reserved for initialized check */,
+	ETH_GFS_POP_ETH /* Pop outer ETH header. */,
+	ETH_GFS_POP_TUNN /* Pop tunnel header. */,
+	ETH_GFS_POP_TUNN_ETH /* Pop tunnel and inner ETH header. */,
+	MAX_ETH_GFS_POP_HDR_TYPE
+};
+
+
+enum eth_gfs_push_hdr_type {
+	ETH_GFS_PUSH_UNUSED /* Reserved for initialized check */,
+	ETH_GFS_PUSH_ETH /* Push ETH header. */,
+	ETH_GFS_PUSH_GRE_V4 /* Push GRE over IPV4 tunnel header (ETH-IPV4-GRE). */,
+	ETH_GFS_PUSH_GRE_V6 /* Push GRE over IPV6 tunnel header (ETH-IPV6-GRE). */,
+	ETH_GFS_PUSH_VXLAN_V4 /* Push VXLAN over IPV4 tunnel header (ETH-IPV4-UDP-VXLAN). */,
+	ETH_GFS_PUSH_VXLAN_V6 /* Push VXLAN over IPV6 tunnel header (ETH-IPV6-UDP-VXLAN). */,
+/* Push VXLAN over IPV4 tunnel header and inner ETH header (ETH-IPV4-UDP-VXLAN-ETH). */
+	ETH_GFS_PUSH_VXLAN_V4_ETH,
+/* Push VXLAN over IPV6 tunnel header and inner ETH header (ETH-IPV6-UDP-VXLAN-ETH). */
+	ETH_GFS_PUSH_VXLAN_V6_ETH,
+	MAX_ETH_GFS_PUSH_HDR_TYPE
+};
+
+
+/*
+ * GFS redirect condition.
+ */
+enum eth_gfs_redirect_condition {
+	ETH_GFS_REDIRECT_ALWAYS /* Redirect always. */,
+	ETH_GFS_REDIRECT_TTL0 /* Redirect if TTL=0. */,
+	ETH_GFS_REDIRECT_TTL1 /* Redirect if TTL=1. */,
+	ETH_GFS_REDIRECT_TTL0_TTL1 /* Redirect if TTL=0 or TTL=1. */,
+	MAX_ETH_GFS_REDIRECT_CONDITION
+};
+
+
 /*
  * inner to inner vlan priority translation configurations
  */
 struct eth_in_to_in_pri_map_cfg {
-/* If set, non_rdma_in_to_in_pri_map or rdma_in_to_in_pri_map will be used for
- * inner to inner priority mapping depending on protocol type
+/* If set, non_rdma_in_to_in_pri_map or rdma_in_to_in_pri_map will be used for inner to inner
+ * priority mapping depending on protocol type
  */
 	u8 inner_vlan_pri_remap_en;
 	u8 reserved[7];
-/* Map for inner to inner vlan priority translation for Non RDMA protocols, used
- * for TenantDcb. Set inner_vlan_pri_remap_en, when init the map.
+/* Map for inner to inner vlan priority translation for Non RDMA protocols, used for TenantDcb. Set
+ * inner_vlan_pri_remap_en, when init the map.
  */
 	u8 non_rdma_in_to_in_pri_map[8];
-/* Map for inner to inner vlan priority translation for RDMA protocols, used for
- * TenantDcb. Set inner_vlan_pri_remap_en, when init the map.
+/* Map for inner to inner vlan priority translation for RDMA protocols, used for TenantDcb. Set
+ * inner_vlan_pri_remap_en, when init the map.
  */
 	u8 rdma_in_to_in_pri_map[8];
 };
@@ -736,10 +1582,8 @@ struct eth_in_to_in_pri_map_cfg {
  */
 enum eth_ipv4_frag_type {
 	ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */,
-/* First Fragment of IPv4 Packet (contains headers) */
-	ETH_IPV4_FIRST_FRAG,
-/* Non-First Fragment of IPv4 Packet (does not contain headers) */
-	ETH_IPV4_NON_FIRST_FRAG,
+	ETH_IPV4_FIRST_FRAG /* First Fragment of IPv4 Packet (contains headers) */,
+	ETH_IPV4_NON_FIRST_FRAG /* Non-First Fragment of IPv4 Packet (does not contain headers) */,
 	MAX_ETH_IPV4_FRAG_TYPE
 };
 
@@ -768,20 +1612,21 @@ enum eth_ramrod_cmd_id {
 	ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
 	ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
 	ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
-/* RX - Create an Openflow Action */
-	ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
-/* RX - Add an Openflow Filter to the Searcher */
-	ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
-/* RX - Delete an Openflow Filter to the Searcher */
-	ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
-/* RX - Add a UDP Filter to the Searcher */
-	ETH_RAMROD_RX_ADD_UDP_FILTER,
-/* RX - Delete a UDP Filter to the Searcher */
-	ETH_RAMROD_RX_DELETE_UDP_FILTER,
+	ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION /* RX - Create an Openflow Action */,
+	ETH_RAMROD_RX_ADD_OPENFLOW_FILTER /* RX - Add an Openflow Filter to the Searcher */,
+	ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER /* RX - Delete an Openflow Filter to the Searcher */,
+	ETH_RAMROD_RX_ADD_UDP_FILTER /* RX - Add a UDP Filter to the Searcher */,
+	ETH_RAMROD_RX_DELETE_UDP_FILTER /* RX - Delete a UDP Filter to the Searcher */,
 	ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */,
-/* RX - Add/Delete a GFT Filter to the Searcher */
-	ETH_RAMROD_GFT_UPDATE_FILTER,
+	ETH_RAMROD_RX_UPDATE_GFT_FILTER /* RX - Add/Delete a GFT Filter to the Searcher */,
 	ETH_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
+	ETH_RAMROD_RGFS_FILTER_ADD /* add new RGFS filter */,
+	ETH_RAMROD_RGFS_FILTER_DEL /* delete RGFS filter */,
+	ETH_RAMROD_TGFS_FILTER_ADD /* add new TGFS filter */,
+	ETH_RAMROD_TGFS_FILTER_DEL /* delete TGFS filter */,
+	ETH_RAMROD_GFS_COUNTERS_REPORT /* GFS Flow Counters Report */,
+/* Hairpin Queue Stop Ramrod (Stops both Rx and Tx Hairpin Queues) */
+	ETH_RAMROD_HAIRPIN_QUEUE_STOP,
 	MAX_ETH_RAMROD_CMD_ID
 };
 
@@ -791,12 +1636,11 @@ enum eth_ramrod_cmd_id {
  */
 struct eth_return_code {
 	u8 value;
-/* error code (use enum eth_error_code) */
-#define ETH_RETURN_CODE_ERR_CODE_MASK  0x3F
+#define ETH_RETURN_CODE_ERR_CODE_MASK  0x3F /* error code (use enum eth_error_code) */
 #define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
 #define ETH_RETURN_CODE_RESERVED_MASK  0x1
 #define ETH_RETURN_CODE_RESERVED_SHIFT 6
-/* rx path - 0, tx path - 1 */
+/* Vport filter update ramrod fail path: 0 - rx path, 1 - tx path */
 #define ETH_RETURN_CODE_RX_TX_MASK     0x1
 #define ETH_RETURN_CODE_RX_TX_SHIFT    7
 };
@@ -806,14 +1650,12 @@ struct eth_return_code {
  * tx destination enum
  */
 enum eth_tx_dst_mode_config_enum {
-/* tx destination configuration override is disabled */
-	ETH_TX_DST_MODE_CONFIG_DISABLE,
-/* tx destination configuration override is enabled, vport and tx dst will be
- * taken from from 4th bd
+	ETH_TX_DST_MODE_CONFIG_DISABLE /* tx destination configuration override is disabled */,
+/* tx destination configuration override is enabled, vport and tx dst will be taken from 4th bd
  */
 	ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD,
-/* tx destination configuration override is enabled, vport and tx dst will be
- * taken from from vport data
+/* tx destination configuration override is enabled, vport and tx dst will be taken from vport
+ * data
  */
 	ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT,
 	MAX_ETH_TX_DST_MODE_CONFIG_ENUM
@@ -825,8 +1667,7 @@ enum eth_tx_dst_mode_config_enum {
  */
 enum eth_tx_err {
 	ETH_TX_ERR_DROP /* Drop erroneous packet. */,
-/* Assert an interrupt for PF, declare as malicious for VF */
-	ETH_TX_ERR_ASSERT_MALICIOUS,
+	ETH_TX_ERR_ASSERT_MALICIOUS /* Assert an interrupt for PF, declare as malicious for VF */,
 	MAX_ETH_TX_ERR
 };
 
@@ -848,21 +1689,22 @@ struct eth_tx_err_vals {
 /* Packet with illegal type of inband tag (use enum eth_tx_err) */
 #define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK          0x1
 #define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT         3
-/* Packet marked for VLAN insertion when inband tag is present
- * (use enum eth_tx_err)
- */
+/* Packet marked for VLAN insertion when inband tag is present (use enum eth_tx_err) */
 #define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK  0x1
 #define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
 /* Non LSO packet larger than MTU (use enum eth_tx_err) */
 #define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK                0x1
 #define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT               5
-/* VF/PF has sent LLDP/PFC or any other type of control packet which is not
- * allowed to (use enum eth_tx_err)
+/* VF/PF has sent LLDP/PFC or any other type of control packet which is not allowed to (use enum
+ * eth_tx_err)
  */
 #define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK        0x1
 #define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT       6
-#define ETH_TX_ERR_VALS_RESERVED_MASK                     0x1FF
-#define ETH_TX_ERR_VALS_RESERVED_SHIFT                    7
+/* Wrong BD flags. (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_MASK             0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_SHIFT            7
+#define ETH_TX_ERR_VALS_RESERVED_MASK                     0xFF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT                    8
 };
 
 
@@ -892,157 +1734,746 @@ struct eth_vport_rss_config {
 /* configuration of the 5-tuple capability */
 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK  0x1
 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
-/* if set update the rss keys */
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK              0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK              0x1FF /* if set update the rss keys */
 #define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT             7
-/* The RSS engine ID. Must be allocated to each vport with RSS enabled.
- * Total number of RSS engines is ETH_RSS_ENGINE_NUM_ , according to chip type.
+/* The RSS engine ID. Must be allocated to each vport with RSS enabled. Total number of RSS engines
+ * is ETH_RSS_ENGINE_NUM_ , according to chip type.
  */
 	u8 rss_id;
-	u8 rss_mode /* The RSS mode for this function */;
-	u8 update_rss_key /* if set update the rss key */;
-/* if set update the indirection table values */
+	u8 rss_mode /* The RSS mode for this function (use enum eth_vport_rss_mode) */;
+	u8 update_rss_key /* if set - update the rss key. rss_id must be valid.  */;
+/* if set - update the indirection table values. rss_id must be valid. */
 	u8 update_rss_ind_table;
-/* if set update the capabilities and indirection table size. */
+/* if set - update the capabilities and indirection table size. rss_id must be valid. */
 	u8 update_rss_capabilities;
 	u8 tbl_size /* rss mask (Tbl size) */;
-	__le32 reserved2[2];
-/* RSS indirection table */
-	__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
-/* RSS key supplied to us by OS */
-	__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
-	__le32 reserved3[2];
+/* If set and update_rss_ind_table set, update part of indirection table according to
+ * ind_table_mask.
+ */
+	u8 ind_table_mask_valid;
+	u8 reserved2[3];
+	__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM] /* RSS indirection table */;
+/* RSS indirection table update mask. Used if update_rss_ind_table and ind_table_mask_valid set. */
+	__le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS];
+	__le32 rss_key[ETH_RSS_KEY_SIZE_REGS] /* RSS key supplied to us by OS */;
+	__le32 reserved3;
+};
+
+
+/*
+ * eth vport RSS mode
+ */
+enum eth_vport_rss_mode {
+	ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */,
+	ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */,
+	MAX_ETH_VPORT_RSS_MODE
+};
+
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_rx_mode {
+	__le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK          0x1 /* drop all unicast packets */
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT         0
+/* accept all unicast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT       1
+/* accept all unmatched unicast packets */
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK  0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK          0x1 /* drop all multicast packets */
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT         3
+/* accept all multicast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT       4
+/* accept all broadcast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT       5
+/* accept any VNI in tunnel VNI classification. Used for default queue. */
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK          0x1
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT         6
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK               0x1FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT              7
+};
+
+
+/*
+ * Command for setting tpa parameters
+ */
+struct eth_vport_tpa_param {
+	u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */;
+	u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
+	u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
+	u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
+/* If set, start each TPA segment on new BD (GRO mode). One BD per segment allowed. */
+	u8 tpa_pkt_split_flg;
+/* If set, put header of first TPA segment on first BD and data on second BD. */
+	u8 tpa_hdr_data_split_flg;
+	u8 tpa_gro_consistent_flg /* If set, GRO data consistency will be checked for TPA continue */;
+	u8 tpa_max_aggs_num /* maximum number of opened aggregations per v-port  */;
+	__le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
+/* minimum TCP payload size for a packet to start aggregation */
+	__le16 tpa_min_size_to_start;
+/* minimum TCP payload size for a packet to continue aggregation */
+	__le16 tpa_min_size_to_cont;
+	u8 max_buff_num /* maximal number of buffers that can be used for one aggregation */;
+	u8 reserved;
+};
+
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_tx_mode {
+	__le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK    0x1 /* drop all unicast packets */
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT   0
+/* accept all unicast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK    0x1 /* drop all multicast packets */
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT   2
+/* accept all multicast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+/* accept all broadcast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK         0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT        5
+};
+
+
+/*
+ * GFS action data vlan insert
+ */
+struct gfs_action_data_vlan_insert {
+	__le16 reserved1;
+	__le16 vlan;
+	__le32 reserved2[14];
+};
+
+/*
+ * GFS action data vlan remove
+ */
+struct gfs_action_data_vlan_remove {
+	__le32 reserved2[15];
+};
+
+/*
+ * GFS destination.
+ */
+struct gfs_dest_data {
+	u8 type /* Destination type (use enum eth_gfs_dest_type) */;
+	u8 vport /* Destination vport id (For Hairpinning - Tx Vport to redirect to) */;
+	__le16 rx_qid /* Destination queue ID. */;
+/* Hairpin Queue Index from base_hairpin_cid of hairpin_pf_id to redirect traffic to */
+	u8 hairpin_queue_index;
+	u8 hairpin_pf_id /* PF ID to redirect traffic to */;
+/* if set, vport field is used for hairpin traffic redirection, otherwise, the default vport of the
+ * PF is used (set upon 1st Hairpin [Tx/Rx] Queue Start Ramrod)
+ */
+	u8 hairpin_vport_valid;
+	u8 tx_dest /* TX destination for TX switch bypass. (use enum dst_port_mode) */;
+/* GFS redirect or copy action hint value. Limited to 3 bit. Reported by RX CQE for RX packets or TX
+ * packets, forwarded to RX VPORT.
+ */
+	u8 drv_hint;
+	u8 reserved[3];
+};
+
+/*
+ * GFS redirect action data.
+ */
+struct gfs_action_data_redirect {
+	struct gfs_dest_data destination /* GFS destination */;
+	u8 condition /* GFS redirect condition. (use enum eth_gfs_redirect_condition) */;
+	u8 reserved1[3];
+	__le32 reserved2[11];
+};
+
+/*
+ * GFS copy action data.
+ */
+struct gfs_action_data_copy {
+	struct gfs_dest_data destination /* GFS destination */;
+/* Maximum packet length for copy. If packet exceed the length, packet will be truncated. */
+	__le16 sample_len;
+	u8 original_flg /* If set, copy before header modification */;
+	u8 condition /* GFS redirect condition. (use enum eth_gfs_copy_condition) */;
+	u8 tcp_flags /* TCP flags for copy condition. */;
+	u8 reserved1[3];
+	__le32 reserved2[10];
+};
+
+/*
+ * GFS count action data
+ */
+struct gfs_action_data_count {
+/* Note: the Segment ID is set/overridden by internal HSI Function / FW */
+	struct hsi_tid tid;
+	u8 reset_cnt /* if set, reset counter value. */;
+	u8 bitfields;
+/* Log2 factor applied to timestamp base. */
+#define GFS_ACTION_DATA_COUNT_TIMESTAMP_LOG2_FACTOR_MASK  0x1F
+#define GFS_ACTION_DATA_COUNT_TIMESTAMP_LOG2_FACTOR_SHIFT 0
+#define GFS_ACTION_DATA_COUNT_RESERVED_MASK               0x7
+#define GFS_ACTION_DATA_COUNT_RESERVED_SHIFT              5
+	u8 reserved1[2];
+	__le32 reserved2[13];
+};
+
+/*
+ * GFS modify ETH header action data
+ */
+struct gfs_action_data_hdr_modify_eth {
+/* Modified header position. (use enum eth_gfs_modified_header_position) */
+	u8 header_position;
+	u8 set_dst_mac_flg /* If set, modify destination MAC. */;
+	u8 set_src_mac_flg /* If set, modify source MAC. */;
+	u8 set_vlan_id_flg /* If set, modify VLAN ID. */;
+	u8 set_vlan_pri_flg /* If set, modify VLAN priority. */;
+	u8 vlan_pri /* VLAN priority */;
+	__le16 vlan_id /* VID */;
+	__le16 dst_mac_ind_index /* Destination MAC indirect data table index. */;
+	__le16 src_mac_ind_index /* Source MAC indirect data table index. */;
+	__le16 dst_mac_hi /* Destination Mac Bytes 0 to 1 */;
+	__le16 dst_mac_mid /* Destination Mac Bytes 2 to 3 */;
+	__le16 dst_mac_lo /* Destination Mac Bytes 4 to 5 */;
+	__le16 src_mac_hi /* Source Mac Bytes 0 to 1 */;
+	__le16 src_mac_mid /* Source Mac Bytes 2 to 3 */;
+	__le16 src_mac_lo /* Source Mac Bytes 4 to 5 */;
+	__le32 reserved2[9];
+};
+
+/*
+ * GFS modify IPV4 header action data
+ */
+struct gfs_action_data_hdr_modify_ipv4 {
+/* Modified header position. (use enum eth_gfs_modified_header_position) */
+	u8 header_position;
+	u8 set_dst_addr_flg /* If set, modify destination IP address. */;
+	u8 set_src_addr_flg /* If set, modify source IP address. */;
+	u8 set_dscp_flg /* If set, modify DSCP. */;
+	u8 set_ttl_flg /* If set, set TTL value. */;
+	u8 dec_ttl_flg /* If set, decrement TTL by 1. */;
+	u8 dscp /* DSCP */;
+	u8 ttl /* TTL */;
+	__le32 dst_addr /* IP Destination Address */;
+	__le32 src_addr /* IP Source Address */;
+	__le32 reserved2[11];
+};
+
+/*
+ * GFS modify IPV6 header action data
+ */
+struct gfs_action_data_hdr_modify_ipv6 {
+/* Modified header position. (use enum eth_gfs_modified_header_position) */
+	u8 header_position;
+	u8 set_dst_addr_flg /* If set, modify destination IP address. */;
+	u8 set_src_addr_flg /* If set, modify source IP address. */;
+	u8 set_dscp_flg /* If set, modify DSCP. */;
+	u8 set_hop_limit_flg /* If set, set hop limit value. */;
+	u8 dec_hop_limit_flg /* If set, decrement hop limit by 1. */;
+	u8 dscp /* DSCP */;
+	u8 hop_limit /* hop limit */;
+	__le16 dst_addr_ind_index /* Destination IP address indirect data table index. */;
+	__le16 src_addr_ind_index /* Source IP address indirect data table index. */;
+	__le32 dst_addr[4] /* IP Destination Address */;
+	__le32 src_addr[4] /* IP Source Address */;
+	__le32 reserved2[4];
+};
+
+/*
+ * GFS modify UDP header action data.
+ */
+struct gfs_action_data_hdr_modify_udp {
+	u8 set_dst_port_flg /* If set, modify destination port. */;
+	u8 set_src_port_flg /* If set, modify source port. */;
+	__le16 reserved1;
+	__le16 dst_port /* UDP Destination Port */;
+	__le16 src_port /* UDP Source Port */;
+	__le32 reserved2[13];
+};
+
+/*
+ * GFS modify TCP header action data.
+ */
+struct gfs_action_data_hdr_modify_tcp {
+	u8 set_dst_port_flg /* If set, modify destination port. */;
+	u8 set_src_port_flg /* If set, modify source port. */;
+	__le16 reserved1;
+	__le16 dst_port /* TCP Destination Port */;
+	__le16 src_port /* TCP Source Port */;
+	__le32 reserved2[13];
+};
+
+/*
+ * GFS modify VXLAN header action data.
+ */
+struct gfs_action_data_hdr_modify_vxlan {
+	u8 set_vni_flg /* If set, modify VNI. */;
+	u8 set_entropy_flg /* If set, use constant value for tunnel UDP source port. */;
+/* If set, calculate tunnel UDP source port from inner headers. */
+	u8 set_entropy_from_inner;
+	u8 reserved1[3];
+	__le16 entropy;
+	__le32 vni /* VNI value. */;
+	__le32 reserved2[12];
+};
+
+/*
+ * GFS modify GRE header action data.
+ */
+struct gfs_action_data_hdr_modify_gre {
+	u8 set_key_flg /* If set, modify key. */;
+	u8 reserved1[3];
+	__le32 key /* KEY value */;
+	__le32 reserved2[13];
+};
+
+/*
+ * GFS pop header action data.
+ */
+struct gfs_action_data_pop_hdr {
+/* Headers, that must be removed from packet. (use enum eth_gfs_pop_hdr_type) */
+	u8 hdr_pop_type;
+	u8 reserved1[3];
+	__le32 reserved2[14];
+};
+
+/*
+ * GFS push header action data.
+ */
+struct gfs_action_data_push_hdr {
+	u8 hdr_push_type /* Headers, that will be added. (use enum eth_gfs_push_hdr_type) */;
+	u8 reserved1[3];
+	__le32 reserved2[14];
+};
+
+union gfs_action_data_union {
+	struct gfs_action_data_vlan_insert vlan_insert;
+	struct gfs_action_data_vlan_remove vlan_remove;
+	struct gfs_action_data_redirect redirect /* Redirect action. */;
+	struct gfs_action_data_copy copy /* Copy action. */;
+	struct gfs_action_data_count count /* Count action. */;
+	struct gfs_action_data_hdr_modify_eth modify_eth /* Modify ETH header action. */;
+	struct gfs_action_data_hdr_modify_ipv4 modify_ipv4 /* Modify IPV4 header action. */;
+	struct gfs_action_data_hdr_modify_ipv6 modify_ipv6 /* Modify IPV6 header action. */;
+	struct gfs_action_data_hdr_modify_udp modify_udp /* Modify UDP header action. */;
+	struct gfs_action_data_hdr_modify_tcp modify_tcp /* Modify TCP header action. */;
+	struct gfs_action_data_hdr_modify_vxlan modify_vxlan /* Modify VXLAN header action. */;
+	struct gfs_action_data_hdr_modify_gre modify_gre /* Modify GRE header action. */;
+	struct gfs_action_data_pop_hdr pop_hdr /* Pop  headers action. */;
+	struct gfs_action_data_push_hdr push_hdr /* Push headers action. */;
+};
+
+struct gfs_action {
+	u8 action_type /* GFS action type (use enum gfs_action_type) */;
+	u8 reserved[3];
+	union gfs_action_data_union action_data /* GFS action data */;
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * GFS action type enum
+ */
+enum gfs_action_type {
+	ETH_GFS_ACTION_UNUSED,
+	ETH_GFS_ACTION_INSERT_VLAN,
+	ETH_GFS_ACTION_REMOVE_VLAN,
+	ETH_GFS_ACTION_REDIRECT /* Change packet destination */,
+	ETH_GFS_ACTION_COPY /* Copy or sample packet to VPORT */,
+	ETH_GFS_ACTION_COUNT /* Count flow traffic bytes and packets. */,
+	ETH_GFS_ACTION_HDR_MODIFY_ETH /* Modify ETH header */,
+	ETH_GFS_ACTION_HDR_MODIFY_IPV4 /* Modify IPV4 header */,
+	ETH_GFS_ACTION_HDR_MODIFY_IPV6 /* Modify IPV6 header */,
+	ETH_GFS_ACTION_HDR_MODIFY_UDP /* Modify UDP header */,
+	ETH_GFS_ACTION_HDR_MODIFY_TCP /* Modify TCP header */,
+	ETH_GFS_ACTION_HDR_MODIFY_VXLAN /* Modify VXLAN header */,
+	ETH_GFS_ACTION_HDR_MODIFY_GRE /* Modify GRE header */,
+	ETH_GFS_ACTION_POP_HDR /* Pop headers */,
+	ETH_GFS_ACTION_PUSH_HDR /* Push headers */,
+	ETH_GFS_ACTION_APPEND_CONTEXT /* For testing only.  */,
+	MAX_GFS_ACTION_TYPE
+};
+
+
+/*
+ * Add GFS filter command header
+ */
+struct gfs_add_filter_cmd_header {
+	struct regpair pkt_hdr_addr /* Pointer to packet header that defines GFS filter */;
+/* Write GFS hash to this address if return_hash_flg set. */
+	struct regpair return_hash_addr;
+	__le32 filter_pri /* filter priority */;
+	__le16 pkt_hdr_length /* Packet header length */;
+	u8 profile_id /* Profile id. */;
+	u8 vport_id /* Vport id. */;
+	u8 return_hash_flg /* If set, FW will write gfs_filter_hash_value to return_hash_addr. */;
+/* 0 - dont assert in case of filter configuration error, return an error code. 1 - assert in case
+ * of filter configuration error
+ */
+	u8 assert_on_error;
+	u8 reserved[6];
+};
+
+
+/*
+ * Ipv6 and MAC addresses to be stored in indirect table in storm ram
+ */
+struct gfs_indirect_data {
+	__le32 src_ipv6_addr[4] /* Inner or single source IPv6 address. */;
+	__le32 dst_ipv6_addr[4] /* Inner or single destination IPv6 address. */;
+	__le32 tunn_src_ipv6_addr[4] /* Tunnel source IPv6 address. */;
+	__le32 tunn_dst_ipv6_addr[4] /* Tunnel destination IPv6 address. */;
+	__le16 src_mac_addr_hi /* Inner or single source MAC address. */;
+	__le16 src_mac_addr_mid /* Inner or single source MAC address. */;
+	__le16 src_mac_addr_lo /* Inner or single source MAC address. */;
+	__le16 dst_mac_addr_hi /* Inner or single destination MAC address. */;
+	__le16 dst_mac_addr_mid /* Inner or single destination MAC address. */;
+	__le16 dst_mac_addr_lo /* Inner or single destination MAC address. */;
+	__le16 tunn_src_mac_addr_hi /* Tunnel source MAC address. */;
+	__le16 tunn_src_mac_addr_mid /* Tunnel source MAC address. */;
+	__le16 tunn_src_mac_addr_lo /* Tunnel source MAC address. */;
+	__le16 tunn_dst_mac_addr_hi /* Tunnel destination MAC address. */;
+	__le16 tunn_dst_mac_addr_mid /* Tunnel destination MAC address. */;
+	__le16 tunn_dst_mac_addr_lo /* Tunnel destination MAC address. */;
+	__le16 ipid /* Identification field in IPv4 header */;
+	u8 ipid_valid_flg /* if set, the ipid field is valid */;
+/* if set, fw will update inner or single ipv6 address according to ipv6 address above */
+	u8 update_ipv6_addr;
+/* if set, fw will update tunnel ipv6 address according to ipv6 address above */
+	u8 update_tunn_ipv6_addr;
+	u8 reset_cnt_0_flg /* if set, reset counter 0 */;
+	u8 reset_cnt_1_flg /* if set, reset counter 1 */;
+	u8 reserved;
+};
+
+/*
+ * GFS filter context.
+ */
+struct gfs_filter_context_data {
+	__le32 context[24] /* GFS context. */;
+/* Ipv6 and MAC addresses to be stored in indirect table in storm ram */
+	struct gfs_indirect_data indirect_data;
+};
+
+/*
+ * GFS filter header
+ */
+struct gfs_filter_header {
+/* flow id associated with this filter. Must be unique to allow performance optimization. Limited to
+ * 31 bit.
+ */
+	__le32 flow_id;
+	__le32 flow_mark /* filter flow mark. Reported by RX CQE. */;
+	u8 num_of_actions /* Number of valid actions in actions list. */;
+/* If set, exception context will be updated. Packet header not needed in this case. Exception
+ * context allocated per PF.
+ */
+	u8 exception_context_flg;
+	u8 reserved[6];
+};
+
+/*
+ * GFS push ETH header data
+ */
+struct gfs_filter_push_data_eth {
+	u8 vlan_exist_flg /* If set, VLAN TAG exist in ETH header. */;
+	u8 reserved;
+	__le16 vlan_id /* VID */;
+	__le16 dst_mac_ind_index /* Destination MAC indirect data table index. */;
+	__le16 src_mac_ind_index /* Source MAC indirect data table index. */;
+	__le16 dst_mac_hi /* Destination Mac Bytes 0 to 1 */;
+	__le16 dst_mac_mid /* Destination Mac Bytes 2 to 3 */;
+	__le16 dst_mac_lo /* Destination Mac Bytes 4 to 5 */;
+	__le16 src_mac_hi /* Source Mac Bytes 0 to 1 */;
+	__le16 src_mac_mid /* Source Mac Bytes 2 to 3 */;
+	__le16 src_mac_lo /* Source Mac Bytes 4 to 5 */;
+};
+
+/*
+ * GFS push IPV4 header data
+ */
+struct gfs_filter_push_data_ipv4 {
+	u8 tc /* TC value */;
+	u8 ttl /* TTL value */;
+	u8 dont_frag_flag /* don't-fragment flag value. */;
+	u8 set_ipid_in_ram /* If set, update IPID value in ram to ipid_val . */;
+	__le16 ipid_ind_index /* IPID counter indirect data table index. */;
+	__le16 ipid_val /* Initial IPID value. Used if set_ipid_in_ram flag set. */;
+	__le32 dst_addr /* IP destination Address */;
+	__le32 src_addr /* IP source Address */;
+	__le32 reserved[7];
+};
+
+/*
+ * GFS push IPV6 header data
+ */
+struct gfs_filter_push_data_ipv6 {
+	u8 traffic_class /* traffic class */;
+	u8 hop_limit /* hop limit */;
+	__le16 dst_addr_ind_index /* destination IPv6 address indirect data table index. */;
+	__le16 src_addr_ind_index /* source IPv6 address indirect data table index. */;
+	__le16 reserved;
+	__le32 flow_label /* flow label. */;
+	__le32 dst_addr[4] /* IP Destination Address */;
+	__le32 src_addr[4] /* IP Source Address */;
+};
+
+/*
+ * GFS push IP header data
+ */
+union gfs_filter_push_data_ip {
+	struct gfs_filter_push_data_ipv4 ip_v4 /* IPV4 data */;
+	struct gfs_filter_push_data_ipv6 ip_v6 /* IPV6 data */;
+};
+
+/*
+ * GFS push VXLAN tunnel header data.
+ */
+struct gfs_filter_push_data_vxlan {
+	u8 oob_vni /* If set, use out of band VNI from RX path for hairpin traffic. */;
+	u8 udp_checksum_exist_flg /* If set, calculate UDP checksum in tunnel header. */;
+	u8 i_bit /* VXLAN I bit value. Set if VNI valid. */;
+	u8 entropy_from_inner /* If set, calculate tunnel UDP source port from inner headers. */;
+	__le16 reserved1;
+	__le16 entropy;
+	__le32 vni /* VNI value. */;
+};
+
+/*
+ * GFS push GRE tunnel header data.
+ */
+struct gfs_filter_push_data_gre {
+	u8 oob_key /* If set, use out of band KEY from RX path for hairpin traffic. */;
+	u8 checksum_exist_flg /* If set, GRE checksum exist in tunnel header. */;
+	u8 key_exist_flg /* If set, GRE key exist in tunnel header. */;
+	u8 reserved1;
+	__le32 reserved2;
+	__le32 key /* KEY value */;
+};
+
+/*
+ * GFS push tunnel header data
+ */
+union gfs_filter_push_data_tunnel_hdr {
+	struct gfs_filter_push_data_vxlan vxlan /* VXLAN data */;
+	struct gfs_filter_push_data_gre gre /* GRE data */;
+};
+
+/*
+ * GFS filter push data.
+ */
+struct gfs_filter_push_data {
+	struct gfs_filter_push_data_eth tunn_eth_header /* Tunnel ETH header data */;
+	struct gfs_filter_push_data_eth inner_eth_header /* Inner ETH header data */;
+	union gfs_filter_push_data_ip tunn_ip_header /* Tunnel IP header data */;
+	union gfs_filter_push_data_tunnel_hdr tunn_header /* Tunnel header data */;
+};
+
+/*
+ * GFS filter context build input data.
+ */
+struct gfs_filter_ctx_build_data {
+	struct gfs_filter_header filter_hdr /* GFS filter header */;
+	struct gfs_filter_push_data push_data /* Push action data. */;
+	struct gfs_action actions[ETH_GFS_NUM_OF_ACTIONS] /*  GFS actions */;
+};
+
+/*
+ * add GFS filter - filter is packet header of type of packet wished to pass certain FW flow
+ */
+struct gfs_add_filter_ramrod_data {
+	struct gfs_add_filter_cmd_header filter_cmd_hdr /* Add GFS filter command header */;
+	struct gfs_filter_context_data context_data /* GFS filter context. */;
+	struct gfs_filter_ctx_build_data filter_data /* GFS filter data. */;
+};
+
+
+/*
+ * GFS Flow Counters Report Request Ramrod
+ */
+struct gfs_counters_report_ramrod_data {
+	u8 tx_table_valid /* 1: Valid Tx Reporting Table, 0: No Tx Reporting Required */;
+	u8 rx_table_valid /* 1: Valid Rx Reporting Table, 0: No Rx Reporting Required */;
+	u8 timestamp_log2_factor /* Log2 factor applied to timestamp base. */;
+	u8 reserved1[5];
+/* Valid Tx Counter Values reporting Table (type is eth_gfs_counters_report) */
+	struct regpair tx_counters_data_table_address;
+/* Valid Rx Counter Values reporting Table (type is eth_gfs_counters_report) */
+	struct regpair rx_counters_data_table_address;
+};
+
+
+/*
+ * GFS filter hash value.
+ */
+struct gfs_filter_hash_value {
+	__le32 hash[4] /* GFS hash. */;
+};
+
+/*
+ * del GFS filter - filter is packet header of type of packet wished to pass certain FW flow
+ */
+struct gfs_del_filter_ramrod_data {
+	struct regpair pkt_hdr_addr /* Pointer to Packet Header That Defines GFS Filter */;
+	__le16 pkt_hdr_length /* Packet Header Length */;
+	u8 profile_id /* profile id. */;
+	u8 vport_id /* vport id. */;
+/* 0 - don't assert in case of filter configuration error, return an error code. 1 - assert in case
+ * of filter configuration error
+ */
+	u8 assert_on_error;
+	u8 use_hash_flg /* If set, hash value used for delete filter instead of packet header. */;
+	__le16 reserved;
+	struct gfs_filter_hash_value hash /* GFS filter hash value. */;
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+enum gfs_module_type {
+	e_rgfs,
+	e_tgfs,
+	MAX_GFS_MODULE_TYPE
 };
 
 
 /*
- * eth vport RSS mode
+ * GFT filter update action type.
  */
-enum eth_vport_rss_mode {
-	ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */,
-	ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */,
-	MAX_ETH_VPORT_RSS_MODE
+enum gft_filter_update_action {
+	GFT_ADD_FILTER,
+	GFT_DELETE_FILTER,
+	MAX_GFT_FILTER_UPDATE_ACTION
 };
 
 
 /*
- * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ * Entry (per PF) within the hairpin CIDs allocation table on X-storm RAM
  */
-struct eth_vport_rx_mode {
-	__le16 state;
-/* drop all unicast packets */
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK          0x1
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT         0
-/* accept all unicast packets (subject to vlan) */
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK        0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT       1
-/* accept all unmatched unicast packets */
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK  0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
-/* drop all multicast packets */
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK          0x1
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT         3
-/* accept all multicast packets (subject to vlan) */
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK        0x1
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT       4
-/* accept all broadcast packets (subject to vlan) */
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK        0x1
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT       5
-/* accept any VNI in tunnel VNI classification. Used for default queue. */
-#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK          0x1
-#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT         6
-#define ETH_VPORT_RX_MODE_RESERVED1_MASK               0x1FF
-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT              7
+struct hairpin_per_pf_cid_allocation {
+/* Set Only Upon the 1st ever Tx/Rx Queue Start. i.e. only if the cell is 0. Verified by assert on
+ * each consecutive CID. HSI Documentation defines that per-PF, the 1st Hairpin (Tx/Rx) Queue Start
+ * CID to be the Base CID.
+ */
+	__le32 base_hairpin_cid;
+/* Appropriate PF's numPfHairpinQueues is increased per either Hairpin Tx/Rx Queue start. HSI
+ * Documentation requires that per-PF, the Hairpin Allocated CIDs are consecutive.
+ */
+	u8 num_pf_hairpin_queues;
+/* Default Vport for redirection. vport_id Field in First Tx/Rx Queue Start Ramrod. HSI
+ * Documentation defines that per-PF, the 1st Hairpin (Tx/Rx) Queue Start shall define the default
+ * vport to redirect hairpin traffic to.
+ */
+	u8 vport;
+	u8 reserved[2];
 };
 
 
 /*
- * Command for setting tpa parameters
- */
-struct eth_vport_tpa_param {
-	u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */;
-	u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
-	u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
-	u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
-/* If set, start each TPA segment on new BD (GRO mode). One BD per segment
- * allowed.
+ * Ramrod data for hairpin queue stop ramrod.
  */
-	u8 tpa_pkt_split_flg;
-/* If set, put header of first TPA segment on first BD and data on second BD. */
-	u8 tpa_hdr_data_split_flg;
-/* If set, GRO data consistent will checked for TPA continue */
-	u8 tpa_gro_consistent_flg;
-/* maximum number of opened aggregations per v-port  */
-	u8 tpa_max_aggs_num;
-	__le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
-/* minimum TCP payload size for a packet to start aggregation */
-	__le16 tpa_min_size_to_start;
-/* minimum TCP payload size for a packet to continue aggregation */
-	__le16 tpa_min_size_to_cont;
-/* maximal number of buffers that can be used for one aggregation */
-	u8 max_buff_num;
-	u8 reserved;
+struct hairpin_queue_stop_ramrod_data {
+	__le16 reserved[4];
 };
 
 
 /*
- * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ * RGFS Command List
  */
-struct eth_vport_tx_mode {
-	__le16 state;
-/* drop all unicast packets */
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK    0x1
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT   0
-/* accept all unicast packets (subject to vlan) */
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK  0x1
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-/* drop all multicast packets */
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK    0x1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT   2
-/* accept all multicast packets (subject to vlan) */
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK  0x1
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
-/* accept all broadcast packets (subject to vlan) */
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK  0x1
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_TX_MODE_RESERVED1_MASK         0x7FF
-#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT        5
+enum rgfs_commands_list {
+	RGFS_COMMAND_NOP = 0 /* Nop command. Used by default unmatch flow. */,
+	RGFS_COMMAND_REGULAR /* Regular */,
+	RGFS_COMMAND_SIMPLE /* RGFS simple */,
+	RGFS_COMMAND_4TUPLE /* RGFS 4-tuple modify */,
+	RGFS_COMMAND_ORIGINAL /* RGFS 4-tuple modify, original flag set. */,
+	RGFS_COMMAND_INNER_HDR /* RGFS inner header modify */,
+	RGFS_COMMAND_FULL /* RGFS full */,
+	RGFS_STEERING_REGULAR /* Regular */,
+	RGFS_STEERING_SIMPLE /* RGFS simple */,
+	RGFS_STEERING_4TUPLE /* RGFS 4-tuple modify */,
+	RGFS_STEERING_ORIGINAL /* RGFS 4-tuple modify, original flag set. */,
+	RGFS_STEERING_INNER_HDR /* RGFS inner header modify */,
+	RGFS_STEERING_FULL /* RGFS full */,
+	RGFS_HAIRPINNING_FULL /* RGFS send to TX queue (inner header and full) */,
+/* RGFS send to TX queue. optimized (regular, simple and 4-tuple modify) */
+	RGFS_HAIRPINNING_OPT,
+	RGFS_HAIRPINNING_ORIGINAL /* RGFS send to TX queue. Original flag set. */,
+	RGFS_DROP /* RGFS drop (all flows) */,
+	RGFS_COMMAND_INTEG_TEST /* Integration test flow */,
+	MAX_RGFS_COMMANDS_LIST
 };
 
 
 /*
- * GFT filter update action type.
+ * Ramrod data for rx create gft action
  */
-enum gft_filter_update_action {
-	GFT_ADD_FILTER,
-	GFT_DELETE_FILTER,
-	MAX_GFT_FILTER_UPDATE_ACTION
+struct rx_create_gft_action_ramrod_data {
+	u8 vport_id /* Vport Id of GFT Action  */;
+	u8 reserved[7];
 };
 
 
+/*
+ * Ramrod data for rx create openflow action
+ */
+struct rx_create_openflow_action_ramrod_data {
+	u8 vport_id /* ID of RX queue */;
+	u8 reserved[7];
+};
 
 
 /*
  * Ramrod data for rx add openflow filter
  */
-struct rx_add_openflow_filter_data {
+struct rx_openflow_filter_ramrod_data {
 	__le16 action_icid /* CID of Action to run for this filter */;
 	u8 priority /* Searcher String - Packet priority */;
 	u8 reserved0;
 	__le32 tenant_id /* Searcher String - Tenant ID */;
-/* Searcher String - Destination Mac Bytes 0 to 1 */
-	__le16 dst_mac_hi;
-/* Searcher String - Destination Mac Bytes 2 to 3 */
-	__le16 dst_mac_mid;
-/* Searcher String - Destination Mac Bytes 4 to 5 */
-	__le16 dst_mac_lo;
+	__le16 dst_mac_hi /* Searcher String - Destination Mac Bytes 0 to 1 */;
+	__le16 dst_mac_mid /* Searcher String - Destination Mac Bytes 2 to 3 */;
+	__le16 dst_mac_lo /* Searcher String - Destination Mac Bytes 4 to 5 */;
 	__le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */;
 	__le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */;
 	__le16 src_mac_lo /* Searcher String - Source Mac 4 to 5 */;
 	__le16 vlan_id /* Searcher String - Vlan ID */;
 	__le16 l2_eth_type /* Searcher String - Last L2 Ethertype */;
 	u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */;
-	u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type */;
+/* Searcher String - IPv4 Fragmentation Type (use enum eth_ipv4_frag_type) */
+	u8 ipv4_frag_type;
 	u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */;
 	u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
 	__le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */;
@@ -1052,60 +2483,48 @@ struct rx_add_openflow_filter_data {
 };
 
 
-/*
- * Ramrod data for rx create gft action
- */
-struct rx_create_gft_action_data {
-	u8 vport_id /* Vport Id of GFT Action  */;
-	u8 reserved[7];
-};
-
-
-/*
- * Ramrod data for rx create openflow action
- */
-struct rx_create_openflow_action_data {
-	u8 vport_id /* ID of RX queue */;
-	u8 reserved[7];
-};
-
-
 /*
  * Ramrod data for rx queue start ramrod
  */
 struct rx_queue_start_ramrod_data {
-	__le16 rx_queue_id /* ID of RX queue */;
+/* RX queue ID or [E5-only] Hairpin Queue ID when is_hairpin is set. For Hairpin queues, range is
+ * [0..ETH_MAX_NUM_L2_HAIRPIN_QUEUES-1].
+ */
+	__le16 rx_queue_id;
 	__le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
-	__le16 bd_max_bytes /* maximal bytes that can be places on the bd */;
+	__le16 bd_max_bytes /* maximal number of bytes that can be placed on the bd */;
 	__le16 sb_id /* Status block ID */;
-	u8 sb_index /* index of the protocol index */;
-	u8 vport_id /* ID of virtual port */;
-	u8 default_rss_queue_flg /* set queue as default rss queue if set */;
-	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
-	u8 complete_event_flg /* post completion to the event ring if set */;
+	u8 sb_index /* Status block index */;
+	u8 vport_id /* Vport ID */;
+	u8 default_rss_queue_flg /* if set - this queue will be the default RSS queue */;
+/* if set - post completion to the CQE ring. For Hairpinning - must be cleared. */
+	u8 complete_cqe_flg;
+/* if set - post completion to the event ring. For Hairpinning - must be set. */
+	u8 complete_event_flg;
 	u8 stats_counter_id /* Statistics counter ID */;
-	u8 pin_context /* Pin context in CCFC to improve performance */;
+/* if set - Pin CID context. Total number of pinned connections cannot exceed
+ * ETH_PINNED_CONN_MAX_NUM
+ */
+	u8 pin_context;
 	u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD/SGE fetch */;
-/* PXP command TPH Valid - for packet placement */
-	u8 pxp_tph_valid_pkt;
-/* PXP command Steering tag hint. Use enum pxp_tph_st_hint */
-	u8 pxp_st_hint;
+	u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet placement */;
+	u8 pxp_st_hint /* PXP command Steering tag hint. Use enum pxp_tph_st_hint */;
 	__le16 pxp_st_index /* PXP command Steering tag index */;
-/* Indicates that current queue belongs to poll-mode driver */
-	u8 pmd_mode;
-/* Indicates that the current queue is using the TX notification queue
- * mechanism - should be set only for PMD queue
+	u8 pmd_mode /* Indicates that current queue belongs to poll-mode driver */;
+/* Indicates that the current queue is using the TX notification queue mechanism - should be set
+ * only for PMD queue
  */
 	u8 notify_en;
-/* Initial value for the toggle valid bit - used in PMD mode */
-	u8 toggle_val;
-/* Index of RX producers in VF zone. Used for VF only. */
-	u8 vf_rx_prod_index;
-/* Backward compatibility mode. If set, unprotected mStorm queue zone will used
- * for VF RX producers instead of VF zone.
+	u8 toggle_val /* Initial value for the toggle valid bit - used in PMD mode */;
+	u8 vf_rx_prod_index /* Index of RX producers in VF zone. Used for VF only. */;
+/* Backward compatibility mode. If set, unprotected mStorm queue zone will be used for VF RX
+ * producers instead of VF zone.
  */
 	u8 vf_rx_prod_use_zone_a;
-	u8 reserved[5];
+	u8 is_hairpin /* [E5-only] Hairpin Queue indication */;
+/* [E5-only] # Full hairpin Rx BD pages. Valid when is_hairpin set */
+	u8 num_hairpin_rx_bd_pages;
+	u8 reserved[3];
 	__le16 reserved1 /* FW reserved. */;
 	struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
 	struct regpair bd_base /* bd address of the first bd page */;
@@ -1117,9 +2536,9 @@ struct rx_queue_start_ramrod_data {
  * Ramrod data for rx queue stop ramrod
  */
 struct rx_queue_stop_ramrod_data {
-	__le16 rx_queue_id /* ID of RX queue */;
-	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
-	u8 complete_event_flg /* post completion to the event ring if set */;
+	__le16 rx_queue_id /* RX queue ID */;
+	u8 complete_cqe_flg /* if set - post completion to the CQE ring  */;
+	u8 complete_event_flg /* if set - post completion to the event ring */;
 	u8 vport_id /* ID of virtual port */;
 	u8 reserved[3];
 };
@@ -1129,12 +2548,11 @@ struct rx_queue_stop_ramrod_data {
  * Ramrod data for rx queue update ramrod
  */
 struct rx_queue_update_ramrod_data {
-	__le16 rx_queue_id /* ID of RX queue */;
-	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
-	u8 complete_event_flg /* post completion to the event ring if set */;
-	u8 vport_id /* ID of virtual port */;
-/* If set, update default rss queue to this RX queue. */
-	u8 set_default_rss_queue;
+	__le16 rx_queue_id /* RX queue ID */;
+	u8 complete_cqe_flg /* if set - post completion to the CQE ring */;
+	u8 complete_event_flg /* if set - post completion to the event ring */;
+	u8 vport_id /* Vport ID */;
+	u8 set_default_rss_queue /* If set, update default RSS queue to this queue. */;
 	u8 reserved[3];
 	u8 reserved1 /* FW reserved. */;
 	u8 reserved2 /* FW reserved. */;
@@ -1148,10 +2566,10 @@ struct rx_queue_update_ramrod_data {
 /*
  * Ramrod data for rx Add UDP Filter
  */
-struct rx_udp_filter_data {
+struct rx_udp_filter_ramrod_data {
 	__le16 action_icid /* CID of Action to run for this filter */;
 	__le16 vlan_id /* Searcher String - Vlan ID */;
-	u8 ip_type /* Searcher String - IP Type */;
+	u8 ip_type /* Searcher String - IP Type (use enum eth_ip_type) */;
 	u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
 	__le16 reserved1;
 /* Searcher String - IP Destination Address, for IPv4 use ip_dst_addr[0] only */
@@ -1164,50 +2582,69 @@ struct rx_udp_filter_data {
 };
 
 
-/*
- * add or delete GFT filter - filter is packet header of type of packet wished
- * to pass certain FW flow
+/* add or delete GFT filter - filter is packet header of type of packet wished to pass certain FW
+ * flow
  */
-struct rx_update_gft_filter_data {
-/* Pointer to Packet Header That Defines GFT Filter */
-	struct regpair pkt_hdr_addr;
+struct rx_update_gft_filter_ramrod_data {
+	struct regpair pkt_hdr_addr /* Pointer to Packet Header That Defines GFT Filter */;
 	__le16 pkt_hdr_length /* Packet Header Length */;
-/* Action icid. Valid if action_icid_valid flag set. */
-	__le16 action_icid;
+	__le16 action_icid /* Action icid. Valid if action_icid_valid flag set. */;
 	__le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */;
 	__le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */;
-/* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */
-	__le16 vport_id;
-/* If set, action_icid will used for GFT filter update. */
-	u8 action_icid_valid;
-/* If set, rx_qid will used for traffic steering, in additional to vport_id.
- * flow_id_valid must be cleared. If cleared, queue ID will selected by RSS.
+	__le16 vport_id /* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */;
+	u8 action_icid_valid /* If set, action_icid will be used for GFT filter update. */;
+/* If set, rx_qid will be used for traffic steering, in addition to vport_id. flow_id_valid must
+ * be cleared. If cleared, queue ID will be selected by RSS.
  */
 	u8 rx_qid_valid;
-/* If set, flow_id will reported by CQE, rx_qid_valid must be cleared. If
- * cleared, flow_id 0 will reported by CQE.
+/* If set, flow_id will be reported by CQE, rx_qid_valid must be cleared. If cleared, flow_id 0
+ * will be reported by CQE.
  */
 	u8 flow_id_valid;
-	u8 filter_action /* Use to set type of action on filter */;
-/* 0 - dont assert in case of error. Just return an error code. 1 - assert in
- * case of error.
- */
+/* Use to set type of action on filter (use enum gft_filter_update_action) */
+	u8 filter_action;
+/* 0 - don't assert in case of error. Just return an error code. 1 - assert in case of error. */
 	u8 assert_on_error;
-/* If set, inner VLAN will be removed regardless to VPORT configuration.
- * Supported by E4 only.
- */
+/* If set, inner VLAN will be removed regardless to VPORT configuration. Supported by E4 only. */
 	u8 inner_vlan_removal_en;
 };
 
 
+/*
+ * TGFS Command List
+ */
+enum tgfs_commands_list {
+	TGFS_COMMAND_NO_OPERATION /* TGFS no operation */,
+	TGFS_COMMAND_REGULAR /* Regular */,
+	TGFS_COMMAND_SIMPLE_0 /* TGFS simple */,
+	TGFS_COMMAND_SIMPLE_1 /* TGFS simple */,
+	TGFS_COMMAND_SIMPLE_2 /* TGFS simple */,
+	TGFS_COMMAND_INNER_HDR_0 /* TGFS inner header modify */,
+	TGFS_COMMAND_INNER_HDR_1 /* TGFS inner header modify */,
+	TGFS_COMMAND_INNER_HDR_2 /* TGFS inner header modify */,
+	TGFS_COMMAND_FULL_0 /* TGFS full */,
+	TGFS_COMMAND_FULL_1 /* TGFS full */,
+	TGFS_COMMAND_FULL_2 /* TGFS full */,
+	TGFS_COMMAND_FULL_ORIGINAL_1 /* TGFS full for original copy */,
+	TGFS_COMMAND_FULL_ORIGINAL_2 /* TGFS full for original copy */,
+	TGFS_COMMAND_VLAN_INSERTION /* TGFS vlan insertion      - INTEGRATION command */,
+	TGFS_COMMAND_APPEND_CONTEXT /* TGFS append context test - INTEGRATION command */,
+	TGFS_COMMAND_EDPM_VLAN_INSERTION /* TGFS EDPM vlan insertion - INTEGRATION command */,
+	TGFS_COMMAND_DROP /* TGFS drop flow. */,
+	MAX_TGFS_COMMANDS_LIST
+};
+
 
 /*
  * Ramrod data for tx queue start ramrod
  */
 struct tx_queue_start_ramrod_data {
 	__le16 sb_id /* Status block ID */;
-	u8 sb_index /* Status block protocol index */;
-	u8 vport_id /* VPort ID */;
+	u8 sb_index /* Status block index */;
+/* VPort ID. For Hairpin Queues this is the QM Vport ID, which is used for Rate Limiter
+ * functionality.
+ */
+	u8 vport_id;
 	u8 reserved0 /* FW reserved. (qcn_rl_en) */;
 	u8 stats_counter_id /* Statistics counter ID to use */;
 	__le16 qm_pq_id /* QM PQ ID */;
@@ -1218,44 +2655,47 @@ struct tx_queue_start_ramrod_data {
 /* If set, Test Mode - packets will be duplicated by Xstorm handler */
 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK      0x1
 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT     1
-/* If set, Test Mode - packets destination will be determined by dest_port_mode
- * field from Tx BD
- */
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK      0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT     2
 /* Indicates that current queue belongs to poll-mode driver */
 #define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK               0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT              3
-/* Indicates that the current queue is using the TX notification queue
- * mechanism - should be set only for PMD queue
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT              2
+/* Indicates that the current queue is using the TX notification queue mechanism - should be set
+ * only for PMD queue
  */
 #define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK              0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT             4
-/* Pin context in CCFC to improve performance */
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT             3
+/* If set - Pin CID context. Total number of pinned connections cannot exceed
+ * ETH_PINNED_CONN_MAX_NUM
+ */
 #define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK            0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT           5
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT           4
+/* [E5-only] Hairpin Queue indication */
+#define TX_QUEUE_START_RAMROD_DATA_IS_HAIRPIN_MASK             0x1
+#define TX_QUEUE_START_RAMROD_DATA_IS_HAIRPIN_SHIFT            5
 #define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK              0x3
 #define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT             6
-	u8 pxp_st_hint /* PXP command Steering tag hint */;
+	u8 pxp_st_hint /* PXP command Steering tag hint (use enum pxp_tph_st_hint) */;
 	u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */;
 	u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */;
 	__le16 pxp_st_index /* PXP command Steering tag index */;
-/* TX completion min agg size - for PMD queues */
-	__le16 comp_agg_size;
+	u8 comp_agg_size /* TX completion min agg size - for PMD queues */;
+	u8 reserved3;
 	__le16 queue_zone_id /* queue zone ID to use */;
 	__le16 reserved2 /* FW reserved. (test_dup_count) */;
 	__le16 pbl_size /* Number of BD pages pointed by PBL */;
-/* unique Queue ID - currently used only by PMD flow */
+/* Unique Queue ID - used only by PMD flow. When [E5-only] is_hairpin flag is set: hairpin queue ID
+ * - Must be equal to the Hairpin Queue ID in Start Rx Queue for Hairpin Queues
+ */
 	__le16 tx_queue_id;
-/* Unique Same-As-Last Resource ID - improves performance for same-as-last
- * packets per connection (range 0..ETH_TX_NUM_SAME_AS_LAST_ENTRIES-1 IDs
- * available)
+/* For E4: Unique Same-As-Last Resource ID - improves performance for same-as-last packets per
+ * connection (range 0..ETH_TX_NUM_SAME_AS_LAST_ENTRIES_E4-1 IDs available). Switch off SAL for this
+ * tx queue by setting value to ETH_TX_INACTIVE_SAME_AS_LAST (HSI constant). For E5: Switch off SAL
+ * for this tx queue by setting value to ETH_TX_INACTIVE_SAME_AS_LAST (HSI constant), otherwise
+ * active
  */
 	__le16 same_as_last_id;
 	__le16 reserved[3];
 	struct regpair pbl_base_addr /* address of the pbl page */;
-/* BD consumer address in host - for PMD queues */
-	struct regpair bd_cons_address;
+	struct regpair bd_cons_address /* BD consumer address in host - for PMD queues */;
 };
 
 
@@ -1282,8 +2722,7 @@ struct tx_queue_update_ramrod_data {
  * Inner to Inner VLAN priority map update mode
  */
 enum update_in_to_in_pri_map_mode_enum {
-/* Inner to Inner VLAN priority map update Disabled */
-	ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED,
+	ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED /* Inner to Inner VLAN priority map update Disabled */,
 /* Update Inner to Inner VLAN priority map for non RDMA protocols */
 	ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL,
 /* Update Inner to Inner VLAN priority map for RDMA protocols */
@@ -1292,15 +2731,13 @@ enum update_in_to_in_pri_map_mode_enum {
 };
 
 
-
 /*
  * Ramrod data for vport update ramrod
  */
 struct vport_filter_update_ramrod_data {
 /* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */
 	struct eth_filter_cmd_header filter_cmd_hdr;
-/* Filter Commands */
-	struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+	struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT] /* Filter Commands */;
 };
 
 
@@ -1315,35 +2752,44 @@ struct vport_start_ramrod_data {
 	u8 inner_vlan_removal_en;
 	struct eth_vport_rx_mode rx_mode /* Rx filter data */;
 	struct eth_vport_tx_mode tx_mode /* Tx filter data */;
-/* TPA configuration parameters */
-	struct eth_vport_tpa_param tpa_param;
+	struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */;
 	__le16 default_vlan /* Default Vlan value to be forced by FW */;
 	u8 tx_switching_en /* Tx switching is enabled for current Vport */;
-/* Anti-spoofing verification is set for current Vport */
-	u8 anti_spoofing_en;
-/* If set, the default Vlan value is forced by the FW */
-	u8 default_vlan_en;
-/* If set, the vport handles PTP Timesync Packets */
-	u8 handle_ptp_pkts;
+	u8 anti_spoofing_en /* Anti-spoofing verification is set for current Vport */;
+	u8 default_vlan_en /* If set, the default Vlan value is forced by the FW */;
+	u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */;
 /* If enable then innerVlan will be striped and not written to cqe */
 	u8 silent_vlan_removal_en;
-/* If set untagged filter (vlan0) is added to current Vport, otherwise port is
- * marked as any-vlan
- */
+/* If set untagged filter (vlan0) is added to current Vport, otherwise port is marked as any-vlan */
 	u8 untagged;
-/* Desired behavior per TX error type */
-	struct eth_tx_err_vals tx_err_behav;
-/* If set, ETH header padding will not inserted. placement_offset will be zero.
- */
+	struct eth_tx_err_vals tx_err_behav /* Desired behavior per TX error type */;
+/* If set, ETH header padding will not be inserted. placement_offset will be zero. */
 	u8 zero_placement_offset;
 /* If set, control frames will be filtered according to MAC check. */
 	u8 ctl_frame_mac_check_en;
 /* If set, control frames will be filtered according to ethtype check. */
 	u8 ctl_frame_ethtype_check_en;
-/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
- * zero out, used for TenantDcb
+	u8 rgfs_search_en_e5 /* Instruct to send a RGFS Search command valid only in E5 */;
+	u8 tgfs_search_en_e5 /* Instruct to send a TGFS Search command valid only in E5 */;
+/* Configurations for tx forwarding, used in VF Representor mode (use enum
+ * eth_tx_dst_mode_config_enum)
+ */
+	u8 tx_dst_port_mode_config;
+/* destination Vport ID to forward the packet, applicable only if dst_vport_id_valid is set and when
+ * tx_dst_port_mode_config == ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT and (tx_dst_port_mode ==
+ * DST_PORT_LOOPBACK or tx_dst_port_mode == DST_PORT_PHY_LOOPBACK)
+ */
+	u8 dst_vport_id;
+/* destination tx to forward the packet, applicable only when tx_dst_port_mode_config ==
+ * ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT (use enum dst_port_mode)
+ */
+	u8 tx_dst_port_mode;
+	u8 dst_vport_id_valid /* if set, dst_vport_id has valid value */;
+/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be zero out, used for
+ * TenantDcb
  */
 	u8 wipe_inner_vlan_pri_en;
+	u8 reserved2[2];
 /* inner to inner vlan priority translation configurations */
 	struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
 };
@@ -1369,58 +2815,62 @@ struct vport_update_ramrod_data_cmn {
 	u8 tx_active_flg /* tx active flag value */;
 	u8 update_rx_mode_flg /* set if rx state data should be handled */;
 	u8 update_tx_mode_flg /* set if tx state data should be handled */;
-/* set if approx. mcast data should be handled */
-	u8 update_approx_mcast_flg;
+	u8 update_approx_mcast_flg /* set if approx. mcast data should be handled */;
 	u8 update_rss_flg /* set if rss data should be handled  */;
-/* set if inner_vlan_removal_en should be handled */
-	u8 update_inner_vlan_removal_en_flg;
+	u8 update_inner_vlan_removal_en_flg /* set if inner_vlan_removal_en should be handled */;
 	u8 inner_vlan_removal_en;
 /* set if tpa parameters should be handled, TPA must be disable before */
 	u8 update_tpa_param_flg;
 	u8 update_tpa_en_flg /* set if tpa enable changes */;
-/* set if tx switching en flag should be handled */
-	u8 update_tx_switching_en_flg;
+	u8 update_tx_switching_en_flg /* set if tx switching en flag should be handled */;
 	u8 tx_switching_en /* tx switching en value */;
-/* set if anti spoofing flag should be handled */
-	u8 update_anti_spoofing_en_flg;
+	u8 update_anti_spoofing_en_flg /* set if anti spoofing flag should be handled */;
 	u8 anti_spoofing_en /* Anti-spoofing verification en value */;
-/* set if handle_ptp_pkts should be handled. */
-	u8 update_handle_ptp_pkts;
-/* If set, the vport handles PTP Timesync Packets */
-	u8 handle_ptp_pkts;
-/* If set, the default Vlan enable flag is updated */
-	u8 update_default_vlan_en_flg;
-/* If set, the default Vlan value is forced by the FW */
-	u8 default_vlan_en;
-/* If set, the default Vlan value is updated */
-	u8 update_default_vlan_flg;
+	u8 update_handle_ptp_pkts /* set if handle_ptp_pkts should be handled. */;
+	u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */;
+	u8 update_default_vlan_en_flg /* If set, the default Vlan enable flag is updated */;
+	u8 default_vlan_en /* If set, the default Vlan value is forced by the FW */;
+	u8 update_default_vlan_flg /* If set, the default Vlan value is updated */;
 	__le16 default_vlan /* Default Vlan value to be forced by FW */;
-/* set if accept_any_vlan should be handled */
-	u8 update_accept_any_vlan_flg;
+	u8 update_accept_any_vlan_flg /* set if accept_any_vlan should be handled */;
 	u8 accept_any_vlan /* accept_any_vlan updated value */;
-/* Set to remove vlan silently, update_inner_vlan_removal_en_flg must be enabled
- * as well. If Rx is in noSgl mode send rx_queue_update_ramrod_data
+/* Set to remove vlan silently, update_inner_vlan_removal_en_flg must be enabled as well. If Rx is
+ * in noSgl mode send rx_queue_update_ramrod_data
  */
 	u8 silent_vlan_removal_en;
-/* If set, MTU will be updated. Vport must be not active. */
-	u8 update_mtu_flg;
+	u8 update_mtu_flg /* If set, MTU will be updated. Vport must be not active. */;
 	__le16 mtu /* New MTU value. Used if update_mtu_flg are set */;
-/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be
- * updated
- */
+/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be updated */
 	u8 update_ctl_frame_checks_en_flg;
 /* If set, control frames will be filtered according to MAC check. */
 	u8 ctl_frame_mac_check_en;
 /* If set, control frames will be filtered according to ethtype check. */
 	u8 ctl_frame_ethtype_check_en;
-/* Indicates to update RDMA or NON-RDMA vlan remapping priority table according
- * to update_in_to_in_pri_map_mode_enum, used for TenantDcb (use enum
+/* Indicates to update RDMA or NON-RDMA vlan remapping priority table according to
+ * update_in_to_in_pri_map_mode_enum, used for TenantDcb (use enum
  * update_in_to_in_pri_map_mode_enum)
  */
 	u8 update_in_to_in_pri_map_mode;
 /* Map for inner to inner vlan priority translation, used for TenantDcb. */
 	u8 in_to_in_pri_map[8];
-	u8 reserved[6];
+/* If set, tx_dst_port_mode_config, tx_dst_port_mode and dst_vport_id will be updated */
+	u8 update_tx_dst_port_mode_flg;
+/* Configurations for tx forwarding, used in VF Representor mode (use enum
+ * eth_tx_dst_mode_config_enum)
+ */
+	u8 tx_dst_port_mode_config;
+/* destination Vport ID to forward the packet, applicable only if dst_vport_id_valid is set and when
+ * tx_dst_port_mode_config == ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT and (tx_dst_port_mode ==
+ * DST_PORT_LOOPBACK or tx_dst_port_mode == DST_PORT_PHY_LOOPBACK)
+ */
+	u8 dst_vport_id;
+/* destination tx to forward the packet, applicable only when tx_dst_port_mode_config ==
+ * ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT (use enum dst_port_mode)
+ */
+	u8 tx_dst_port_mode;
+/* if set, dst_vport_id has valid value. If clear, RX classification will performed. */
+	u8 dst_vport_id_valid;
+	u8 reserved;
 };
 
 struct vport_update_ramrod_mcast {
@@ -1431,13 +2881,11 @@ struct vport_update_ramrod_mcast {
  * Ramrod data for vport update ramrod
  */
 struct vport_update_ramrod_data {
-/* Common data for all vport update ramrods */
-	struct vport_update_ramrod_data_cmn common;
+	struct vport_update_ramrod_data_cmn common /* Common data for all vport update ramrods */;
 	struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
 	struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
 	__le32 reserved[3];
-/* TPA configuration parameters */
-	struct eth_vport_tpa_param tpa_param;
+	struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */;
 	struct vport_update_ramrod_mcast approx_mcast;
 	struct eth_vport_rss_config rss_config /* rss config data */;
 };
@@ -1445,313 +2893,215 @@ struct vport_update_ramrod_data {
 
 
 
-
-
 struct E4XstormEthConnAgCtxDqExtLdPart {
 	u8 reserved0 /* cdu_validation */;
 	u8 state /* state */;
 	u8 flags0;
-/* exist_in_qm0 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK            0x1 /* exist_in_qm0 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
-/* exist_in_qm1 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK               0x1 /* exist_in_qm1 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
-/* exist_in_qm2 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK               0x1 /* exist_in_qm2 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
-/* exist_in_qm3 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK            0x1 /* exist_in_qm3 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
-/* bit4 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK               0x1 /* bit4 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
-/* cf_array_active */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK               0x1 /* cf_array_active */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
-/* bit6 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK               0x1 /* bit6 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
-/* bit7 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK               0x1 /* bit7 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
 	u8 flags1;
-/* bit8 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK               0x1 /* bit8 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
-/* bit9 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK               0x1 /* bit9 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
-/* bit10 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK               0x1 /* bit10 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
-/* bit11 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK                   0x1 /* bit11 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
-/* bit12 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK                   0x1
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
-/* bit13 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK                   0x1
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
-/* bit14 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK            0x1 /* bit12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT           4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK            0x1 /* bit13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT           5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK          0x1 /* bit14 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
-/* bit15 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK            0x1 /* bit15 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
 	u8 flags2;
-/* timer0cf */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK                     0x3 /* timer0cf */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
-/* timer1cf */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK                     0x3 /* timer1cf */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
-/* timer2cf */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK                     0x3 /* timer2cf */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
-/* timer_stop_all */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK                     0x3 /* timer_stop_all */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
 	u8 flags3;
-/* cf4 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK                     0x3 /* cf4 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
-/* cf5 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK                     0x3 /* cf5 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
-/* cf6 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK                     0x3 /* cf6 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
-/* cf7 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK                     0x3 /* cf7 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
 	u8 flags4;
-/* cf8 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK                     0x3 /* cf8 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
-/* cf9 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK                     0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK                     0x3 /* cf9 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
-/* cf10 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK                    0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK                    0x3 /* cf10 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
-/* cf11 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK                    0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK                    0x3 /* cf11 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
 	u8 flags5;
-/* cf12 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK                    0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK                    0x3 /* cf12 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
-/* cf13 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK                    0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK                    0x3 /* cf13 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
-/* cf14 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK                    0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK                    0x3 /* cf14 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
-/* cf15 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK                    0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK                    0x3 /* cf15 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
 	u8 flags6;
-/* cf16 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK        0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK        0x3 /* cf16 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
-/* cf_array_cf */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK        0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK        0x3 /* cf_array_cf */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
-/* cf18 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK                   0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK                   0x3 /* cf18 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
-/* cf19 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK            0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK            0x3 /* cf19 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
 	u8 flags7;
-/* cf20 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK                0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK                0x3 /* cf20 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
-/* cf21 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK              0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK              0x3 /* cf21 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
-/* cf22 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK               0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK               0x3 /* cf22 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
-/* cf0en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK                   0x1 /* cf0en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
-/* cf1en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK                   0x1 /* cf1en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
 	u8 flags8;
-/* cf2en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK                   0x1 /* cf2en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
-/* cf3en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK                   0x1 /* cf3en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
-/* cf4en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK                   0x1 /* cf4en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
-/* cf5en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK                   0x1 /* cf5en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
-/* cf6en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK                   0x1 /* cf6en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
-/* cf7en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK                   0x1 /* cf7en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
-/* cf8en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK                   0x1 /* cf8en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
-/* cf9en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK                   0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK                   0x1 /* cf9en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
 	u8 flags9;
-/* cf10en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK                  0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK                  0x1 /* cf10en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
-/* cf11en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK                  0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK                  0x1 /* cf11en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
-/* cf12en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK                  0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK                  0x1 /* cf12en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
-/* cf13en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK                  0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK                  0x1 /* cf13en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
-/* cf14en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK                  0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK                  0x1 /* cf14en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
-/* cf15en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK                  0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK                  0x1 /* cf15en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
-/* cf16en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK     0x1 /* cf16en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
-/* cf_array_cf_en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK     0x1 /* cf_array_cf_en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
 	u8 flags10;
-/* cf18en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK                0x1 /* cf18en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
-/* cf19en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK         0x1 /* cf19en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
-/* cf20en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK             0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK             0x1 /* cf20en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
-/* cf21en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK              0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK              0x1 /* cf21en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
-/* cf22en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK            0x1 /* cf22en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
-/* cf23en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK  0x1 /* cf23en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-/* rule0en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK              0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK              0x1 /* rule0en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
-/* rule1en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK              0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK              0x1 /* rule1en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
 	u8 flags11;
-/* rule2en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK              0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK              0x1 /* rule2en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
-/* rule3en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK              0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK              0x1 /* rule3en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
-/* rule4en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK          0x1 /* rule4en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
-/* rule5en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK                 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK                 0x1 /* rule5en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
-/* rule6en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK                 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK                 0x1 /* rule6en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
-/* rule7en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK                 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK                 0x1 /* rule7en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
-/* rule8en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK            0x1 /* rule8en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
-/* rule9en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK                 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK                 0x1 /* rule9en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
 	u8 flags12;
-/* rule10en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK                0x1 /* rule10en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
-/* rule11en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK                0x1 /* rule11en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
-/* rule12en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK            0x1 /* rule12en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
-/* rule13en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK            0x1 /* rule13en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
-/* rule14en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK                0x1 /* rule14en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
-/* rule15en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK                0x1 /* rule15en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
-/* rule16en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK                0x1 /* rule16en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
-/* rule17en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK                0x1 /* rule17en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
 	u8 flags13;
-/* rule18en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK                0x1 /* rule18en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
-/* rule19en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK                0x1 /* rule19en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
-/* rule20en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK            0x1 /* rule20en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
-/* rule21en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK            0x1 /* rule21en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
-/* rule22en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK            0x1 /* rule22en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
-/* rule23en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK            0x1 /* rule23en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
-/* rule24en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK            0x1 /* rule24en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
-/* rule25en */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK            0x1 /* rule25en */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
 	u8 flags14;
-/* bit16 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK        0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK        0x1 /* bit16 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
-/* bit17 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK      0x1 /* bit17 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
-/* bit18 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK    0x1 /* bit18 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
-/* bit19 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK    0x1 /* bit19 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-/* bit20 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK          0x1 /* bit20 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
-/* bit21 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK        0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK        0x1 /* bit21 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
-/* cf23 */
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK              0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK              0x3 /* cf23 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
 	u8 edpm_event_id /* byte2 */;
 	__le16 physical_q0 /* physical_q0 */;
@@ -1773,37 +3123,37 @@ struct E4XstormEthConnAgCtxDqExtLdPart {
 };
 
 
-struct mstorm_eth_conn_ag_ctx {
+struct e4_mstorm_eth_conn_ag_ctx {
 	u8 byte0 /* cdu_validation */;
 	u8 byte1 /* state */;
 	u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1 /* exist_in_qm0 */
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3 /* cf0 */
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK           0x3 /* cf1 */
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK           0x3 /* cf2 */
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1 /* exist_in_qm0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3 /* cf0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK           0x3 /* cf1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK           0x3 /* cf2 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
 	u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
 	__le16 word0 /* word0 */;
 	__le16 word1 /* word1 */;
 	__le32 reg0 /* reg0 */;
@@ -1814,243 +3164,216 @@ struct mstorm_eth_conn_ag_ctx {
 
 
 
-struct xstorm_eth_hw_conn_ag_ctx {
+struct e4_xstorm_eth_hw_conn_ag_ctx {
 	u8 reserved0 /* cdu_validation */;
-	u8 eth_state /* state */;
+	u8 state /* state */;
 	u8 flags0;
-/* exist_in_qm0 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
-/* exist_in_qm1 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK               0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
-/* exist_in_qm2 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK               0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
-/* exist_in_qm3 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK               0x1 /* bit4 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
-/* cf_array_active */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK               0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK               0x1 /* bit6 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK               0x1 /* bit7 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1 /* exist_in_qm0 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK               0x1 /* exist_in_qm1 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK               0x1 /* exist_in_qm2 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1 /* exist_in_qm3 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK               0x1 /* bit4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK               0x1 /* cf_array_active */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK               0x1 /* bit6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK               0x1 /* bit7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
 	u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK               0x1 /* bit8 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK               0x1 /* bit9 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK               0x1 /* bit10 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                   0x1 /* bit11 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
-#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK            0x1 /* bit12 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT           4
-#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK            0x1 /* bit13 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT           5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1 /* bit14 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1 /* bit15 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK               0x1 /* bit8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK               0x1 /* bit9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK               0x1 /* bit10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                   0x1 /* bit11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK            0x1 /* bit12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT           4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK            0x1 /* bit13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT           5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1 /* bit14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1 /* bit15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
 	u8 flags2;
-/* timer0cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK                     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
-/* timer1cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK                     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
-/* timer2cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK                     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
-/* timer_stop_all */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK                     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK                     0x3 /* timer0cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK                     0x3 /* timer1cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK                     0x3 /* timer_stop_all */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
 	u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK                     0x3 /* cf4 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK                     0x3 /* cf5 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK                     0x3 /* cf6 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK                     0x3 /* cf7 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK                     0x3 /* cf4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK                     0x3 /* cf5 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK                     0x3 /* cf6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK                     0x3 /* cf7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
 	u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK                     0x3 /* cf8 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK                     0x3 /* cf9 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK                    0x3 /* cf10 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK                    0x3 /* cf11 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK                     0x3 /* cf8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK                     0x3 /* cf9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK                    0x3 /* cf10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK                    0x3 /* cf11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
 	u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK                    0x3 /* cf12 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK                    0x3 /* cf13 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK                    0x3 /* cf14 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK                    0x3 /* cf15 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK                    0x3 /* cf12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK                    0x3 /* cf13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK                    0x3 /* cf14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK                    0x3 /* cf15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
 	u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3 /* cf16 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
-/* cf_array_cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK                   0x3 /* cf18 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK            0x3 /* cf19 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3 /* cf16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3 /* cf_array_cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK                   0x3 /* cf18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK            0x3 /* cf19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
 	u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK                0x3 /* cf20 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK              0x3 /* cf21 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK               0x3 /* cf22 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK                   0x1 /* cf0en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK                   0x1 /* cf1en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK                0x3 /* cf20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK              0x3 /* cf21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK               0x3 /* cf22 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK                   0x1 /* cf0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK                   0x1 /* cf1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
 	u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK                   0x1 /* cf4en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK                   0x1 /* cf5en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK                   0x1 /* cf6en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK                   0x1 /* cf7en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK                   0x1 /* cf8en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK                   0x1 /* cf9en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK                   0x1 /* cf4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK                   0x1 /* cf5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK                   0x1 /* cf6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK                   0x1 /* cf7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK                   0x1 /* cf8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK                   0x1 /* cf9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
 	u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK                  0x1 /* cf10en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK                  0x1 /* cf11en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK                  0x1 /* cf12en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK                  0x1 /* cf13en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK                  0x1 /* cf14en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK                  0x1 /* cf15en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1 /* cf16en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
-/* cf_array_cf_en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK                  0x1 /* cf10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK                  0x1 /* cf11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK                  0x1 /* cf12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK                  0x1 /* cf13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK                  0x1 /* cf14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK                  0x1 /* cf15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1 /* cf16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1 /* cf_array_cf_en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
 	u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK                0x1 /* cf18en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1 /* cf19en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1 /* cf20en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK              0x1 /* cf21en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1 /* cf22en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1 /* cf23en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK              0x1 /* rule0en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK              0x1 /* rule1en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK                0x1 /* cf18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1 /* cf19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1 /* cf20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK              0x1 /* cf21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1 /* cf22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1 /* cf23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK              0x1 /* rule0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK              0x1 /* rule1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
 	u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK              0x1 /* rule2en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK              0x1 /* rule3en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1 /* rule4en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK            0x1 /* rule8en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK                 0x1 /* rule9en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK              0x1 /* rule2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK              0x1 /* rule3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1 /* rule4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK            0x1 /* rule8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK                 0x1 /* rule9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
 	u8 flags12;
-/* rule10en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK                0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
-/* rule11en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK                0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
-/* rule12en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
-/* rule13en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
-/* rule14en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK                0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
-/* rule15en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK                0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
-/* rule16en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK                0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
-/* rule17en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK                0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK                0x1 /* rule10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK                0x1 /* rule11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK            0x1 /* rule12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK            0x1 /* rule13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK                0x1 /* rule14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK                0x1 /* rule15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK                0x1 /* rule16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK                0x1 /* rule17en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
 	u8 flags13;
-/* rule18en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK                0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
-/* rule19en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK                0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
-/* rule20en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
-/* rule21en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
-/* rule22en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
-/* rule23en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
-/* rule24en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
-/* rule25en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK            0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK                0x1 /* rule18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK                0x1 /* rule19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK            0x1 /* rule20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK            0x1 /* rule21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK            0x1 /* rule22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK            0x1 /* rule23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK            0x1 /* rule24en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK            0x1 /* rule25en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
 	u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1 /* bit16 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1 /* bit17 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1 /* bit18 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1 /* bit19 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1 /* bit20 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1 /* bit21 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK              0x3 /* cf23 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1 /* bit16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1 /* bit17 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1 /* bit18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1 /* bit19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1 /* bit20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1 /* bit21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK              0x3 /* cf23 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
 	u8 edpm_event_id /* byte2 */;
 	__le16 physical_q0 /* physical_q0 */;
 	__le16 e5_reserved1 /* physical_q1 */;
@@ -2063,61 +3386,904 @@ struct xstorm_eth_hw_conn_ag_ctx {
 
 
 
+/* E5 Xstorm ETH connection aggregation context - doorbell queue external
+ * load part.  Auto-generated HSI layout: each MASK/SHIFT macro pair below
+ * selects a sub-field of the flagsN byte it follows; the trailing comment
+ * names the underlying HW field (bitN/cfN/ruleNen).  The condensed
+ * E5XSTORMETHCONNAGCTXDQEXTLDPART prefix keeps lines within the 100-char
+ * checkpatch limit used for this series.
+ * NOTE(review): firmware/HW ABI - do not reorder, rename or resize fields.
+ * Presumably accessed via the driver's GET_FIELD/SET_FIELD helpers - confirm.
+ */
+struct e5_xstorm_eth_conn_ag_ctx_dq_ext_ld_part {
+	u8 reserved0 /* cdu_validation */;
+	u8 state_and_core_id /* state_and_core_id */;
+	u8 flags0;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK                   0x1 /* exist_in_qm0 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT                  0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK                      0x1 /* exist_in_qm1 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT                     1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK                      0x1 /* exist_in_qm2 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT                     2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK                   0x1 /* exist_in_qm3 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT                  3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK                      0x1 /* bit4 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT                     4
+/* cf_array_active */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK                      0x1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT                     5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK                      0x1 /* bit6 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT                     6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK                      0x1 /* bit7 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT                     7
+	u8 flags1;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK                      0x1 /* bit8 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT                     0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK                      0x1 /* bit9 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT                     1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK                      0x1 /* bit10 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT                     2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK                          0x1 /* bit11 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                         3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_COPY_CONDITION_LO_MASK         0x1 /* bit12 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_COPY_CONDITION_LO_SHIFT        4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_COPY_CONDITION_HI_MASK         0x1 /* bit13 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_COPY_CONDITION_HI_SHIFT        5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK                 0x1 /* bit14 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT                6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK                   0x1 /* bit15 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT                  7
+	u8 flags2;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK                            0x3 /* timer0cf */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                           0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK                            0x3 /* timer1cf */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                           2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK                            0x3 /* timer2cf */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                           4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK                            0x3 /* timer_stop_all */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                           6
+	u8 flags3;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK                            0x3 /* cf4 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                           0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK                            0x3 /* cf5 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                           2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK                            0x3 /* cf6 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                           4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK                            0x3 /* cf7 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                           6
+	u8 flags4;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK                            0x3 /* cf8 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                           0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK                            0x3 /* cf9 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                           2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK                           0x3 /* cf10 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                          4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK                           0x3 /* cf11 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                          6
+	u8 flags5;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_HP_CF_MASK                          0x3 /* cf12 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_HP_CF_SHIFT                         0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK                           0x3 /* cf13 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                          2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK                           0x3 /* cf14 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                          4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK                           0x3 /* cf15 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                          6
+	u8 flags6;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK               0x3 /* cf16 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT              0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK               0x3 /* cf_array_cf */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT              2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK                          0x3 /* cf18 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                         4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK                   0x3 /* cf19 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT                  6
+	u8 flags7;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK                       0x3 /* cf20 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT                      0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK                     0x3 /* cf21 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT                    2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK                      0x3 /* cf22 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT                     4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK                          0x1 /* cf0en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                         6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK                          0x1 /* cf1en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                         7
+	u8 flags8;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK                          0x1 /* cf2en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                         0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK                          0x1 /* cf3en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                         1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK                          0x1 /* cf4en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                         2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK                          0x1 /* cf5en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                         3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK                          0x1 /* cf6en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                         4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK                          0x1 /* cf7en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                         5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK                          0x1 /* cf8en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                         6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK                          0x1 /* cf9en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                         7
+	u8 flags9;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK                         0x1 /* cf10en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                        0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK                         0x1 /* cf11en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                        1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_HP_CF_EN_MASK                       0x1 /* cf12en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_HP_CF_EN_SHIFT                      2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK                         0x1 /* cf13en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                        3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK                         0x1 /* cf14en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                        4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK                         0x1 /* cf15en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                        5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK            0x1 /* cf16en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT           6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK            0x1 /* cf_array_cf_en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT           7
+	u8 flags10;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK                       0x1 /* cf18en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT                      0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK                0x1 /* cf19en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT               1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK                    0x1 /* cf20en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT                   2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK                     0x1 /* cf21en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT                    3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK                   0x1 /* cf22en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT                  4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK         0x1 /* cf23en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT        5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK                     0x1 /* rule0en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT                    6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK                     0x1 /* rule1en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT                    7
+	u8 flags11;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK                     0x1 /* rule2en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT                    0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK                     0x1 /* rule3en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT                    1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK                 0x1 /* rule4en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT                2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK                        0x1 /* rule5en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                       3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK                        0x1 /* rule6en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                       4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK                        0x1 /* rule7en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                       5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK                   0x1 /* rule8en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT                  6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK                        0x1 /* rule9en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                       7
+	u8 flags12;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK                       0x1 /* rule10en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT                      0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK                       0x1 /* rule11en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT                      1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK                   0x1 /* rule12en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT                  2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK                   0x1 /* rule13en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT                  3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK                       0x1 /* rule14en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT                      4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK                       0x1 /* rule15en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT                      5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK                       0x1 /* rule16en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT                      6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK                       0x1 /* rule17en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT                      7
+	u8 flags13;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK                       0x1 /* rule18en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT                      0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK                       0x1 /* rule19en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT                      1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK                   0x1 /* rule20en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT                  2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK                   0x1 /* rule21en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT                  3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK                   0x1 /* rule22en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT                  4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK                   0x1 /* rule23en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT                  5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK                   0x1 /* rule24en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT                  6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK                   0x1 /* rule25en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT                  7
+	u8 flags14;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK               0x1 /* bit16 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT              0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK             0x1 /* bit17 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT            1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK           0x1 /* bit18 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT          2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK           0x1 /* bit19 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT          3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK                 0x1 /* bit20 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT                4
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK               0x1 /* bit21 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT              5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK                     0x3 /* cf23 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT                    6
+	u8 edpm_vport /* byte2 */;
+	__le16 physical_q0 /* physical_q0_and_vf_id_lo */;
+	__le16 tx_l2_edpm_usg_cnt_and_vf_id_hi /* physical_q1_and_vf_id_hi */;
+	__le16 edpm_num_bds /* physical_q2 */;
+	__le16 tx_bd_cons /* word3 */;
+	__le16 tx_bd_prod /* word4 */;
+	__le16 updated_qm_pq_id /* word5 */;
+	__le16 conn_dpi /* conn_dpi */;
+	u8 fw_spare_data0 /* byte3 */;
+	u8 fw_spare_data1 /* byte4 */;
+	u8 fw_spare_data2 /* byte5 */;
+	u8 fw_spare_data3 /* byte6 */;
+	__le32 fw_spare_data4 /* reg0 */;
+	__le32 fw_spare_data5 /* reg1 */;
+	__le32 fw_spare_data6 /* reg2 */;
+	__le32 fw_spare_data7 /* reg3 */;
+	__le32 fw_spare_data8 /* reg4 */;
+	__le32 fw_spare_data9 /* cf_array0 */;
+	__le32 fw_spare_data10 /* cf_array1 */;
+	u8 flags15;
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_REDIRECTION_CONDITION_LO_MASK  0x1 /* bit22 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_REDIRECTION_CONDITION_LO_SHIFT 0
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_REDIRECTION_CONDITION_HI_MASK  0x1 /* bit23 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_REDIRECTION_CONDITION_HI_SHIFT 1
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED3_MASK                   0x1 /* bit24 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED3_SHIFT                  2
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED4_MASK                   0x3 /* cf24 */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED4_SHIFT                  3
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED5_MASK                   0x1 /* cf24en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED5_SHIFT                  5
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED6_MASK                   0x1 /* rule26en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED6_SHIFT                  6
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED7_MASK                   0x1 /* rule27en */
+#define E5XSTORMETHCONNAGCTXDQEXTLDPART_E4_RESERVED7_SHIFT                  7
+	u8 fw_spare_data11 /* byte7 */;
+	__le16 fw_spare_data12 /* word7 */;
+	__le16 fw_spare_data13 /* word8 */;
+	__le16 fw_spare_data14 /* word9 */;
+};
+
+
+/* E5 Mstorm ETH connection aggregation context.  Auto-generated HSI
+ * layout: two flag bytes carry existence bits, three 2-bit completion
+ * flags (cf0-cf2) and their enables plus five rule enables; the MASK/SHIFT
+ * pairs select the sub-fields within flags0/flags1.
+ * NOTE(review): firmware/HW ABI - do not reorder, rename or resize fields.
+ */
+struct e5_mstorm_eth_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	u8 flags0;
+#define E5_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1 /* exist_in_qm0 */
+#define E5_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E5_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
+#define E5_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3 /* cf0 */
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF1_MASK           0x3 /* cf1 */
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF2_MASK           0x3 /* cf2 */
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+	u8 flags1;
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
+#define E5_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
+#define E5_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+	__le16 word0 /* word0 */;
+	__le16 word1 /* word1 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+
+/* E5 Ustorm ETH task aggregation context.  Auto-generated HSI layout:
+ * flags0-flags4 pack the connection-type nibble, existence bits,
+ * completion flags (cf0-cf5) with their enables, rule enables and the
+ * dif_error_type nibble; the MASK/SHIFT pairs select sub-fields within
+ * the preceding flags byte.  E4_RESERVEDn names mark bits kept only for
+ * layout compatibility with the earlier E4 context.
+ * NOTE(review): firmware/HW ABI - do not reorder, rename or resize fields.
+ */
+struct e5_ustorm_eth_task_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	__le16 word0 /* icid */;
+	u8 flags0;
+#define E5_USTORM_ETH_TASK_AG_CTX_NIBBLE0_MASK       0xF /* connection_type */
+#define E5_USTORM_ETH_TASK_AG_CTX_NIBBLE0_SHIFT      0
+#define E5_USTORM_ETH_TASK_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
+#define E5_USTORM_ETH_TASK_AG_CTX_BIT0_SHIFT         4
+#define E5_USTORM_ETH_TASK_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
+#define E5_USTORM_ETH_TASK_AG_CTX_BIT1_SHIFT         5
+#define E5_USTORM_ETH_TASK_AG_CTX_CF0_MASK           0x3 /* timer0cf */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF0_SHIFT          6
+	u8 flags1;
+#define E5_USTORM_ETH_TASK_AG_CTX_CF1_MASK           0x3 /* timer1cf */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF1_SHIFT          0
+#define E5_USTORM_ETH_TASK_AG_CTX_CF2_MASK           0x3 /* timer2cf */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF2_SHIFT          2
+#define E5_USTORM_ETH_TASK_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF3_SHIFT          4
+#define E5_USTORM_ETH_TASK_AG_CTX_CF4_MASK           0x3 /* dif_error_cf */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF4_SHIFT          6
+	u8 flags2;
+#define E5_USTORM_ETH_TASK_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF0EN_SHIFT        0
+#define E5_USTORM_ETH_TASK_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF1EN_SHIFT        1
+#define E5_USTORM_ETH_TASK_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF2EN_SHIFT        2
+#define E5_USTORM_ETH_TASK_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF3EN_SHIFT        3
+#define E5_USTORM_ETH_TASK_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
+#define E5_USTORM_ETH_TASK_AG_CTX_CF4EN_SHIFT        4
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE0EN_SHIFT      5
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE1EN_SHIFT      6
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE2EN_SHIFT      7
+	u8 flags3;
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE3EN_SHIFT      0
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE4EN_SHIFT      1
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE5EN_SHIFT      2
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
+#define E5_USTORM_ETH_TASK_AG_CTX_RULE6EN_SHIFT      3
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit2 */
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED1_SHIFT 4
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit3 */
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED2_SHIFT 5
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED3_MASK  0x1 /* bit4 */
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED3_SHIFT 6
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED4_MASK  0x1 /* rule7en */
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED4_SHIFT 7
+	u8 flags4;
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED5_MASK  0x3 /* cf5 */
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED5_SHIFT 0
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED6_MASK  0x1 /* cf5en */
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED6_SHIFT 2
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED7_MASK  0x1 /* rule8en */
+#define E5_USTORM_ETH_TASK_AG_CTX_E4_RESERVED7_SHIFT 3
+#define E5_USTORM_ETH_TASK_AG_CTX_NIBBLE1_MASK       0xF /* dif_error_type */
+#define E5_USTORM_ETH_TASK_AG_CTX_NIBBLE1_SHIFT      4
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	u8 e4_reserved8 /* icid_ext */;
+	__le32 reg0 /* dif_err_intervals */;
+	__le32 reg1 /* dif_error_1st_interval */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le32 reg4 /* reg4 */;
+	__le32 reg5 /* reg5 */;
+	__le16 word1 /* word1 */;
+	__le16 word2 /* word2 */;
+	__le32 reg6 /* reg6 */;
+	__le32 reg7 /* reg7 */;
+};
+
+
+
+struct e5_xstorm_eth_hw_conn_ag_ctx {
+	u8 reserved0 /* cdu_validation */;
+	u8 state_and_core_id /* state_and_core_id */;
+	u8 flags0;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1 /* exist_in_qm0 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK               0x1 /* exist_in_qm1 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK               0x1 /* exist_in_qm2 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1 /* exist_in_qm3 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK               0x1 /* bit4 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK               0x1 /* cf_array_active */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK               0x1 /* bit6 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK               0x1 /* bit7 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+	u8 flags1;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK               0x1 /* bit8 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK               0x1 /* bit9 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK               0x1 /* bit10 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                   0x1 /* bit11 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_COPY_CONDITION_LO_MASK  0x1 /* bit12 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_COPY_CONDITION_LO_SHIFT 4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_COPY_CONDITION_HI_MASK  0x1 /* bit13 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_COPY_CONDITION_HI_SHIFT 5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1 /* bit14 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1 /* bit15 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+	u8 flags2;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK                     0x3 /* timer0cf */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK                     0x3 /* timer1cf */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK                     0x3 /* timer_stop_all */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+	u8 flags3;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK                     0x3 /* cf4 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK                     0x3 /* cf5 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK                     0x3 /* cf6 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK                     0x3 /* cf7 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+	u8 flags4;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK                     0x3 /* cf8 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK                     0x3 /* cf9 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK                    0x3 /* cf10 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK                    0x3 /* cf11 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+	u8 flags5;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_HP_CF_MASK                   0x3 /* cf12 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_HP_CF_SHIFT                  0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK                    0x3 /* cf13 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK                    0x3 /* cf14 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK                    0x3 /* cf15 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+	u8 flags6;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3 /* cf16 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3 /* cf_array_cf */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK                   0x3 /* cf18 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK            0x3 /* cf19 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+	u8 flags7;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK                0x3 /* cf20 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK              0x3 /* cf21 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK               0x3 /* cf22 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK                   0x1 /* cf0en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK                   0x1 /* cf1en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+	u8 flags8;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK                   0x1 /* cf4en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK                   0x1 /* cf5en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK                   0x1 /* cf6en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK                   0x1 /* cf7en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK                   0x1 /* cf8en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK                   0x1 /* cf9en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+	u8 flags9;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK                  0x1 /* cf10en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK                  0x1 /* cf11en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_HP_CF_EN_MASK                0x1 /* cf12en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_HP_CF_EN_SHIFT               2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK                  0x1 /* cf13en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK                  0x1 /* cf14en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK                  0x1 /* cf15en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1 /* cf16en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1 /* cf_array_cf_en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+	u8 flags10;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK                0x1 /* cf18en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1 /* cf19en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1 /* cf20en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK              0x1 /* cf21en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1 /* cf22en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1 /* cf23en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK              0x1 /* rule0en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK              0x1 /* rule1en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+	u8 flags11;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK              0x1 /* rule2en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK              0x1 /* rule3en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1 /* rule4en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK            0x1 /* rule8en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK                 0x1 /* rule9en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+	u8 flags12;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK                0x1 /* rule10en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK                0x1 /* rule11en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK            0x1 /* rule12en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK            0x1 /* rule13en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK                0x1 /* rule14en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK                0x1 /* rule15en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK                0x1 /* rule16en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK                0x1 /* rule17en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+	u8 flags13;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK                0x1 /* rule18en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK                0x1 /* rule19en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK            0x1 /* rule20en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK            0x1 /* rule21en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK            0x1 /* rule22en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK            0x1 /* rule23en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK            0x1 /* rule24en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK            0x1 /* rule25en */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+	u8 flags14;
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1 /* bit16 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1 /* bit17 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1 /* bit18 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1 /* bit19 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1 /* bit20 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1 /* bit21 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK              0x3 /* cf23 */
+#define E5_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+	u8 edpm_vport /* byte2 */;
+	__le16 physical_q0 /* physical_q0_and_vf_id_lo */;
+	__le16 tx_l2_edpm_usg_cnt_and_vf_id_hi /* physical_q1_and_vf_id_hi */;
+	__le16 edpm_num_bds /* physical_q2 */;
+	__le16 tx_bd_cons /* word3 */;
+	__le16 tx_bd_prod /* word4 */;
+	__le16 updated_qm_pq_id /* word5 */;
+	__le16 conn_dpi /* conn_dpi */;
+};
+
+
+
+struct e5_ystorm_eth_task_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state_and_core_id */;
+	__le16 word0 /* icid */;
+	u8 flags0;
+#define E5_YSTORM_ETH_TASK_AG_CTX_NIBBLE0_MASK       0xF /* connection_type */
+#define E5_YSTORM_ETH_TASK_AG_CTX_NIBBLE0_SHIFT      0
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT0_SHIFT         4
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT1_SHIFT         5
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT2_MASK          0x1 /* bit2 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT2_SHIFT         6
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT3_MASK          0x1 /* bit3 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT3_SHIFT         7
+	u8 flags1;
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF0_MASK           0x3 /* cf0 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF0_SHIFT          0
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF1_MASK           0x3 /* cf1 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF1_SHIFT          2
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF2SPECIAL_MASK    0x3 /* cf2special */
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF2SPECIAL_SHIFT   4
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF0EN_SHIFT        6
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_CF1EN_SHIFT        7
+	u8 flags2;
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT4_MASK          0x1 /* bit4 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_BIT4_SHIFT         0
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE0EN_SHIFT      1
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE1EN_SHIFT      2
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE2EN_SHIFT      3
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE3EN_SHIFT      4
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE4EN_SHIFT      5
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE5EN_SHIFT      6
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_RULE6EN_SHIFT      7
+	u8 flags3;
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit5 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED2_MASK  0x3 /* cf3 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED3_MASK  0x3 /* cf4 */
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED4_MASK  0x1 /* cf3en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf4en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED6_MASK  0x1 /* rule7en */
+#define E5_YSTORM_ETH_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
+	__le32 reg0 /* reg0 */;
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	u8 byte4 /* byte4 */;
+	u8 e4_reserved7 /* icid_ext */;
+	__le16 word1 /* word1 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
+	__le16 word4 /* word4 */;
+	__le16 word5 /* word5 */;
+	__le16 e4_reserved8 /* word6 */;
+	__le32 reg1 /* reg1 */;
+};
+
+
 /*
- * GFT CAM line struct
+ * GFS RAM line struct with fields breakout
  */
-struct gft_cam_line {
-	__le32 camline;
-/* Indication if the line is valid. */
-#define GFT_CAM_LINE_VALID_MASK      0x1
-#define GFT_CAM_LINE_VALID_SHIFT     0
-/* Data bits, the word that compared with the profile key */
-#define GFT_CAM_LINE_DATA_MASK       0x3FFF
-#define GFT_CAM_LINE_DATA_SHIFT      1
-/* Mask bits, indicate the bits in the data that are Dont-Care */
-#define GFT_CAM_LINE_MASK_BITS_MASK  0x3FFF
-#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
-#define GFT_CAM_LINE_RESERVED1_MASK  0x7
-#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+struct gfs_profile_ram_line {
+	__le32 reg0;
+#define GFS_PROFILE_RAM_LINE_IN_SRC_PORT_MASK           0x1F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_IN_SRC_PORT_SHIFT          0
+#define GFS_PROFILE_RAM_LINE_IN_SRC_IP_MASK             0xFF /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_IN_SRC_IP_SHIFT            5
+#define GFS_PROFILE_RAM_LINE_IN_DST_MAC_MASK            0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_IN_DST_MAC_SHIFT           13
+#define GFS_PROFILE_RAM_LINE_IN_SRC_MAC_MASK            0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_IN_SRC_MAC_SHIFT           19
+#define GFS_PROFILE_RAM_LINE_IN_ETHTYPE_MASK            0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_ETHTYPE_SHIFT           25
+#define GFS_PROFILE_RAM_LINE_IN_CVLAN_MASK              0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_CVLAN_SHIFT             26
+#define GFS_PROFILE_RAM_LINE_IN_CVLAN_DEI_MASK          0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_CVLAN_DEI_SHIFT         27
+#define GFS_PROFILE_RAM_LINE_IN_CVLAN_PRI_MASK          0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_CVLAN_PRI_SHIFT         28
+#define GFS_PROFILE_RAM_LINE_IN_IS_UNICAST_MASK         0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_IS_UNICAST_SHIFT        29
+#define GFS_PROFILE_RAM_LINE_IN_CVLAN_EXISTS_MASK       0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_CVLAN_EXISTS_SHIFT      30
+#define GFS_PROFILE_RAM_LINE_IN_SVLAN_EXISTS_MASK       0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_SVLAN_EXISTS_SHIFT      31
+	__le32 reg1;
+#define GFS_PROFILE_RAM_LINE_IN_IS_IP_MASK              0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_IS_IP_SHIFT             0
+#define GFS_PROFILE_RAM_LINE_IN_IS_TCP_UDP_SCTP_MASK    0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_IS_TCP_UDP_SCTP_SHIFT   1
+#define GFS_PROFILE_RAM_LINE_IN_DSCP_MASK               0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_DSCP_SHIFT              2
+#define GFS_PROFILE_RAM_LINE_IN_ECN_MASK                0x3 /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_IN_ECN_SHIFT               3
+#define GFS_PROFILE_RAM_LINE_IN_DST_IP_MASK             0xFF /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_IN_DST_IP_SHIFT            5
+#define GFS_PROFILE_RAM_LINE_IN_DST_PORT_MASK           0x1F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_IN_DST_PORT_SHIFT          13
+#define GFS_PROFILE_RAM_LINE_IN_TTL_EQUALS_ZERO_MASK    0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_TTL_EQUALS_ZERO_SHIFT   18
+#define GFS_PROFILE_RAM_LINE_IN_TTL_EQUALS_ONE_MASK     0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_TTL_EQUALS_ONE_SHIFT    19
+#define GFS_PROFILE_RAM_LINE_IN_IP_PROTOCOL_MASK        0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_IP_PROTOCOL_SHIFT       20
+#define GFS_PROFILE_RAM_LINE_IN_IP_VERSION_MASK         0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IN_IP_VERSION_SHIFT        21
+#define GFS_PROFILE_RAM_LINE_IN_IPV6_FLOW_LABEL_MASK    0x1F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_IN_IPV6_FLOW_LABEL_SHIFT   22
+#define GFS_PROFILE_RAM_LINE_TUN_SRC_PORT_MASK          0x1F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_TUN_SRC_PORT_SHIFT         27
+	__le32 reg2;
+#define GFS_PROFILE_RAM_LINE_TUN_SRC_IP_MASK            0xFF /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_TUN_SRC_IP_SHIFT           0
+#define GFS_PROFILE_RAM_LINE_TUN_DST_MAC_MASK           0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_TUN_DST_MAC_SHIFT          8
+#define GFS_PROFILE_RAM_LINE_TUN_SRC_MAC_MASK           0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_TUN_SRC_MAC_SHIFT          14
+#define GFS_PROFILE_RAM_LINE_TUN_ETHTYPE_MASK           0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_ETHTYPE_SHIFT          20
+#define GFS_PROFILE_RAM_LINE_TUN_CVLAN_MASK             0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_CVLAN_SHIFT            21
+#define GFS_PROFILE_RAM_LINE_TUN_CVLAN_DEI_MASK         0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_CVLAN_DEI_SHIFT        22
+#define GFS_PROFILE_RAM_LINE_TUN_CVLAN_PRI_MASK         0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_CVLAN_PRI_SHIFT        23
+#define GFS_PROFILE_RAM_LINE_TUN_IS_UNICAST_MASK        0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_IS_UNICAST_SHIFT       24
+#define GFS_PROFILE_RAM_LINE_TUN_CVLAN_EXISTS_MASK      0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_CVLAN_EXISTS_SHIFT     25
+#define GFS_PROFILE_RAM_LINE_TUN_SVLAN_EXISTS_MASK      0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_SVLAN_EXISTS_SHIFT     26
+#define GFS_PROFILE_RAM_LINE_TUN_IS_IP_MASK             0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_IS_IP_SHIFT            27
+#define GFS_PROFILE_RAM_LINE_TUN_IS_TCP_UDP_SCTP_MASK   0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_IS_TCP_UDP_SCTP_SHIFT  28
+#define GFS_PROFILE_RAM_LINE_TUN_DSCP_MASK              0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_DSCP_SHIFT             29
+#define GFS_PROFILE_RAM_LINE_TUN_ECN_MASK               0x3 /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_TUN_ECN_SHIFT              30
+	__le32 reg3;
+#define GFS_PROFILE_RAM_LINE_TUN_DST_IP_MASK            0xFF /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_TUN_DST_IP_SHIFT           0
+#define GFS_PROFILE_RAM_LINE_TUN_DST_PORT_MASK          0x1F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_TUN_DST_PORT_SHIFT         8
+#define GFS_PROFILE_RAM_LINE_TUN_TTL_EQUALS_ZERO_MASK   0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_TTL_EQUALS_ZERO_SHIFT  13
+#define GFS_PROFILE_RAM_LINE_TUN_TTL_EQUALS_ONE_MASK    0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_TTL_EQUALS_ONE_SHIFT   14
+#define GFS_PROFILE_RAM_LINE_TUN_IP_PROTOCOL_MASK       0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_IP_PROTOCOL_SHIFT      15
+#define GFS_PROFILE_RAM_LINE_TUN_IP_VERSION_MASK        0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUN_IP_VERSION_SHIFT       16
+#define GFS_PROFILE_RAM_LINE_TUN_IPV6_FLOW_LABEL_MASK   0x1F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_TUN_IPV6_FLOW_LABEL_SHIFT  17
+#define GFS_PROFILE_RAM_LINE_PF_MASK                    0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_PF_SHIFT                   22
+#define GFS_PROFILE_RAM_LINE_TUNNEL_EXISTS_MASK         0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TUNNEL_EXISTS_SHIFT        23
+/* mask of type: bitwise (use enum gfs_tunnel_type_enum) */
+#define GFS_PROFILE_RAM_LINE_TUNNEL_TYPE_MASK           0xF
+#define GFS_PROFILE_RAM_LINE_TUNNEL_TYPE_SHIFT          24
+#define GFS_PROFILE_RAM_LINE_TENANT_ID_MASK             0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_TENANT_ID_SHIFT            28
+#define GFS_PROFILE_RAM_LINE_ENTROPY_MASK               0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_ENTROPY_SHIFT              29
+#define GFS_PROFILE_RAM_LINE_L2_HEADER_EXISTS_MASK      0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_L2_HEADER_EXISTS_SHIFT     30
+#define GFS_PROFILE_RAM_LINE_IP_FRAGMENT_MASK           0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_IP_FRAGMENT_SHIFT          31
+	__le32 reg4;
+#define GFS_PROFILE_RAM_LINE_TCP_FLAGS_MASK             0x3FF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_TCP_FLAGS_SHIFT            0
+#define GFS_PROFILE_RAM_LINE_CALC_TCP_FLAGS_MASK        0x3F /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_CALC_TCP_FLAGS_SHIFT       10
+#define GFS_PROFILE_RAM_LINE_STAG_MASK                  0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_STAG_SHIFT                 16
+#define GFS_PROFILE_RAM_LINE_STAG_DEI_MASK              0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_STAG_DEI_SHIFT             17
+#define GFS_PROFILE_RAM_LINE_STAG_PRI_MASK              0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_STAG_PRI_SHIFT             18
+#define GFS_PROFILE_RAM_LINE_MPLS_EXISTS_MASK           0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_MPLS_EXISTS_SHIFT          19
+#define GFS_PROFILE_RAM_LINE_MPLS_LABEL_MASK            0x1F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_MPLS_LABEL_SHIFT           20
+#define GFS_PROFILE_RAM_LINE_MPLS_TC_MASK               0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_MPLS_TC_SHIFT              25
+#define GFS_PROFILE_RAM_LINE_MPLS_BOS_MASK              0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_MPLS_BOS_SHIFT             26
+#define GFS_PROFILE_RAM_LINE_MPLS_TTL_EQUALS_ZERO_MASK  0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_MPLS_TTL_EQUALS_ZERO_SHIFT 27
+#define GFS_PROFILE_RAM_LINE_MPLS_TTL_EQUALS_ONE_MASK   0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_MPLS_TTL_EQUALS_ONE_SHIFT  28
+#define GFS_PROFILE_RAM_LINE_MPLS2_EXISTS_MASK          0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_MPLS2_EXISTS_SHIFT         29
+#define GFS_PROFILE_RAM_LINE_MPLS3_EXISTS_MASK          0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_MPLS3_EXISTS_SHIFT         30
+#define GFS_PROFILE_RAM_LINE_MPLS4_EXISTS_MASK          0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_MPLS4_EXISTS_SHIFT         31
+	__le32 reg5;
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE0_MASK            0xFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE0_SHIFT           0
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE1_MASK            0xFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE1_SHIFT           8
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE2_MASK            0xFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE2_SHIFT           16
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE3_MASK            0xFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE3_SHIFT           24
+	__le32 reg6;
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE4_MASK            0xFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE4_SHIFT           0
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE5_MASK            0xFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_BYTE5_SHIFT           8
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD0_MASK            0xFFFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD0_SHIFT           16
+	__le32 reg7;
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD1_MASK            0xFFFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD1_SHIFT           0
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD2_MASK            0xFFFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD2_SHIFT           16
+	__le32 reg8;
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD3_MASK            0xFFFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD3_SHIFT           0
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD4_MASK            0xFFFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD4_SHIFT           16
+	__le32 reg9;
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD5_MASK            0xFFFF /* mask of type: bitwise */
+#define GFS_PROFILE_RAM_LINE_FLEX_WORD5_SHIFT           0
+/* mask of type: bitwise, profile id associated with this context */
+#define GFS_PROFILE_RAM_LINE_PROFILE_ID_MASK            0x3FF
+#define GFS_PROFILE_RAM_LINE_PROFILE_ID_SHIFT           16
+#define GFS_PROFILE_RAM_LINE_FLEX_REG0_MASK             0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_FLEX_REG0_SHIFT            26
+	__le32 reg10;
+#define GFS_PROFILE_RAM_LINE_FLEX_REG1_MASK             0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_FLEX_REG1_SHIFT            0
+#define GFS_PROFILE_RAM_LINE_FLEX_REG2_MASK             0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_FLEX_REG2_SHIFT            6
+#define GFS_PROFILE_RAM_LINE_FLEX_REG3_MASK             0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_FLEX_REG3_SHIFT            12
+#define GFS_PROFILE_RAM_LINE_FLEX_REG4_MASK             0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_FLEX_REG4_SHIFT            18
+#define GFS_PROFILE_RAM_LINE_FLEX_REG5_MASK             0x3F /* mask of type: prefix */
+#define GFS_PROFILE_RAM_LINE_FLEX_REG5_SHIFT            24
+#define GFS_PROFILE_RAM_LINE_VPORT_ID_MASK              0x1 /* mask of type: bool */
+#define GFS_PROFILE_RAM_LINE_VPORT_ID_SHIFT             30
+/* When set - if a lookup from this profile matches, all the lookups from subsequent profiles will
+ * be discarded
+ */
+#define GFS_PROFILE_RAM_LINE_PRIORITY_MASK              0x1
+#define GFS_PROFILE_RAM_LINE_PRIORITY_SHIFT             31
+	__le32 reg11;
+#define GFS_PROFILE_RAM_LINE_SWAP_I2O_MASK              0x3 /*  (use enum gfs_swap_i2o_enum) */
+#define GFS_PROFILE_RAM_LINE_SWAP_I2O_SHIFT             0
+#define GFS_PROFILE_RAM_LINE_RESERVED_MASK              0x3FFFFFFF
+#define GFS_PROFILE_RAM_LINE_RESERVED_SHIFT             2
+	__le32 reserved_regs[4];
 };
 
 
 /*
- * GFT CAM line struct (for driversim use)
+ * GFT CAM line struct with fields breakout
  */
 struct gft_cam_line_mapped {
 	__le32 camline;
 /* Indication if the line is valid. */
 #define GFT_CAM_LINE_MAPPED_VALID_MASK                     0x1
 #define GFT_CAM_LINE_MAPPED_VALID_SHIFT                    0
-/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
+/*  (use enum gft_profile_ip_version) */
 #define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK                0x1
 #define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT               1
-/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
+/*  (use enum gft_profile_ip_version) */
 #define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK         0x1
 #define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT        2
-/* use enum gft_profile_upper_protocol_type
- * (use enum gft_profile_upper_protocol_type)
- */
+/*  (use enum gft_profile_upper_protocol_type) */
 #define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK       0xF
 #define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT      3
-/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
+/*  (use enum gft_profile_tunnel_type) */
 #define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK               0xF
 #define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT              7
 #define GFT_CAM_LINE_MAPPED_PF_ID_MASK                     0xF
 #define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT                    11
-/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
 #define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK           0x1
 #define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT          15
-/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
 #define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK    0x1
 #define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT   16
-/* use enum gft_profile_upper_protocol_type
- * (use enum gft_profile_upper_protocol_type)
- */
 #define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK  0xF
 #define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
-/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
 #define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK          0xF
 #define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT         21
 #define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK                0xF
@@ -2127,12 +4293,6 @@ struct gft_cam_line_mapped {
 };
 
 
-union gft_cam_line_union {
-	struct gft_cam_line cam_line;
-	struct gft_cam_line_mapped cam_line_mapped;
-};
-
-
 /*
  * Used in gft_profile_key: Indication for ip version
  */
@@ -2154,9 +4314,7 @@ struct gft_profile_key {
 /* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
 #define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK    0x1
 #define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT   1
-/* use enum gft_profile_upper_protocol_type
- * (use enum gft_profile_upper_protocol_type)
- */
+/* use enum gft_profile_upper_protocol_type (use enum gft_profile_upper_protocol_type) */
 #define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK  0xF
 #define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
 /* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
@@ -2212,7 +4370,7 @@ enum gft_profile_upper_protocol_type {
  */
 struct gft_ram_line {
 	__le32 lo;
-#define GFT_RAM_LINE_VLAN_SELECT_MASK              0x3
+#define GFT_RAM_LINE_VLAN_SELECT_MASK              0x3 /*  (use enum gft_vlan_select) */
 #define GFT_RAM_LINE_VLAN_SELECT_SHIFT             0
 #define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK          0x1
 #define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT         2
@@ -2272,7 +4430,7 @@ struct gft_ram_line {
 #define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT             29
 #define GFT_RAM_LINE_DST_PORT_MASK                 0x1
 #define GFT_RAM_LINE_DST_PORT_SHIFT                30
-#define GFT_RAM_LINE_SRC_PORT_MASK                 0x1U
+#define GFT_RAM_LINE_SRC_PORT_MASK                 0x1
 #define GFT_RAM_LINE_SRC_PORT_SHIFT                31
 	__le32 hi;
 #define GFT_RAM_LINE_DSCP_MASK                     0x1
@@ -2311,5 +4469,4 @@ enum gft_vlan_select {
 	MAX_GFT_VLAN_SELECT
 };
 
-
 #endif /* __ECORE_HSI_ETH__ */
diff --git a/drivers/net/qede/base/ecore_hsi_func_common.h b/drivers/net/qede/base/ecore_hsi_func_common.h
index 4351b7b14..6dc10aece 100644
--- a/drivers/net/qede/base/ecore_hsi_func_common.h
+++ b/drivers/net/qede/base/ecore_hsi_func_common.h
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016 - 2020 Cavium Inc.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
 
 #ifndef _HSI_FUNC_COMMON_H
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
index 7efe2eff1..93b24b729 100644
--- a/drivers/net/qede/base/ecore_hsi_init_func.h
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_HSI_INIT_FUNC__
 #define __ECORE_HSI_INIT_FUNC__
 /********************************/
@@ -18,6 +18,670 @@
 #define CRC8_TABLE_SIZE					256
 #endif
 
+/*
+ * GFS context Descriptor QREG (0)
+ */
+struct gfs_context_descriptor {
+	u32 bitfields0;
+/* Reserved for valid bit in searcher database */
+#define GFS_CONTEXT_DESCRIPTOR_RESERVED_VALID_MASK            0x1
+#define GFS_CONTEXT_DESCRIPTOR_RESERVED_VALID_SHIFT           0
+/* Reserved for last bit in searcher database */
+#define GFS_CONTEXT_DESCRIPTOR_RESERVED_LAST_MASK             0x1
+#define GFS_CONTEXT_DESCRIPTOR_RESERVED_LAST_SHIFT            1
+#define GFS_CONTEXT_DESCRIPTOR_FW_RESERVED0_MASK              0x1F
+#define GFS_CONTEXT_DESCRIPTOR_FW_RESERVED0_SHIFT             2
+/* Skips the last search in the bundle according to the logic described in GFS spec */
+#define GFS_CONTEXT_DESCRIPTOR_SKIP_LAST_SEARCH_MASK          0x1
+#define GFS_CONTEXT_DESCRIPTOR_SKIP_LAST_SEARCH_SHIFT         7
+/* Always use leading. (Leading, Accumulative, both leading and accumulative) (use enum
+ * gfs_context_type)
+ */
+#define GFS_CONTEXT_DESCRIPTOR_CONTEXT_TYPE_MASK              0x3
+#define GFS_CONTEXT_DESCRIPTOR_CONTEXT_TYPE_SHIFT             8
+/* 0   do always , 1   if redirection condition is not met (use enum gfs_cntx_cmd0) */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_COND_MASK             0x1
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_COND_SHIFT            10
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_CHANGE_VPORT_MASK     0x1 /* if set, change the vport */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_CHANGE_VPORT_SHIFT    11
+/* Substituted to  resolution  structure for FW use (use enum gfs_res_struct_dst_type) */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_FW_HINT_MASK          0x7
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_FW_HINT_SHIFT         12
+/* Command ID, 0   From the 3rd QREG of this context (QREG#2), 1-127   From canned Commands RAM */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_LOCATION_MASK         0x7F
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_LOCATION_SHIFT        15
+/* Used if  ChangeVport  is set */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_VPORT_MASK            0xFF
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND0_VPORT_SHIFT           22
+/* 0   disabled, 1   do always, 2   do if redirection condition is met, 3   do if  sampling
+ * condition is met (use enum gfs_cntx_cmd1)
+ */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_COND_MASK             0x3
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_COND_SHIFT            30
+	u32 priority /* Priority of the leading context */;
+	u32 bitfields1;
+/* 0   disabled, 1   copy to a single Vport always ,2   copy to a single Vport if copy condition is
+ * met ,3   copy to multiple Vports always, 4   copy to multiple Vports if copy condition is met, 5
+ *  do if  sampling  condition is met (use enum gfs_cntx_cmd2)
+ */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_COND_MASK             0x7
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_COND_SHIFT            0
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_CHANGE_VPORT_MASK     0x1 /* if set, change the vport */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_CHANGE_VPORT_SHIFT    3
+/* Substituted to  resolution  structure for FW use (use enum gfs_res_struct_dst_type) */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_FW_HINT_MASK          0x7
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_FW_HINT_SHIFT         4
+/* 0   From the 3rd QREG of this context (QREG#2), 1-127   From canned Commands RAM */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_LOCATION_MASK         0x7F
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_LOCATION_SHIFT        7
+/* Used if  ChangeVport  is set */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_VPORT_MASK            0xFF
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND1_VPORT_SHIFT           14
+/* Matched with 8 TCP flags and 2 additional bits supplied in the GFS header. */
+#define GFS_CONTEXT_DESCRIPTOR_COPY_CONDITION_EN_MASK         0x3FF
+#define GFS_CONTEXT_DESCRIPTOR_COPY_CONDITION_EN_SHIFT        22
+	u32 bitfields2;
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_CHANGE_VPORT_MASK     0x1 /* if set, change the vport */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_CHANGE_VPORT_SHIFT    0
+/* Substituted to  resolution  structure for FW use (use enum gfs_res_struct_dst_type) */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_FW_HINT_MASK          0x7
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_FW_HINT_SHIFT         1
+/* 0   From the 3rd QREG of this context (QREG#2), 1-127   From canned Commands RAM */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_LOCATION_MASK         0x7F
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_LOCATION_SHIFT        4
+/* For copy to a single Vport   Vport number.For copy to multiple Vports: 0-31   index of RAM entry
+ * that contains the Vports bitmask, 0xFF   the bitmask is located in QREG#2 and QREG#3
+ */
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_VPORT_MASK            0xFF
+#define GFS_CONTEXT_DESCRIPTOR_COMMAND2_VPORT_SHIFT           11
+/* Matched with TTL==0 (bit 0), TTL==1 (bit 1) from eth_gfs_redirect_condition in
+ * action_data.redirect.condition, and 2 additional bits (2 and 3) supplied in the GFS header.
+ */
+#define GFS_CONTEXT_DESCRIPTOR_REDIRECTION_CONDITION_EN_MASK  0xF
+#define GFS_CONTEXT_DESCRIPTOR_REDIRECTION_CONDITION_EN_SHIFT 19
+#define GFS_CONTEXT_DESCRIPTOR_FW_RESERVED1_MASK              0x1FF
+#define GFS_CONTEXT_DESCRIPTOR_FW_RESERVED1_SHIFT             23
+};
+
+/*
+ * RGFS context replacement data QREG 1
+ */
+struct rgfs_replacement_struct {
+/* CID for replacement in CM header. Used for steering to TX queue. Only one copy can use CID. In
+ * TGFS used to change event ID   Select TGFS flow in PSTORM
+ */
+	u32 cid;
+	u16 flags;
+/* Connection Type for replacement in the CM header (Unused) */
+#define RGFS_REPLACEMENT_STRUCT_CONNECTION_TYPE_MASK          0xF
+#define RGFS_REPLACEMENT_STRUCT_CONNECTION_TYPE_SHIFT         0
+/* Core Affinity for replacement in the CM header (Unused) */
+#define RGFS_REPLACEMENT_STRUCT_CORE_AFFINITY_MASK            0x3
+#define RGFS_REPLACEMENT_STRUCT_CORE_AFFINITY_SHIFT           4
+/* Change dst IP address that RSS header refers to. Set 1 to replace inner IPV4 address */
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_DST_IP_ADDRESS_MASK    0x1
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_DST_IP_ADDRESS_SHIFT   6
+/* Change src IP address that RSS header refers to. Set 1 to replace inner IPV4 address */
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_SRC_IP_ADDRESS_MASK    0x1
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_SRC_IP_ADDRESS_SHIFT   7
+/* Change dst port that RSS header refers to. Set 1 to replace inner UDP/TCP port */
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_DST_PORT_MASK          0x1
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_DST_PORT_SHIFT         8
+/* Change src port that RSS header refers to. Set 1 to replace inner UDP/TCP port */
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_SRC_PORT_MASK          0x1
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_SRC_PORT_SHIFT         9
+/* Change IpVerType field in the RSS header. Set 0 */
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_RSS_IPVER_TYPE_MASK    0x1
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_RSS_IPVER_TYPE_SHIFT   10
+/* Change L4Type field in the RSS header. Set 0 */
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_RSS_L4_TYPE_MASK       0x1
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_RSS_L4_TYPE_SHIFT      11
+#define RGFS_REPLACEMENT_STRUCT_RSS_L4_TYPE_MASK              0x3 /* Unused */
+#define RGFS_REPLACEMENT_STRUCT_RSS_L4_TYPE_SHIFT             12
+/* Change OVER_IP_PROTOCOL field in the RSS header. Set 0 */
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_RSS_OVER_IP_PROT_MASK  0x1
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_RSS_OVER_IP_PROT_SHIFT 14
+/* Change Ipv4Frag field in the RSS header. Set 0 */
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_RSS_IPV4_FRAG_MASK     0x1
+#define RGFS_REPLACEMENT_STRUCT_CHANGE_RSS_IPV4_FRAG_SHIFT    15
+	u16 dst_port /* Dst port value for replacement. L4 port */;
+	u16 src_port /* Src port value for replacement. L4 port */;
+	u16 fw_reserved;
+/* Flow Id. Used for TMLD aggregation. Always set. Replace last register in TM message. */
+	u32 fw_flow_id;
+};
+
+struct gfs_modify_inner_ipv4_addr {
+	u32 dst_ip /* Inner IPv4 destination address. */;
+};
+
+struct gfs_modify_inner_ipv6_addr {
+	u16 dst_ip_index /* Inner IPv6 destination IP indirection indexes. */;
+	u16 src_ip_index /* Inner IPv6 source IP indirection indexes. */;
+};
+
+union gfs_modify_inner_ip_addr {
+	struct gfs_modify_inner_ipv4_addr ipv4 /* Inner IPv4 destination address. */;
+	struct gfs_modify_inner_ipv6_addr ipv6 /* Inner IPv6 indirection indexes. */;
+};
+
+/*
+ * Modify tunnel IPV4 header flags
+ */
+struct gfs_modify_tunnel_ipv4_header_flags {
+	u16 bitfields0;
+/* Modify tunnel source IP address. */
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_MODIFY_TUNN_IP_SRC_ADDR_MASK    0x1
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_MODIFY_TUNN_IP_SRC_ADDR_SHIFT   0
+/* Modify tunnel destination IP address. */
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_MODIFY_TUNN_IP_DST_ADDR_MASK    0x1
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_MODIFY_TUNN_IP_DST_ADDR_SHIFT   1
+/* Modify tunnel IP TTL or Hop Limit */
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_SET_TUNN_IP_TTL_HOP_LIMIT_MASK  0x1
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_SET_TUNN_IP_TTL_HOP_LIMIT_SHIFT 2
+/* Decrement tunnel IP TTL or Hop Limit */
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_DEC_TUNN_IP_TTL_HOP_LIMIT_MASK  0x1
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_DEC_TUNN_IP_TTL_HOP_LIMIT_SHIFT 3
+/* Modify tunnel IP DSCP */
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_MODIFY_TUNN_IP_DSCP_MASK        0x1
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_FW_MODIFY_TUNN_IP_DSCP_SHIFT       4
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_RESERVED_MASK                      0x7FF
+#define GFS_MODIFY_TUNNEL_IPV4_HEADER_FLAGS_RESERVED_SHIFT                     5
+};
+
+/*
+ * Push tunnel IPV4 header flags
+ */
+struct gfs_push_tunnel_ipv4_header_flags {
+	u16 bitfields0;
+#define GFS_PUSH_TUNNEL_IPV4_HEADER_FLAGS_DONT_FRAG_FLAG_MASK             0x1
+#define GFS_PUSH_TUNNEL_IPV4_HEADER_FLAGS_DONT_FRAG_FLAG_SHIFT            0
+/* Modify tunnel destination IP address. */
+#define GFS_PUSH_TUNNEL_IPV4_HEADER_FLAGS_FW_TUNN_IPV4_ID_IND_INDEX_MASK  0x1FF
+#define GFS_PUSH_TUNNEL_IPV4_HEADER_FLAGS_FW_TUNN_IPV4_ID_IND_INDEX_SHIFT 1
+#define GFS_PUSH_TUNNEL_IPV4_HEADER_FLAGS_RESERVED_MASK                   0x3F
+#define GFS_PUSH_TUNNEL_IPV4_HEADER_FLAGS_RESERVED_SHIFT                  10
+};
+
+union gfs_tunnel_header_flags {
+/* Modify tunnel IPV4 header Flags */
+	struct gfs_modify_tunnel_ipv4_header_flags modify_tunnel_ipv4_header_flags;
+/* Push tunnel IPV4 header Flags */
+	struct gfs_push_tunnel_ipv4_header_flags push_tunnel_ipv4_header_flags;
+};
+
+/*
+ * Push tunnel VXLAN header data
+ */
+struct gfs_push_tunnel_vxlan_header_data {
+	u16 vni_bits0to15 /* Tunnel ID bits  0..15 */;
+	u8 vni_bits16to23 /* Tunnel ID bits 16..23 */;
+	u8 bitfields0;
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_RESERVED0_MASK           0x7
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_RESERVED0_SHIFT          0
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_UDP_CHECKSUM_VALID_MASK  0x1 /* UDP checksum exists */
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_UDP_CHECKSUM_VALID_SHIFT 3
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_I_FRAG_MASK              0x1 /* VNI valid. */
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_I_FRAG_SHIFT             4
+/* Use out of band VNI from RX path for hairpin traffic. */
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_OOB_VNI_MASK             0x1
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_OOB_VNI_SHIFT            5
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_RESERVED1_MASK           0x1
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_RESERVED1_SHIFT          6
+/* Calculate UDP source port from 4-tuple. */
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_CALC_ENTROPY_MASK        0x1
+#define GFS_PUSH_TUNNEL_VXLAN_HEADER_DATA_CALC_ENTROPY_SHIFT       7
+	u16 entropy /* VXLAN UDP source value. */;
+};
+
+/*
+ * Modify tunnel VXLAN header data
+ */
+struct gfs_modify_tunnel_vxlan_header_data {
+	u16 vni_bits0to15 /* Tunnel ID bits  0..15 */;
+	u8 vni_bits16to23 /* Tunnel ID bits 16..23 */;
+	u8 bitfields0;
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_RESERVED0_MASK        0x7
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_RESERVED0_SHIFT       0
+/* Update UDP checksum if exists */
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_UPDATE_CHECKSUM_MASK  0x1
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_UPDATE_CHECKSUM_SHIFT 3
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_SET_VNI_MASK          0x1 /* Set VNI */
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_SET_VNI_SHIFT         4
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_RESERVED1_MASK        0x1
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_RESERVED1_SHIFT       5
+/* Set       UDP source port. */
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_SET_ENTROPY_MASK      0x1
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_SET_ENTROPY_SHIFT     6
+/* Calculate UDP source port from 4-tuple. */
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_CALC_ENTROPY_MASK     0x1
+#define GFS_MODIFY_TUNNEL_VXLAN_HEADER_DATA_CALC_ENTROPY_SHIFT    7
+	u16 entropy /* VXLAN UDP source value. */;
+};
+
+/*
+ * Push tunnel GRE header data
+ */
+struct gfs_push_tunnel_gre_header_data {
+	u16 key_bits0to15 /* Tunnel ID bits  0..15. */;
+	u16 key_bits16to31 /* Tunnel ID bits 16..31. */;
+	u16 bitfields0;
+#define GFS_PUSH_TUNNEL_GRE_HEADER_DATA_CHECKSUM_VALID_MASK  0x1 /* GRE checksum exists */
+#define GFS_PUSH_TUNNEL_GRE_HEADER_DATA_CHECKSUM_VALID_SHIFT 0
+#define GFS_PUSH_TUNNEL_GRE_HEADER_DATA_KEY_VALID_MASK       0x1 /* Key exists */
+#define GFS_PUSH_TUNNEL_GRE_HEADER_DATA_KEY_VALID_SHIFT      1
+/* Use out of band KEY from RX path for hairpin traffic. */
+#define GFS_PUSH_TUNNEL_GRE_HEADER_DATA_OOB_KEY_MASK         0x1
+#define GFS_PUSH_TUNNEL_GRE_HEADER_DATA_OOB_KEY_SHIFT        2
+#define GFS_PUSH_TUNNEL_GRE_HEADER_DATA_RESERVED_MASK        0x1FFF
+#define GFS_PUSH_TUNNEL_GRE_HEADER_DATA_RESERVED_SHIFT       3
+};
+
+/*
+ * Modify tunnel GRE header data
+ */
+struct gfs_modify_tunnel_gre_header_data {
+	u16 key_bits0to15 /* Tunnel ID bits  0..15. */;
+	u16 key_bits16to31 /* Tunnel ID bits 16..31. */;
+	u16 bitfields0;
+/* Update GRE checksum if exists */
+#define GFS_MODIFY_TUNNEL_GRE_HEADER_DATA_UPDATE_CHECKSUM_MASK  0x1
+#define GFS_MODIFY_TUNNEL_GRE_HEADER_DATA_UPDATE_CHECKSUM_SHIFT 0
+#define GFS_MODIFY_TUNNEL_GRE_HEADER_DATA_SET_VALID_MASK        0x1 /* Set Key */
+#define GFS_MODIFY_TUNNEL_GRE_HEADER_DATA_SET_VALID_SHIFT       1
+#define GFS_MODIFY_TUNNEL_GRE_HEADER_DATA_RESERVED_MASK         0x3FFF
+#define GFS_MODIFY_TUNNEL_GRE_HEADER_DATA_RESERVED_SHIFT        2
+};
+
+union gfs_tunnel_header_data {
+/* Push   VXLAN tunnel header Data */
+	struct gfs_push_tunnel_vxlan_header_data push_tunnel_vxlan_header_data;
+/* Modify VXLAN tunnel header Data */
+	struct gfs_modify_tunnel_vxlan_header_data modify_tunnel_vxlan_header_data;
+/* Push   GRE   tunnel header Data */
+	struct gfs_push_tunnel_gre_header_data push_tunnel_gre_header_data;
+/* Modify GRE   tunnel header Data */
+	struct gfs_modify_tunnel_gre_header_data modify_tunnel_gre_header_data;
+};
+
+/*
+ * RGFS context Descriptor QREG
+ */
+struct eth_rgfs_full_context {
+	struct gfs_context_descriptor context_descriptor;
+	struct rgfs_replacement_struct replacement_struct;
+	u32 bitfields2;
+/* The log2 factor to apply to the timestamps (must be the same for both TIDs in flow) */
+#define ETH_RGFS_FULL_CONTEXT_FW_COUNTERS_TIMESTAMP_LOG2_FACTOR_MASK  0x1F
+#define ETH_RGFS_FULL_CONTEXT_FW_COUNTERS_TIMESTAMP_LOG2_FACTOR_SHIFT 0
+/* First ETH header (after packet modification) VLAN mode: NOP, POP, PUSH, change VID, change PRI,
+ * change whole tag. (use enum gfs_vlan_mode)
+ */
+#define ETH_RGFS_FULL_CONTEXT_FW_FIRST_HDR_VLAN_MODE_MASK             0x7
+#define ETH_RGFS_FULL_CONTEXT_FW_FIRST_HDR_VLAN_MODE_SHIFT            5
+#define ETH_RGFS_FULL_CONTEXT_RESERVED1_MASK                          0xFF
+#define ETH_RGFS_FULL_CONTEXT_RESERVED1_SHIFT                         8
+/*  (use enum eth_gfs_pop_hdr_type) */
+#define ETH_RGFS_FULL_CONTEXT_FW_POP_TYPE_MASK                        0x3
+#define ETH_RGFS_FULL_CONTEXT_FW_POP_TYPE_SHIFT                       16
+#define ETH_RGFS_FULL_CONTEXT_FW_TID0_VALID_MASK                      0x1 /* TID 0 Valid */
+#define ETH_RGFS_FULL_CONTEXT_FW_TID0_VALID_SHIFT                     18
+#define ETH_RGFS_FULL_CONTEXT_FW_TID1_VALID_MASK                      0x1 /* TID 1 Valid */
+#define ETH_RGFS_FULL_CONTEXT_FW_TID1_VALID_SHIFT                     19
+/* First duplication driver hint. */
+#define ETH_RGFS_FULL_CONTEXT_DRV_HINT_0_MASK                         0x7
+#define ETH_RGFS_FULL_CONTEXT_DRV_HINT_0_SHIFT                        20
+#define ETH_RGFS_FULL_CONTEXT_RESERVED2_MASK                          0x7
+#define ETH_RGFS_FULL_CONTEXT_RESERVED2_SHIFT                         23
+/* Second duplication driver hint. */
+#define ETH_RGFS_FULL_CONTEXT_DRV_HINT_1_MASK                         0x7
+#define ETH_RGFS_FULL_CONTEXT_DRV_HINT_1_SHIFT                        26
+/* Third duplication driver hint. */
+#define ETH_RGFS_FULL_CONTEXT_DRV_HINT_2_MASK                         0x7
+#define ETH_RGFS_FULL_CONTEXT_DRV_HINT_2_SHIFT                        29
+	u16 fw_first_hdr_vlan_val /* Tunnel or single header VLAN value */;
+	u16 fw_command1_sample_len /* 2nd Command sample length (1st command is command0) */;
+	u32 fw_tid0 /* Counter 0 TID */;
+	u32 fw_tid1 /* Counter 1 TID */;
+/* HDR Modify   inner IPv4 destination IP or IPv6 indirection indexes. */
+	union gfs_modify_inner_ip_addr fw_modify_inner_ip_addr;
+	u16 fw_tunnel_src_mac_ind;
+/* Tunnel source MAC indirection index. 0 are invalid index. */
+#define ETH_RGFS_FULL_CONTEXT_FW_TUNNEL_SRC_MAC_IND_INDEX_MASK        0x1FF
+#define ETH_RGFS_FULL_CONTEXT_FW_TUNNEL_SRC_MAC_IND_INDEX_SHIFT       0
+#define ETH_RGFS_FULL_CONTEXT_RESERVED3_MASK                          0xF
+#define ETH_RGFS_FULL_CONTEXT_RESERVED3_SHIFT                         9
+/* CM HDR override - Clear for drop. No special allocation needed for drop fields. */
+#define ETH_RGFS_FULL_CONTEXT_XXLOCK_CMD_MASK                         0x7
+#define ETH_RGFS_FULL_CONTEXT_XXLOCK_CMD_SHIFT                        13
+	u16 bitfields3;
+/* Modify inner/single L4 ports */
+#define ETH_RGFS_FULL_CONTEXT_FW_MODIFY_INNER_4TUPLE_PORTS_MASK       0x1
+#define ETH_RGFS_FULL_CONTEXT_FW_MODIFY_INNER_4TUPLE_PORTS_SHIFT      0
+/* Modify inner/single IP addresses */
+#define ETH_RGFS_FULL_CONTEXT_FW_MODIFY_INNER_4TUPLE_ADDR_MASK        0x1
+#define ETH_RGFS_FULL_CONTEXT_FW_MODIFY_INNER_4TUPLE_ADDR_SHIFT       1
+/* Modify inner/single ETH header */
+#define ETH_RGFS_FULL_CONTEXT_FW_MODIFY_INNER_MAC_MASK                0x1
+#define ETH_RGFS_FULL_CONTEXT_FW_MODIFY_INNER_MAC_SHIFT               2
+#define ETH_RGFS_FULL_CONTEXT_FW_MODIFY_INNER_IP_DSCP_MASK            0x1 /* Modify inner IP DSCP */
+#define ETH_RGFS_FULL_CONTEXT_FW_MODIFY_INNER_IP_DSCP_SHIFT           3
+/* Inner ETH header VLAN modification: NOP, change VID, change PRI, change whole tag, POP, PUSH (use
+ * enum gfs_vlan_mode)
+ */
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_HDR_VLAN_MODE_MASK             0x7
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_HDR_VLAN_MODE_SHIFT            4
+/* Modify inner IP TTL or Hop Limit */
+#define ETH_RGFS_FULL_CONTEXT_FW_SET_INNER_IP_TTL_HOP_LIMIT_MASK      0x1
+#define ETH_RGFS_FULL_CONTEXT_FW_SET_INNER_IP_TTL_HOP_LIMIT_SHIFT     7
+/* Decrement inner IP TTL or Hop Limit */
+#define ETH_RGFS_FULL_CONTEXT_FW_DEC_INNER_IP_TTL_HOP_LIMIT_MASK      0x1
+#define ETH_RGFS_FULL_CONTEXT_FW_DEC_INNER_IP_TTL_HOP_LIMIT_SHIFT     8
+/* CM HDR override - Clear for drop. No special allocation needed for drop fields. */
+#define ETH_RGFS_FULL_CONTEXT_LOAD_ST_CTX_FLG_MASK                    0x1
+#define ETH_RGFS_FULL_CONTEXT_LOAD_ST_CTX_FLG_SHIFT                   9
+#define ETH_RGFS_FULL_CONTEXT_RESERVED4_MASK                          0x3F
+#define ETH_RGFS_FULL_CONTEXT_RESERVED4_SHIFT                         10
+	u16 fw_command2_sample_len /* 3rd Command sample length (1st command is command0) */;
+	u16 fw_inner_vlan_val /* Inner VLAN value */;
+	u32 bitfields4;
+/* inner IP DSCP Set value */
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_IP_DSCP_MASK                   0x3F
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_IP_DSCP_SHIFT                  0
+/* Inner source      MAC indirection index. 0 are invalid index. */
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_SRC_MAC_IND_INDEX_MASK         0x1FF
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_SRC_MAC_IND_INDEX_SHIFT        6
+/* Inner destination MAC indirection index. 0 are invalid index. */
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_DEST_MAC_IND_INDEX_MASK        0x1FF
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_DEST_MAC_IND_INDEX_SHIFT       15
+/* inner IP TTL or Hop Limit Set value */
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_IP_TTL_HOP_LIMIT_MASK          0xFF
+#define ETH_RGFS_FULL_CONTEXT_FW_INNER_IP_TTL_HOP_LIMIT_SHIFT         24
+	u32 src_ip /* HDR Modify   IPv4 source IP. */;
+/* Filter marker. Always set. Replace per-pf config in basic block. Reported by CQE  */
+	u32 flow_mark;
+/* HDR Modify   tunnel IPv4 destination IP or IPv6 indirection index. Used by FW. */
+	u32 fw_tunn_dest_ip;
+/* HDR Modify   tunnel IPv4 source IP or IPv6 indirection index. Used by FW. */
+	u32 fw_tunn_src_ip;
+/* Tunnel IP tunnel header flag union. */
+	union gfs_tunnel_header_flags tunnel_ip_header_flags;
+	u8 fw_tunn_ip_ttl_hop_limit /* Tunnel IP TTL or Hop Limit Set value */;
+	u8 bitfields5;
+/* RSS HDR override - for CID steering. Disable CID override. */
+#define ETH_RGFS_FULL_CONTEXT_CID_OVERWRITE_MASK                      0x1
+#define ETH_RGFS_FULL_CONTEXT_CID_OVERWRITE_SHIFT                     0
+/* Tunnel IP header flags for PUSH flow: 2 LSBits of tos field. */
+#define ETH_RGFS_FULL_CONTEXT_FW_TUNNEL_IP_ECN_MASK                   0x3
+#define ETH_RGFS_FULL_CONTEXT_FW_TUNNEL_IP_ECN_SHIFT                  1
+/* Push headers type. (use enum eth_gfs_push_hdr_type) */
+#define ETH_RGFS_FULL_CONTEXT_FW_PUSH_TYPE_MASK                       0x7
+#define ETH_RGFS_FULL_CONTEXT_FW_PUSH_TYPE_SHIFT                      3
+#define ETH_RGFS_FULL_CONTEXT_RESERVED_MASK                           0x3
+#define ETH_RGFS_FULL_CONTEXT_RESERVED_SHIFT                          6
+	union gfs_tunnel_header_data tunnel_header_data /* VXLAN/GRE tunnel header union. */;
+	u16 bitfields6;
+/* Tunnel destination MAC indirection index. 0 are invalid index. */
+#define ETH_RGFS_FULL_CONTEXT_FW_TUNNEL_DEST_MAC_IND_INDEX_MASK       0x1FF
+#define ETH_RGFS_FULL_CONTEXT_FW_TUNNEL_DEST_MAC_IND_INDEX_SHIFT      0
+#define ETH_RGFS_FULL_CONTEXT_RESERVED5_MASK                          0x7F
+#define ETH_RGFS_FULL_CONTEXT_RESERVED5_SHIFT                         9
+	u32 bitfields7;
+/* RSS HDR override - for queue/CID steering. Only one copy can use queue ID. Limited by 256. Bits
+ * 9-11 can be used by context.
+ */
+#define ETH_RGFS_FULL_CONTEXT_LDR_HEADER_REGION_MASK                  0xFF
+#define ETH_RGFS_FULL_CONTEXT_LDR_HEADER_REGION_SHIFT                 0
+#define ETH_RGFS_FULL_CONTEXT_RESERVED6_MASK                          0xF
+#define ETH_RGFS_FULL_CONTEXT_RESERVED6_SHIFT                         8
+/* RSS HDR override - For steering. Only one copy can use queue ID. Limited by 1. Bits 1-3 can be
+ * used by context. * Cannot Move *
+ */
+#define ETH_RGFS_FULL_CONTEXT_CALC_RSS_AND_IND_TBL_MASK               0xF
+#define ETH_RGFS_FULL_CONTEXT_CALC_RSS_AND_IND_TBL_SHIFT              12
+/* Tunnel IP DSCP Set value */
+#define ETH_RGFS_FULL_CONTEXT_FW_TUNNEL_IP_DSCP_MASK                  0x3F
+#define ETH_RGFS_FULL_CONTEXT_FW_TUNNEL_IP_DSCP_SHIFT                 16
+/* RSS HDR override - For steering. Only one copy can use queue ID. Limited by 511. Bits 9 can be
+ * used by context. * Cannot Move *
+ */
+#define ETH_RGFS_FULL_CONTEXT_DEFAULT_QUEUE_ID_MASK                   0x3FF
+#define ETH_RGFS_FULL_CONTEXT_DEFAULT_QUEUE_ID_SHIFT                  22
+};
+
+
+/*
+ * TGFS context Descriptor QREG
+ */
+struct eth_tgfs_full_context {
+	struct gfs_context_descriptor context_descriptor;
+/* Tunnel or single VLAN value, setting vlan value for push command, updating pri, vid or vlan value
+ * for the pri, vid and whole_tag update commands, respectively
+ */
+	u16 fw_first_hdr_vlan_val;
+	u16 bitfields0;
+/* The log2 factor to apply to the timestamp (must be the same for both TIDs in flow) */
+#define ETH_TGFS_FULL_CONTEXT_FW_COUNTERS_TIMESTAMP_LOG2_FACTOR_MASK  0x1F
+#define ETH_TGFS_FULL_CONTEXT_FW_COUNTERS_TIMESTAMP_LOG2_FACTOR_SHIFT 0
+/* First ETH header VLAN modification: NOP, POP, PUSH, change VID, change PRI, change whole tag.
+ * Replace basic block register 8 (use enum gfs_vlan_mode)
+ */
+#define ETH_TGFS_FULL_CONTEXT_FW_FIRST_HDR_VLAN_MODE_MASK             0x7
+#define ETH_TGFS_FULL_CONTEXT_FW_FIRST_HDR_VLAN_MODE_SHIFT            5
+#define ETH_TGFS_FULL_CONTEXT_FW_TID0_VALID_MASK                      0x1 /* TID 0 Valid */
+#define ETH_TGFS_FULL_CONTEXT_FW_TID0_VALID_SHIFT                     8
+#define ETH_TGFS_FULL_CONTEXT_FW_TID1_VALID_MASK                      0x1 /* TID 1 Valid */
+#define ETH_TGFS_FULL_CONTEXT_FW_TID1_VALID_SHIFT                     9
+#define ETH_TGFS_FULL_CONTEXT_RESERVED0_MASK                          0x3F
+#define ETH_TGFS_FULL_CONTEXT_RESERVED0_SHIFT                         10
+	u16 fw_command1_sample_len /* Command 2 sample length */;
+	u16 fw_command2_sample_len /* Command 3 sample length */;
+	u32 fw_tid0 /* Counter TID. */;
+	u32 fw_tid1 /* Counter TID. */;
+	u32 src_ip /* Inner IPv4 source IP or IPv6 indirection index. */;
+	u32 dst_ip /* Inner IPv4 destination IP or IPv6 indirection index. */;
+	u16 dst_port /* L4 destination port. */;
+	u16 src_port /* L4 source port. */;
+	u32 bitfields1;
+/* Inner source MAC indirection index. 0 are invalid index. */
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_SRC_MAC_IND_INDEX_MASK         0x1FF
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_SRC_MAC_IND_INDEX_SHIFT        0
+/* Inner destination MAC indirection index. 0 are invalid index. */
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_DEST_MAC_IND_INDEX_MASK        0x1FF
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_DEST_MAC_IND_INDEX_SHIFT       9
+/* Modify inner/single source IP address. */
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_SRC_IP_ADDR_MASK        0x1
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_SRC_IP_ADDR_SHIFT       18
+/* Modify inner/single destination IP address. */
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_DEST_IP_ADDR_MASK       0x1
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_DEST_IP_ADDR_SHIFT      19
+/* Modify inner/single L4 header source port */
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_L4_SRC_PORT_MASK        0x1
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_L4_SRC_PORT_SHIFT       20
+/* Modify inner/single L4 header destination port */
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_L4_DEST_PORT_MASK       0x1
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_L4_DEST_PORT_SHIFT      21
+/* Modify MACs in inner ETH header */
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_MAC_MASK                0x1
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_MAC_SHIFT               22
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_IP_DSCP_MASK            0x1 /* Modify inner IP DSCP */
+#define ETH_TGFS_FULL_CONTEXT_FW_MODIFY_INNER_IP_DSCP_SHIFT           23
+/* Modify inner IP TTL or Hop Limit */
+#define ETH_TGFS_FULL_CONTEXT_FW_SET_INNER_IP_TTL_HOP_LIMIT_MASK      0x1
+#define ETH_TGFS_FULL_CONTEXT_FW_SET_INNER_IP_TTL_HOP_LIMIT_SHIFT     24
+/* Decrement inner IP TTL or Hop Limit */
+#define ETH_TGFS_FULL_CONTEXT_FW_DEC_INNER_IP_TTL_HOP_LIMIT_MASK      0x1
+#define ETH_TGFS_FULL_CONTEXT_FW_DEC_INNER_IP_TTL_HOP_LIMIT_SHIFT     25
+#define ETH_TGFS_FULL_CONTEXT_RESERVED1_MASK                          0x1
+#define ETH_TGFS_FULL_CONTEXT_RESERVED1_SHIFT                         26
+/* Original flag set for some copy. */
+#define ETH_TGFS_FULL_CONTEXT_FW_ORIGINAL_COPY_FLG_MASK               0x1
+#define ETH_TGFS_FULL_CONTEXT_FW_ORIGINAL_COPY_FLG_SHIFT              27
+/* Set checksum offload due to modify fields. FP must verify if checksum exist. bit 0 - ip, bit 1 -
+ * l4, bit 2 - tunn_ip, bit 3 - tunn_l4
+ */
+#define ETH_TGFS_FULL_CONTEXT_FW_SET_CHECKSUM_OFFLOAD_MASK            0xF
+#define ETH_TGFS_FULL_CONTEXT_FW_SET_CHECKSUM_OFFLOAD_SHIFT           28
+	u32 bitfields2;
+/* First ETH header destination MAC indirection index. 0 for unchanged. Replace basic block register
+ * 8.
+ */
+#define ETH_TGFS_FULL_CONTEXT_FW_FIRST_HDR_DEST_MAC_IND_INDEX_MASK    0x1FF
+#define ETH_TGFS_FULL_CONTEXT_FW_FIRST_HDR_DEST_MAC_IND_INDEX_SHIFT   0
+/* First ETH header source MAC indirection index. 0 for unchanged. Replace basic block register 8 */
+#define ETH_TGFS_FULL_CONTEXT_FW_FIRST_HDR_SRC_MAC_IND_INDEX_MASK     0x1FF
+#define ETH_TGFS_FULL_CONTEXT_FW_FIRST_HDR_SRC_MAC_IND_INDEX_SHIFT    9
+/* Reserved for last bit in searcher database (use enum eth_gfs_pop_hdr_type) */
+#define ETH_TGFS_FULL_CONTEXT_FW_POP_TYPE_MASK                        0x3
+#define ETH_TGFS_FULL_CONTEXT_FW_POP_TYPE_SHIFT                       18
+#define ETH_TGFS_FULL_CONTEXT_RESERVED2_MASK                          0x1
+#define ETH_TGFS_FULL_CONTEXT_RESERVED2_SHIFT                         20
+/* Inner ETH header VLAN modification: NOP, change VID, change PRI, change whole tag, POP, PUSH.
+ * Replace basic block register 8 (use enum gfs_vlan_mode)
+ */
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_HDR_VLAN_MODE_MASK             0x7
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_HDR_VLAN_MODE_SHIFT            21
+/* inner IP TTL or Hop Limit Set value. Replace basic block register 8. */
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_IP_TTL_HOP_LIMIT_MASK          0xFF
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_IP_TTL_HOP_LIMIT_SHIFT         24
+	u16 bitfields3;
+/* Push headers type. (use enum eth_gfs_push_hdr_type) */
+#define ETH_TGFS_FULL_CONTEXT_FW_PUSH_TYPE_MASK                       0x7
+#define ETH_TGFS_FULL_CONTEXT_FW_PUSH_TYPE_SHIFT                      0
+/* inner IP DSCP Set value */
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_IP_DSCP_MASK                   0x3F
+#define ETH_TGFS_FULL_CONTEXT_FW_INNER_IP_DSCP_SHIFT                  3
+#define ETH_TGFS_FULL_CONTEXT_RESERVED3_MASK                          0x7F
+#define ETH_TGFS_FULL_CONTEXT_RESERVED3_SHIFT                         9
+	u16 fw_inner_vlan_val /* Inner VLAN value */;
+	u32 flow_mark /* Filter marker. Pass to Rx if destination is RX_VPORT */;
+	u8 bitfields4;
+/* Tunnel IP header flags for PUSH flow: 2 LSBits of tos field. */
+#define ETH_TGFS_FULL_CONTEXT_FW_TUNNEL_IP_ECN_MASK                   0x3
+#define ETH_TGFS_FULL_CONTEXT_FW_TUNNEL_IP_ECN_SHIFT                  0
+/* Tunnel IP DSCP Set value */
+#define ETH_TGFS_FULL_CONTEXT_FW_TUNNEL_IP_DSCP_MASK                  0x3F
+#define ETH_TGFS_FULL_CONTEXT_FW_TUNNEL_IP_DSCP_SHIFT                 2
+	u8 fw_tunn_ip_ttl_hop_limit /* Tunnel IP TTL or Hop Limit Set value */;
+	u16 reserved4;
+	union gfs_tunnel_header_data tunnel_header_data /* VXLAN/GRE tunnel header union. */;
+/* Tunnel IP tunnel header flag union. */
+	union gfs_tunnel_header_flags tunnel_ip_header_flags;
+/* HDR Modify   tunnel IPv4 destination IP or IPv6 indirection index. */
+	u32 fw_tunn_dest_ip;
+	u32 fw_tunn_src_ip /* HDR Modify   tunnel IPv4 source IP or IPv6 indirection index. */;
+	u16 reserved5;
+	u8 event_id /* This field is used to replace part of event ID in MCM header. */;
+	u8 reserved6;
+	u32 fw_flow_id /* Flow Id. Replace basic block register 9. */;
+	u32 reserved7[2];
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * GFS Context VLAN mode enum
+ */
+enum gfs_vlan_mode {
+	e_vlan_mode_nop,
+	e_vlan_mode_change_vid,
+	e_vlan_mode_change_pri,
+	e_vlan_mode_change_whole_tag,
+/* In order to be applicable for fw_inner_vlan_modify_flg as well, pop and push are placed at the
+ * end so as to allow nop..change_whole_tag to consume exactly 2 bits
+ */
+	e_vlan_mode_pop,
+/* In order to be applicable for fw_inner_vlan_modify_flg as well, pop and push are placed at the
+ * end so as to allow nop..change_whole_tag to consume exactly 2 bits
+ */
+	e_vlan_mode_push,
+	MAX_GFS_VLAN_MODE
+};
+
+
+/*
+ * structure of the tgfs fw hint
+ */
+struct tgfs_fw_hint {
+	u8 bitfields;
+#define TGFS_FW_HINT_FIRST_DUP_FLG_MASK  0x1 /* set if first duplicate */
+#define TGFS_FW_HINT_FIRST_DUP_FLG_SHIFT 0
+/* specify where the pkt is destined for (use enum gfs_res_struct_dst_type) */
+#define TGFS_FW_HINT_DEST_TYPE_MASK      0x3
+#define TGFS_FW_HINT_DEST_TYPE_SHIFT     1
+#define TGFS_FW_HINT_RESERVED_MASK       0x1F
+#define TGFS_FW_HINT_RESERVED_SHIFT      3
+};
+
+
+/*
+ * GFS Context Command 0 enum
+ */
+enum gfs_cntx_cmd0 {
+	e_cntx_cmd0_do_always /* do always */,
+	e_cntx_cmd0_if_red_not_met /* if redirection condition is not met */,
+	MAX_GFS_CNTX_CMD0
+};
+
+
+/*
+ * GFS Context Command 1 enum
+ */
+enum gfs_cntx_cmd1 {
+	e_cntx_cmd1_disable /* disabled */,
+	e_cntx_cmd1_do_always /* do always */,
+	e_cntx_cmd1_do_if_red_met /* do if redirection condition is met */,
+	e_cntx_cmd1_do_if_samp_met /* do if  sampling  condition is met */,
+	MAX_GFS_CNTX_CMD1
+};
+
+
+/*
+ * GFS Context Command 2 enum
+ */
+enum gfs_cntx_cmd2 {
+	e_cntx_cmd2_disable /* disabled */,
+	e_cntx_cmd2_cpy_single_vport_always /* copy to a single Vport always */,
+	e_cntx_cmd2_cpy_single_vport_cpy_met /* copy to a single Vport if copy condition is met */,
+	e_cntx_cmd2_cpy_mul_vport_always /* copy to multiple Vports always */,
+	e_cntx_cmd2_cpy_mul_vport_cpy_met /* copy to multiple Vports if copy condition is met */,
+	e_cntx_cmd2_do_if_samp_met /* do if  sampling  condition is met */,
+	MAX_GFS_CNTX_CMD2
+};
+
+
+
+/*
+ * GFS context type
+ */
+enum gfs_context_type {
+	e_gfs_cntx_lead /* leading context type */,
+	e_gfs_cntx_accum /* accumulative context type */,
+	e_gfs_cntx_lead_and_accum /* leading and accumulative context type */,
+	MAX_GFS_CONTEXT_TYPE
+};
+
+
+enum gfs_res_struct_dst_type {
+	e_gfs_dst_type_regular /* Perform regular TX-Switching classification  */,
+	e_gfs_dst_type_rx_vport /* Redirect to RX Vport */,
+/* TX-Switching is bypassed, The TX destination is written in vport field in resolution struct */
+	e_gfs_dst_type_tx_sw_bypass,
+	MAX_GFS_RES_STRUCT_DST_TYPE
+};
+
+
+
 /*
  * BRB RAM init requirements
  */
@@ -34,12 +698,12 @@ struct init_brb_ram_req {
  * ETS per-TC init requirements
  */
 struct init_ets_tc_req {
-/* if set, this TC participates in the arbitration with a strict priority
- * (the priority is equal to the TC ID)
+/* if set, this TC participates in the arbitration with a strict priority (the priority is equal to
+ * the TC ID)
  */
 	u8 use_sp;
-/* if set, this TC participates in the arbitration with a WFQ weight
- * (indicated by the weight field)
+/* if set, this TC participates in the arbitration with a WFQ weight (indicated by the weight field)
+ *
  */
 	u8 use_wfq;
 	u16 weight /* An arbitration weight. Valid only if use_wfq is set. */;
@@ -50,8 +714,7 @@ struct init_ets_tc_req {
  */
 struct init_ets_req {
 	u32 mtu /* Max packet size (in bytes) */;
-/* ETS initialization requirements per TC. */
-	struct init_ets_tc_req tc_req[NUM_OF_TCS];
+	struct init_ets_tc_req tc_req[NUM_OF_TCS] /* ETS initialization requirements per TC. */;
 };
 
 
@@ -62,8 +725,7 @@ struct init_ets_req {
 struct init_nig_lb_rl_req {
 /* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
 	u16 lb_mac_rate;
-/* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
-	u16 lb_rate;
+	u16 lb_rate /* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */;
 	u32 mtu /* Max packet size (in bytes) */;
 /* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */
 	u16 tc_rate[NUM_OF_PHYS_TCS];
@@ -91,9 +753,7 @@ struct init_nig_pri_tc_map_req {
  * QM per global RL init parameters
  */
 struct init_qm_global_rl_params {
-/* Rate limit in Mb/sec units. If set to zero, the link speed is uwsed
- * instead.
- */
+/* Rate limit in Mb/sec units. If set to zero, the link speed is used instead. */
 	u32 rate_limit;
 };
 
@@ -103,25 +763,24 @@ struct init_qm_global_rl_params {
  */
 struct init_qm_port_params {
 	u8 active /* Indicates if this port is active */;
-/* Vector of valid bits for active TCs used by this port */
-	u8 active_phys_tcs;
-/* number of PBF command lines that can be used by this port */
+	u8 active_phys_tcs /* Vector of valid bits for active TCs used by this port */;
+/* Number of PBF command lines that can be used by this port. In E4 each line is 256b, and in E5
+ * each line is 512b.
+ */
 	u16 num_pbf_cmd_lines;
-/* number of BTB blocks that can be used by this port */
-	u16 num_btb_blocks;
+	u16 num_btb_blocks /* Number of BTB blocks that can be used by this port */;
 	u16 reserved;
 };
 
 
 /*
- * QM per-PQ init parameters
+ * QM per PQ init parameters
  */
 struct init_qm_pq_params {
 	u8 vport_id /* VPORT ID */;
 	u8 tc_id /* TC ID */;
 	u8 wrr_group /* WRR group */;
-/* Indicates if a rate limiter should be allocated for the PQ (0/1) */
-	u8 rl_valid;
+	u8 rl_valid /* Indicates if a rate limiter should be allocated for the PQ (0/1) */;
 	u16 rl_id /* RL ID, valid only if rl_valid is true */;
 	u8 port_id /* Port ID */;
 	u8 reserved;
@@ -132,9 +791,7 @@ struct init_qm_pq_params {
  * QM per VPORT init parameters
  */
 struct init_qm_vport_params {
-/* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is
- * globally disabled.
- */
+/* WFQ weight. A value of 0 means don't configure. Ignored if VPORT WFQ is globally disabled. */
 	u16 wfq;
 /* the first Tx PQ ID associated with this VPORT for each TC. */
 	u16 first_tx_pq_id[NUM_OF_TCS];
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
index 4f878d061..bdbf54730 100644
--- a/drivers/net/qede/base/ecore_hsi_init_tool.h
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_HSI_INIT_TOOL__
 #define __ECORE_HSI_INIT_TOOL__
 /**************************************/
@@ -20,19 +20,87 @@
 /* Max size in dwords of a zipped array */
 #define MAX_ZIPPED_SIZE			8192
 
+
 enum chip_ids {
 	CHIP_BB,
 	CHIP_K2,
+	CHIP_E5,
 	MAX_CHIP_IDS
 };
 
 
+enum dbg_bus_clients {
+	DBG_BUS_CLIENT_RBCN,
+	DBG_BUS_CLIENT_RBCP,
+	DBG_BUS_CLIENT_RBCR,
+	DBG_BUS_CLIENT_RBCT,
+	DBG_BUS_CLIENT_RBCU,
+	DBG_BUS_CLIENT_RBCF,
+	DBG_BUS_CLIENT_RBCX,
+	DBG_BUS_CLIENT_RBCS,
+	DBG_BUS_CLIENT_RBCH,
+	DBG_BUS_CLIENT_RBCZ,
+	DBG_BUS_CLIENT_OTHER_ENGINE,
+	DBG_BUS_CLIENT_TIMESTAMP,
+	DBG_BUS_CLIENT_CPU,
+	DBG_BUS_CLIENT_RBCY,
+	DBG_BUS_CLIENT_RBCQ,
+	DBG_BUS_CLIENT_RBCM,
+	DBG_BUS_CLIENT_RBCB,
+	DBG_BUS_CLIENT_RBCW,
+	DBG_BUS_CLIENT_RBCV,
+	MAX_DBG_BUS_CLIENTS
+};
+
+
+enum init_modes {
+	MODE_BB_A0_DEPRECATED,
+	MODE_BB,
+	MODE_K2,
+	MODE_ASIC,
+	MODE_EMUL_REDUCED,
+	MODE_EMUL_FULL,
+	MODE_FPGA,
+	MODE_CHIPSIM,
+	MODE_SF,
+	MODE_MF_SD,
+	MODE_MF_SI,
+	MODE_PORTS_PER_ENG_1,
+	MODE_PORTS_PER_ENG_2,
+	MODE_PORTS_PER_ENG_4,
+	MODE_100G,
+	MODE_E5,
+	MODE_SKIP_PRAM_INIT,
+	MODE_EMUL_MAC,
+	MAX_INIT_MODES
+};
+
+
+enum init_phases {
+	PHASE_ENGINE,
+	PHASE_PORT,
+	PHASE_PF,
+	PHASE_VF,
+	PHASE_QM_PF,
+	MAX_INIT_PHASES
+};
+
+
+enum init_split_types {
+	SPLIT_TYPE_NONE,
+	SPLIT_TYPE_PORT,
+	SPLIT_TYPE_PF,
+	SPLIT_TYPE_PORT_PF,
+	SPLIT_TYPE_VF,
+	MAX_INIT_SPLIT_TYPES
+};
+
+
 /*
  * Binary buffer header
  */
 struct bin_buffer_hdr {
-/* buffer offset in bytes from the beginning of the binary file */
-	u32 offset;
+	u32 offset /* buffer offset in bytes from the beginning of the binary file */;
 	u32 length /* buffer length in bytes */;
 };
 
@@ -46,7 +114,8 @@ enum bin_init_buffer_type {
 	BIN_BUF_INIT_VAL /* init data */,
 	BIN_BUF_INIT_MODE_TREE /* init modes tree */,
 	BIN_BUF_INIT_IRO /* internal RAM offsets */,
-	BIN_BUF_INIT_OVERLAYS /* FW overlays (except overlay 0) */,
+	BIN_BUF_INIT_OVERLAYS_E4 /* E4 FW overlays (except overlay 0) */,
+	BIN_BUF_INIT_OVERLAYS_E5 /* E5 FW overlays (except overlay 0) */,
 	MAX_BIN_INIT_BUFFER_TYPE
 };
 
@@ -58,8 +127,7 @@ struct fw_overlay_buf_hdr {
 	u32 data;
 #define FW_OVERLAY_BUF_HDR_STORM_ID_MASK  0xFF /* Storm ID */
 #define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
-/* Size of Storm FW overlay buffer in dwords */
-#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK  0xFFFFFF
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK  0xFFFFFF /* Size of Storm FW overlay buffer in dwords */
 #define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8
 };
 
@@ -69,11 +137,9 @@ struct fw_overlay_buf_hdr {
  */
 struct init_array_raw_hdr {
 	u32 data;
-/* Init array type, from init_array_types enum */
-#define INIT_ARRAY_RAW_HDR_TYPE_MASK    0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK    0xF /* Init array type, from init_array_types enum */
 #define INIT_ARRAY_RAW_HDR_TYPE_SHIFT   0
-/* init array params */
-#define INIT_ARRAY_RAW_HDR_PARAMS_MASK  0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK  0xFFFFFFF /* init array params */
 #define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
 };
 
@@ -82,11 +148,9 @@ struct init_array_raw_hdr {
  */
 struct init_array_standard_hdr {
 	u32 data;
-/* Init array type, from init_array_types enum */
-#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK  0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK  0xF /* Init array type, from init_array_types enum */
 #define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
-/* Init array size (in dwords) */
-#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK  0xFFFFFFF /* Init array size (in dwords) */
 #define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
 };
 
@@ -98,8 +162,7 @@ struct init_array_zipped_hdr {
 /* Init array type, from init_array_types enum */
 #define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK         0xF
 #define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT        0
-/* Init array zipped size (in bytes) */
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK  0xFFFFFFF /* Init array zipped size (in bytes) */
 #define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
 };
 
@@ -111,11 +174,9 @@ struct init_array_pattern_hdr {
 /* Init array type, from init_array_types enum */
 #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK          0xF
 #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT         0
-/* pattern size in dword */
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK  0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK  0xF /* pattern size in dword */
 #define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
-/* pattern repetitions */
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK   0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK   0xFFFFFF /* pattern repetitions */
 #define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT  8
 };
 
@@ -124,77 +185,13 @@ struct init_array_pattern_hdr {
  */
 union init_array_hdr {
 	struct init_array_raw_hdr raw /* raw init array header */;
-/* standard init array header */
-	struct init_array_standard_hdr standard;
+	struct init_array_standard_hdr standard /* standard init array header */;
 	struct init_array_zipped_hdr zipped /* zipped init array header */;
 	struct init_array_pattern_hdr pattern /* pattern init array header */;
 };
 
 
-enum dbg_bus_clients {
-	DBG_BUS_CLIENT_RBCN,
-	DBG_BUS_CLIENT_RBCP,
-	DBG_BUS_CLIENT_RBCR,
-	DBG_BUS_CLIENT_RBCT,
-	DBG_BUS_CLIENT_RBCU,
-	DBG_BUS_CLIENT_RBCF,
-	DBG_BUS_CLIENT_RBCX,
-	DBG_BUS_CLIENT_RBCS,
-	DBG_BUS_CLIENT_RBCH,
-	DBG_BUS_CLIENT_RBCZ,
-	DBG_BUS_CLIENT_OTHER_ENGINE,
-	DBG_BUS_CLIENT_TIMESTAMP,
-	DBG_BUS_CLIENT_CPU,
-	DBG_BUS_CLIENT_RBCY,
-	DBG_BUS_CLIENT_RBCQ,
-	DBG_BUS_CLIENT_RBCM,
-	DBG_BUS_CLIENT_RBCB,
-	DBG_BUS_CLIENT_RBCW,
-	DBG_BUS_CLIENT_RBCV,
-	MAX_DBG_BUS_CLIENTS
-};
-
-
-enum init_modes {
-	MODE_BB_A0_DEPRECATED,
-	MODE_BB,
-	MODE_K2,
-	MODE_ASIC,
-	MODE_EMUL_REDUCED,
-	MODE_EMUL_FULL,
-	MODE_FPGA,
-	MODE_CHIPSIM,
-	MODE_SF,
-	MODE_MF_SD,
-	MODE_MF_SI,
-	MODE_PORTS_PER_ENG_1,
-	MODE_PORTS_PER_ENG_2,
-	MODE_PORTS_PER_ENG_4,
-	MODE_100G,
-	MODE_SKIP_PRAM_INIT,
-	MODE_EMUL_MAC,
-	MAX_INIT_MODES
-};
-
-
-enum init_phases {
-	PHASE_ENGINE,
-	PHASE_PORT,
-	PHASE_PF,
-	PHASE_VF,
-	PHASE_QM_PF,
-	MAX_INIT_PHASES
-};
-
 
-enum init_split_types {
-	SPLIT_TYPE_NONE,
-	SPLIT_TYPE_PORT,
-	SPLIT_TYPE_PF,
-	SPLIT_TYPE_PORT_PF,
-	SPLIT_TYPE_VF,
-	MAX_INIT_SPLIT_TYPES
-};
 
 
 /*
@@ -214,8 +211,7 @@ enum init_array_types {
  */
 struct init_callback_op {
 	u32 op_data;
-/* Init operation, from init_op_types enum */
-#define INIT_CALLBACK_OP_OP_MASK        0xF
+#define INIT_CALLBACK_OP_OP_MASK        0xF /* Init operation, from init_op_types enum */
 #define INIT_CALLBACK_OP_OP_SHIFT       0
 #define INIT_CALLBACK_OP_RESERVED_MASK  0xFFFFFFF
 #define INIT_CALLBACK_OP_RESERVED_SHIFT 4
@@ -229,12 +225,11 @@ struct init_callback_op {
  */
 struct init_delay_op {
 	u32 op_data;
-/* Init operation, from init_op_types enum */
-#define INIT_DELAY_OP_OP_MASK        0xF
+#define INIT_DELAY_OP_OP_MASK        0xF /* Init operation, from init_op_types enum */
 #define INIT_DELAY_OP_OP_SHIFT       0
 #define INIT_DELAY_OP_RESERVED_MASK  0xFFFFFFF
 #define INIT_DELAY_OP_RESERVED_SHIFT 4
-	__le32 delay /* delay in us */;
+	u32 delay /* delay in us */;
 };
 
 
@@ -243,13 +238,11 @@ struct init_delay_op {
  */
 struct init_if_mode_op {
 	u32 op_data;
-/* Init operation, from init_op_types enum */
-#define INIT_IF_MODE_OP_OP_MASK          0xF
+#define INIT_IF_MODE_OP_OP_MASK          0xF /* Init operation, from init_op_types enum */
 #define INIT_IF_MODE_OP_OP_SHIFT         0
 #define INIT_IF_MODE_OP_RESERVED1_MASK   0xFFF
 #define INIT_IF_MODE_OP_RESERVED1_SHIFT  4
-/* Commands to skip if the modes dont match */
-#define INIT_IF_MODE_OP_CMD_OFFSET_MASK  0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK  0xFFFF /* Commands to skip if the modes don't match */
 #define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
 	u16 reserved2;
 	u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
@@ -261,24 +254,19 @@ struct init_if_mode_op {
  */
 struct init_if_phase_op {
 	u32 op_data;
-/* Init operation, from init_op_types enum */
-#define INIT_IF_PHASE_OP_OP_MASK           0xF
-#define INIT_IF_PHASE_OP_OP_SHIFT          0
-/* Indicates if DMAE is enabled in this phase */
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK  0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK    0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT   5
-/* Commands to skip if the phases dont match */
-#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK   0xFFFF
-#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT  16
+#define INIT_IF_PHASE_OP_OP_MASK          0xF /* Init operation, from init_op_types enum */
+#define INIT_IF_PHASE_OP_OP_SHIFT         0
+#define INIT_IF_PHASE_OP_RESERVED1_MASK   0xFFF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT  4
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK  0xFFFF /* Commands to skip if the phases don't match */
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
 	u32 phase_data;
-#define INIT_IF_PHASE_OP_PHASE_MASK        0xFF /* Init phase */
-#define INIT_IF_PHASE_OP_PHASE_SHIFT       0
-#define INIT_IF_PHASE_OP_RESERVED2_MASK    0xFF
-#define INIT_IF_PHASE_OP_RESERVED2_SHIFT   8
-#define INIT_IF_PHASE_OP_PHASE_ID_MASK     0xFFFF /* Init phase ID */
-#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT    16
+#define INIT_IF_PHASE_OP_PHASE_MASK       0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT      0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK   0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT  8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK    0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT   16
 };
 
 
@@ -298,8 +286,7 @@ enum init_mode_ops {
  */
 struct init_raw_op {
 	u32 op_data;
-/* Init operation, from init_op_types enum */
-#define INIT_RAW_OP_OP_MASK      0xF
+#define INIT_RAW_OP_OP_MASK      0xF /* Init operation, from init_op_types enum */
 #define INIT_RAW_OP_OP_SHIFT     0
 #define INIT_RAW_OP_PARAM1_MASK  0xFFFFFFF /* init param 1 */
 #define INIT_RAW_OP_PARAM1_SHIFT 4
@@ -318,12 +305,9 @@ struct init_op_array_params {
  * Write init operation arguments
  */
 union init_write_args {
-/* value to write, used when init source is INIT_SRC_INLINE */
-	u32 inline_val;
-/* number of zeros to write, used when init source is INIT_SRC_ZEROS */
-	u32 zeros_count;
-/* array offset to write, used when init source is INIT_SRC_ARRAY */
-	u32 array_offset;
+	u32 inline_val /* value to write, used when init source is INIT_SRC_INLINE */;
+	u32 zeros_count /* number of zeros to write, used when init source is INIT_SRC_ZEROS */;
+	u32 array_offset /* array offset to write, used when init source is INIT_SRC_ARRAY */;
 /* runtime array params to write, used when init source is INIT_SRC_RUNTIME */
 	struct init_op_array_params runtime;
 };
@@ -333,19 +317,15 @@ union init_write_args {
  */
 struct init_write_op {
 	u32 data;
-/* init operation, from init_op_types enum */
-#define INIT_WRITE_OP_OP_MASK        0xF
+#define INIT_WRITE_OP_OP_MASK        0xF /* init operation, from init_op_types enum */
 #define INIT_WRITE_OP_OP_SHIFT       0
-/* init source type, taken from init_source_types enum */
-#define INIT_WRITE_OP_SOURCE_MASK    0x7
+#define INIT_WRITE_OP_SOURCE_MASK    0x7 /* init source type, taken from init_source_types enum */
 #define INIT_WRITE_OP_SOURCE_SHIFT   4
 #define INIT_WRITE_OP_RESERVED_MASK  0x1
 #define INIT_WRITE_OP_RESERVED_SHIFT 7
-/* indicates if the register is wide-bus */
-#define INIT_WRITE_OP_WIDE_BUS_MASK  0x1
+#define INIT_WRITE_OP_WIDE_BUS_MASK  0x1 /* indicates if the register is wide-bus */
 #define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
-/* internal (absolute) GRC address, in dwords */
-#define INIT_WRITE_OP_ADDRESS_MASK   0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_MASK   0x7FFFFF /* internal (absolute) GRC address, in dwords */
 #define INIT_WRITE_OP_ADDRESS_SHIFT  9
 	union init_write_args args /* Write init operation arguments */;
 };
@@ -355,19 +335,15 @@ struct init_write_op {
  */
 struct init_read_op {
 	u32 op_data;
-/* init operation, from init_op_types enum */
-#define INIT_READ_OP_OP_MASK         0xF
+#define INIT_READ_OP_OP_MASK         0xF /* init operation, from init_op_types enum */
 #define INIT_READ_OP_OP_SHIFT        0
-/* polling type, from init_poll_types enum */
-#define INIT_READ_OP_POLL_TYPE_MASK  0xF
+#define INIT_READ_OP_POLL_TYPE_MASK  0xF /* polling type, from init_poll_types enum */
 #define INIT_READ_OP_POLL_TYPE_SHIFT 4
 #define INIT_READ_OP_RESERVED_MASK   0x1
 #define INIT_READ_OP_RESERVED_SHIFT  8
-/* internal (absolute) GRC address, in dwords */
-#define INIT_READ_OP_ADDRESS_MASK    0x7FFFFF
+#define INIT_READ_OP_ADDRESS_MASK    0x7FFFFF /* internal (absolute) GRC address, in dwords */
 #define INIT_READ_OP_ADDRESS_SHIFT   9
-/* expected polling value, used only when polling is done */
-	u32 expected_val;
+	u32 expected_val /* expected polling value, used only when polling is done */;
 };
 
 /*
@@ -391,10 +367,8 @@ union init_op {
 enum init_op_types {
 	INIT_OP_READ /* GRC read init command */,
 	INIT_OP_WRITE /* GRC write init command */,
-/* Skip init commands if the init modes expression doesn't match */
-	INIT_OP_IF_MODE,
-/* Skip init commands if the init phase doesn't match */
-	INIT_OP_IF_PHASE,
+	INIT_OP_IF_MODE /* Skip init commands if the init modes expression doesn't match */,
+	INIT_OP_IF_PHASE /* Skip init commands if the init phase doesn't match */,
 	INIT_OP_DELAY /* delay init command */,
 	INIT_OP_CALLBACK /* callback init command */,
 	MAX_INIT_OP_TYPES
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 627620baf..1b751d837 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -13,15 +13,28 @@
 #include "ecore_hsi_init_tool.h"
 #include "ecore_iro.h"
 #include "ecore_init_fw_funcs.h"
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
+static u16 con_region_offsets_e4[3][NUM_OF_CONNECTION_TYPES_E4] = {
 	{ 400,  336,  352,  368,  304,  384,  416,  352}, /* region 3 offsets */
 	{ 528,  496,  416,  512,  448,  512,  544,  480}, /* region 4 offsets */
 	{ 608,  544,  496,  576,  576,  592,  624,  560}  /* region 5 offsets */
 };
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
+static u16 task_region_offsets_e4[1][NUM_OF_CONNECTION_TYPES_E4] = {
 	{ 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
 };
 
+static u16 con_region_offsets_e5[3][NUM_OF_CONNECTION_TYPES_E5] = {
+/* region 3 offsets */
+	{ 400,  352,  384,  384,  352,  384,  416,  352,    0,    0,    0,    0,    0,    0, 0, 0},
+/* region 4 offsets */
+	{ 528,  512,  512,  544,  512,  512,  544,  480,    0,    0,    0,    0,    0,    0, 0, 0},
+/* region 5 offsets */
+	{ 608,  560,  592,  608,  640,  592,  624,  560,    0,    0,    0,    0,    0,    0, 0, 0}
+};
+static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
+/* region 1 offsets */
+	{ 256,  256,  128,    0,   32,    0,    0,   96,    0,    0,    0,    0,    0,    0, 0, 0}
+};
+
 /* General constants */
 #define QM_PQ_MEM_4KB(pq_size) \
 	(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
@@ -149,22 +162,20 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
 #define QM_STOP_CMD_POLL_PERIOD_US	500
 
 /* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
+#define QM_CMD_STRUCT_SIZE(cmd)		cmd##_STRUCT_SIZE
 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
 	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
 
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, \
-			   rl_valid, rl_id, voq, wrr) \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, ext_voq, wrr) \
 	do { \
 		OSAL_MEMSET(&(map), 0, sizeof(map)); \
-		SET_FIELD(map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
-		SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); \
-		SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_ID, rl_id); \
-		SET_FIELD(map.reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id); \
-		SET_FIELD(map.reg, QM_RF_PQ_MAP_VOQ, voq); \
-		SET_FIELD(map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr); \
-		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
-			     *((u32 *)&(map))); \
+		SET_FIELD((map).reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
+		SET_FIELD((map).reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid ? 1 : 0); \
+		SET_FIELD((map).reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
+		SET_FIELD((map).reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
+		SET_FIELD((map).reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
+		SET_FIELD((map).reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
+		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), *((u32 *)&(map))); \
 	} while (0)
 
 #define WRITE_PQ_INFO_TO_RAM		1
@@ -389,20 +400,19 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
  */
 static int ecore_global_rl_rt_init(struct ecore_hwfn *p_hwfn,
 				   struct init_qm_global_rl_params
-				     global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])
+					global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])
 {
-	u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
-			  (u32)QM_RL_CRD_REG_SIGN_BIT;
+	u16 num_global_rls = ECORE_IS_E5(p_hwfn->p_dev) ? MAX_QM_GLOBAL_RLS_E5
+							: MAX_QM_GLOBAL_RLS_E4;
+	u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | (u32)QM_RL_CRD_REG_SIGN_BIT;
 	u32 inc_val;
 	u16 rl_id;
 
 	/* Go over all global RLs */
-	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
-		u32 rate_limit = global_rl_params ?
-				 global_rl_params[rl_id].rate_limit : 0;
+	for (rl_id = 0; rl_id < num_global_rls; rl_id++) {
+		u32 rate_limit = global_rl_params ? global_rl_params[rl_id].rate_limit : 0;
 
-		inc_val = QM_RL_INC_VAL(rate_limit ?
-					rate_limit : QM_MAX_LINK_SPEED);
+		inc_val = QM_RL_INC_VAL(rate_limit ? rate_limit : QM_MAX_LINK_SPEED);
 		if (inc_val > QM_VP_RL_MAX_INC_VAL(QM_MAX_LINK_SPEED)) {
 			DP_NOTICE(p_hwfn, true, "Invalid rate limit configuration.\n");
 			return -1;
@@ -467,11 +477,10 @@ static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
 	/* Go over all Tx PQs */
 	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
 		u16 first_tx_pq_id, vport_id_in_pf;
-		struct qm_rf_pq_map tx_pq_map;
 		bool is_vf_pq;
-		u8 voq;
+		u8 ext_voq;
 
-		voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,
+		ext_voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,
 			  max_phys_tcs_per_port);
 		is_vf_pq = (i >= num_pf_pqs);
 
@@ -480,7 +489,7 @@ static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
 		first_tx_pq_id =
 		vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
 		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
-			u32 map_val = (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+			u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
 				      (pf_id << QM_WFQ_VP_PQ_PF_SHIFT);
 
 			/* Create new VP PQ */
@@ -494,9 +503,19 @@ static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
 		}
 
 		/* Prepare PQ map entry */
-		QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id,
-				  pq_params[i].rl_valid, pq_params[i].rl_id,
-				  voq, pq_params[i].wrr_group);
+		if (ECORE_IS_E5(p_hwfn->p_dev)) {
+			struct qm_rf_pq_map_e5 tx_pq_map;
+
+			QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E5, pq_id, first_tx_pq_id,
+				pq_params[i].rl_valid, pq_params[i].rl_id, ext_voq,
+				pq_params[i].wrr_group);
+		} else {
+			struct qm_rf_pq_map_e4 tx_pq_map;
+
+			QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, first_tx_pq_id,
+				pq_params[i].rl_valid, pq_params[i].rl_id, ext_voq,
+				pq_params[i].wrr_group);
+		}
 
 		/* Set PQ base address */
 		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
@@ -941,7 +960,8 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
 						u32 vport_rl,
 						u32 link_speed)
 {
-	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+	u32 inc_val, max_qm_global_rls = ECORE_IS_E5(p_hwfn->p_dev) ? MAX_QM_GLOBAL_RLS_E5
+								    : MAX_QM_GLOBAL_RLS_E4;
 
 	if (vport_id >= max_qm_global_rls) {
 		DP_NOTICE(p_hwfn, true,
@@ -1501,9 +1521,8 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
 
 	/* Update PRS register */
 	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
-	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
-			   PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
-			   vxlan_enable);
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_ENCAPSULATION_TYPE_EN_FLAGS_VXLAN_ENABLE_SHIFT,
+		vxlan_enable);
 	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val) { /* TODO: handle E5 init */
 		reg_val = ecore_rd(p_hwfn, p_ptt,
@@ -1536,11 +1555,11 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
 	/* Update PRS register */
 	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
-		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
-		   eth_gre_enable);
+		PRS_ENCAPSULATION_TYPE_EN_FLAGS_ETH_OVER_GRE_ENABLE_SHIFT,
+		eth_gre_enable);
 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
-		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
-		   ip_gre_enable);
+		PRS_ENCAPSULATION_TYPE_EN_FLAGS_IP_OVER_GRE_ENABLE_SHIFT,
+		ip_gre_enable);
 	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val) { /* TODO: handle E5 init */
 		reg_val = ecore_rd(p_hwfn, p_ptt,
@@ -1591,11 +1610,11 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
 	/* Update PRS register */
 	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
-		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
-		   eth_geneve_enable);
+		PRS_ENCAPSULATION_TYPE_EN_FLAGS_ETH_OVER_GENEVE_ENABLE_SHIFT,
+		eth_geneve_enable);
 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
-		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
-		   ip_geneve_enable);
+		PRS_ENCAPSULATION_TYPE_EN_FLAGS_IP_OVER_GENEVE_ENABLE_SHIFT,
+		ip_geneve_enable);
 	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val) { /* TODO: handle E5 init */
 		reg_val = ecore_rd(p_hwfn, p_ptt,
@@ -1903,8 +1922,7 @@ static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
 /* Calculate and return CDU validation byte per connection type / region /
  * cid
  */
-static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
-					 u8 conn_type, u8 region, u32 cid)
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
 {
 	static u8 crc8_table_valid;	/*automatically initialized to 0*/
 	u8 crc, validation_byte = 0;
@@ -1971,17 +1989,23 @@ void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
 {
 	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
 
-	p_ctx = (u8 *)p_ctx_mem;
+	p_ctx = (u8 * const)p_ctx_mem;
 
-	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
-	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
-	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+	if (ECORE_IS_E5(p_hwfn->p_dev)) {
+		x_val_ptr = &p_ctx[con_region_offsets_e5[0][ctx_type]];
+		t_val_ptr = &p_ctx[con_region_offsets_e5[1][ctx_type]];
+		u_val_ptr = &p_ctx[con_region_offsets_e5[2][ctx_type]];
+	} else {
+		x_val_ptr = &p_ctx[con_region_offsets_e4[0][ctx_type]];
+		t_val_ptr = &p_ctx[con_region_offsets_e4[1][ctx_type]];
+		u_val_ptr = &p_ctx[con_region_offsets_e4[2][ctx_type]];
+	}
 
 	OSAL_MEMSET(p_ctx, 0, ctx_size);
 
-	*x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
-	*t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
-	*u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
+	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
 }
 
 /* Calcualte and set validation bytes for task context */
@@ -1990,13 +2014,15 @@ void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
 {
 	u8 *p_ctx, *region1_val_ptr;
 
-	p_ctx = (u8 *)p_ctx_mem;
-	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+	p_ctx = (u8 * const)p_ctx_mem;
+
+	region1_val_ptr = ECORE_IS_E5(p_hwfn->p_dev) ?
+		&p_ctx[task_region_offsets_e5[0][ctx_type]] :
+		&p_ctx[task_region_offsets_e4[0][ctx_type]];
 
 	OSAL_MEMSET(p_ctx, 0, ctx_size);
 
-	*region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 1,
-							  tid);
+	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
 }
 
 /* Memset session context to 0 while preserving validation bytes */
@@ -2006,11 +2032,17 @@ void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
 	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
 	u8 x_val, t_val, u_val;
 
-	p_ctx = (u8 *)p_ctx_mem;
+	p_ctx = (u8 * const)p_ctx_mem;
 
-	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
-	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
-	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+	if (ECORE_IS_E5(p_hwfn->p_dev)) {
+		x_val_ptr = &p_ctx[con_region_offsets_e5[0][ctx_type]];
+		t_val_ptr = &p_ctx[con_region_offsets_e5[1][ctx_type]];
+		u_val_ptr = &p_ctx[con_region_offsets_e5[2][ctx_type]];
+	} else {
+		x_val_ptr = &p_ctx[con_region_offsets_e4[0][ctx_type]];
+		t_val_ptr = &p_ctx[con_region_offsets_e4[1][ctx_type]];
+		u_val_ptr = &p_ctx[con_region_offsets_e4[2][ctx_type]];
+	}
 
 	x_val = *x_val_ptr;
 	t_val = *t_val_ptr;
@@ -2030,8 +2062,11 @@ void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
 	u8 *p_ctx, *region1_val_ptr;
 	u8 region1_val;
 
-	p_ctx = (u8 *)p_ctx_mem;
-	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+	p_ctx = (u8 * const)p_ctx_mem;
+
+	region1_val_ptr = ECORE_IS_E5(p_hwfn->p_dev) ?
+		&p_ctx[task_region_offsets_e5[0][ctx_type]] :
+		&p_ctx[task_region_offsets_e4[0][ctx_type]];
 
 	region1_val = *region1_val_ptr;
 
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index ea964ea2f..42885a401 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -568,10 +568,18 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
 	fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
 	len = buf_hdr[BIN_BUF_INIT_CMD].length;
 	fw->init_ops_size = len / sizeof(struct init_raw_op);
-	offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
-	fw->fw_overlays = (u32 *)(fw_data + offset);
-	len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
-	fw->fw_overlays_len = len;
+
+	if (ECORE_IS_E4(p_dev)) {
+		offset = buf_hdr[BIN_BUF_INIT_OVERLAYS_E4].offset;
+		fw->fw_overlays_e4 = (u32 *)(fw_data + offset);
+		len = buf_hdr[BIN_BUF_INIT_OVERLAYS_E4].length;
+		fw->fw_overlays_e4_len = len;
+	} else {
+		offset = buf_hdr[BIN_BUF_INIT_OVERLAYS_E5].offset;
+		fw->fw_overlays_e5 = (u32 *)(fw_data + offset);
+		len = buf_hdr[BIN_BUF_INIT_OVERLAYS_E5].length;
+		fw->fw_overlays_e5_len = len;
+	}
 #else
 	fw->init_ops = (union init_op *)init_ops;
 	fw->arr_data = (u32 *)init_val;
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 4207b1853..e0d61e375 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -1544,7 +1544,8 @@ static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
 	if (IS_VF(p_hwfn->p_dev))
 		return;/* @@@TBD MichalK- VF CAU... */
 
-	sb_offset = igu_sb_id * PIS_PER_SB;
+	sb_offset = igu_sb_id * (ECORE_IS_E4(p_hwfn->p_dev) ? PIS_PER_SB_E4
+							    : PIS_PER_SB_E5);
 	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
 
 	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
@@ -1735,14 +1736,24 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
 				       void *sb_virt_addr,
 				       dma_addr_t sb_phy_addr, u16 sb_id)
 {
+	struct ecore_dev *p_dev = p_hwfn->p_dev;
+
 	sb_info->sb_virt = sb_virt_addr;
-	struct status_block *sb_virt;
+	if (ECORE_IS_E4(p_hwfn->p_dev)) {
+		struct status_block_e4 *sb_virt_e4 =
+			(struct status_block_e4 *)sb_info->sb_virt;
 
-	sb_virt = (struct status_block *)sb_info->sb_virt;
+		sb_info->sb_size = sizeof(*sb_virt_e4);
+		sb_info->sb_pi_array = sb_virt_e4->pi_array;
+		sb_info->sb_prod_index = &sb_virt_e4->prod_index;
+	} else {
+		struct status_block_e5 *sb_virt_e5 =
+			(struct status_block_e5 *)sb_info->sb_virt;
 
-	sb_info->sb_size = sizeof(*sb_virt);
-	sb_info->sb_pi_array = sb_virt->pi_array;
-	sb_info->sb_prod_index = &sb_virt->prod_index;
+		sb_info->sb_size = sizeof(*sb_virt_e5);
+		sb_info->sb_pi_array = sb_virt_e5->pi_array;
+		sb_info->sb_prod_index = &sb_virt_e5->prod_index;
+	}
 
 	sb_info->sb_phys = sb_phy_addr;
 
@@ -1875,7 +1886,8 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
 	ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
 			  p_virt, p_phys, ECORE_SP_SB_ID);
 
-	p_sb->pi_info_arr_size = PIS_PER_SB;
+	p_sb->pi_info_arr_size = ECORE_IS_E4(p_hwfn->p_dev) ? PIS_PER_SB_E4
+							    : PIS_PER_SB_E5;
 
 	return ECORE_SUCCESS;
 }
@@ -2081,6 +2093,7 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
 				       u16 igu_sb_id, u16 opaque, bool b_set)
 {
 	struct ecore_igu_block *p_block;
+	u8 pis_per_sb;
 	int pi, i;
 
 	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
@@ -2114,10 +2127,11 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
 			  igu_sb_id);
 
 	/* Clear the CAU for the SB */
-	for (pi = 0; pi < PIS_PER_SB; pi++)
+	pis_per_sb = ECORE_IS_E4(p_hwfn->p_dev) ? PIS_PER_SB_E4 : PIS_PER_SB_E5;
+	for (pi = 0; pi < pis_per_sb; pi++)
 		ecore_wr(p_hwfn, p_ptt,
 			 CAU_REG_PI_MEMORY +
-			 (igu_sb_id * PIS_PER_SB + pi) * 4,
+			 (igu_sb_id * pis_per_sb + pi) * 4,
 			 0);
 }
 
@@ -2718,6 +2732,7 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
 					  struct ecore_sb_info_dbg *p_info)
 {
 	u16 sbid = p_sb->igu_sb_id;
+	u32 pis_per_sb;
 	u32 i;
 
 	if (IS_VF(p_hwfn->p_dev))
@@ -2731,11 +2746,11 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
 	p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
 				    IGU_REG_CONSUMER_MEM + sbid * 4);
 
-	for (i = 0; i < PIS_PER_SB; i++)
+	pis_per_sb = ECORE_IS_E4(p_hwfn->p_dev) ? PIS_PER_SB_E4 : PIS_PER_SB_E5;
+	for (i = 0; i < pis_per_sb; i++)
 		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
 					      CAU_REG_PI_MEMORY +
-					      sbid * 4 * PIS_PER_SB +
-					      i * 4);
+					      sbid * 4 * pis_per_sb + i * 4);
 
 	return ECORE_SUCCESS;
 }
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 5042cd1d1..924c31429 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -17,7 +17,9 @@
 #define ECORE_SB_EVENT_MASK	0x0003
 
 #define SB_ALIGNED_SIZE(p_hwfn) \
-	ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+	(ECORE_IS_E4((p_hwfn)->p_dev) ? \
+	 ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn) : \
+	 ALIGNED_TYPE_SIZE(struct status_block_e5, p_hwfn))
 
 #define ECORE_SB_INVALID_IDX	0xffff
 
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index d7b6b86cc..553d7e6b3 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -47,7 +47,7 @@ struct ecore_sb_info {
 struct ecore_sb_info_dbg {
 	u32 igu_prod;
 	u32 igu_cons;
-	u16 pi[PIS_PER_SB];
+	u16 pi[MAX_PIS_PER_SB];
 };
 
 struct ecore_sb_cnt_info {
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index af234dec8..01eb69c0e 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1869,7 +1869,7 @@ static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
 	p_stats->common.ttl0_discard +=
 		HILO_64_REGPAIR(mstats.ttl0_discard);
 	p_stats->common.tpa_coalesced_pkts +=
-		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts_lb_hairpin_discard);
 	p_stats->common.tpa_coalesced_events +=
 		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
 	p_stats->common.tpa_aborts_num +=
@@ -2144,7 +2144,7 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
 				  struct ecore_spq_comp_cb *p_cb,
 				  struct ecore_ntuple_filter_params *p_params)
 {
-	struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
+	struct rx_update_gft_filter_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
 	u16 abs_rx_q_id = 0;
@@ -2165,7 +2165,7 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
 	}
 
 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
-				   ETH_RAMROD_GFT_UPDATE_FILTER,
+				   ETH_RAMROD_RX_UPDATE_GFT_FILTER,
 				   PROTOCOLID_ETH, &init_data);
 	if (rc != ECORE_SUCCESS)
 		return rc;
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 02f613688..93e984bf8 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -201,32 +201,49 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
 	__le32 *p_spq_base_lo, *p_spq_base_hi;
 	struct regpair *p_consolid_base_addr;
 	u8 *p_flags1, *p_flags9, *p_flags10;
-	struct core_conn_context *p_cxt;
 	struct ecore_cxt_info cxt_info;
 	u32 core_conn_context_size;
 	__le16 *p_physical_q0;
 	u16 physical_q;
+	void *p_cxt;
 	enum _ecore_status_t rc;
 
 	cxt_info.iid = p_spq->cid;
 
 	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
-
 	if (rc != ECORE_SUCCESS) {
 		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
 			  p_spq->cid);
 		return;
 	}
 
-	p_cxt = cxt_info.p_cxt;
-	core_conn_context_size = sizeof(*p_cxt);
-	p_flags1 = &p_cxt->xstorm_ag_context.flags1;
-	p_flags9 = &p_cxt->xstorm_ag_context.flags9;
-	p_flags10 = &p_cxt->xstorm_ag_context.flags10;
-	p_physical_q0 = &p_cxt->xstorm_ag_context.physical_q0;
-	p_spq_base_lo = &p_cxt->xstorm_st_context.spq_base_lo;
-	p_spq_base_hi = &p_cxt->xstorm_st_context.spq_base_hi;
-	p_consolid_base_addr = &p_cxt->xstorm_st_context.consolid_base_addr;
+	if (ECORE_IS_E4(p_hwfn->p_dev)) {
+		struct e4_core_conn_context *p_cxt_e4 = cxt_info.p_cxt;
+
+		p_cxt = p_cxt_e4;
+		core_conn_context_size = sizeof(*p_cxt_e4);
+		p_flags1 = &p_cxt_e4->xstorm_ag_context.flags1;
+		p_flags9 = &p_cxt_e4->xstorm_ag_context.flags9;
+		p_flags10 = &p_cxt_e4->xstorm_ag_context.flags10;
+		p_physical_q0 = &p_cxt_e4->xstorm_ag_context.physical_q0;
+		p_spq_base_lo = &p_cxt_e4->xstorm_st_context.spq_base_lo;
+		p_spq_base_hi = &p_cxt_e4->xstorm_st_context.spq_base_hi;
+		p_consolid_base_addr =
+			&p_cxt_e4->xstorm_st_context.consolid_base_addr;
+	} else {
+		struct e5_core_conn_context *p_cxt_e5 = cxt_info.p_cxt;
+
+		p_cxt = p_cxt_e5;
+		core_conn_context_size = sizeof(*p_cxt_e5);
+		p_flags1 = &p_cxt_e5->xstorm_ag_context.flags1;
+		p_flags9 = &p_cxt_e5->xstorm_ag_context.flags9;
+		p_flags10 = &p_cxt_e5->xstorm_ag_context.flags10;
+		p_physical_q0 = &p_cxt_e5->xstorm_ag_context.physical_q0;
+		p_spq_base_lo = &p_cxt_e5->xstorm_st_context.spq_base_lo;
+		p_spq_base_hi = &p_cxt_e5->xstorm_st_context.spq_base_hi;
+		p_consolid_base_addr =
+			&p_cxt_e5->xstorm_st_context.consolid_base_addr;
+	}
 
 	/* @@@TBD we zero the context until we have ilt_reset implemented. */
 	OSAL_MEM_ZERO(p_cxt, core_conn_context_size);
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 0958e5a0a..80dbdeaa0 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -24,7 +24,7 @@ union ramrod_data {
 	struct tx_queue_stop_ramrod_data		tx_queue_stop;
 	struct vport_start_ramrod_data			vport_start;
 	struct vport_stop_ramrod_data			vport_stop;
-	struct rx_update_gft_filter_data		rx_update_gft;
+	struct rx_update_gft_filter_ramrod_data		rx_update_gft;
 	struct vport_update_ramrod_data			vport_update;
 	struct core_rx_start_ramrod_data		core_rx_queue_start;
 	struct core_rx_stop_ramrod_data			core_rx_queue_stop;
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index ed8cc695f..b49465b88 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1859,8 +1859,9 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 
 	/* fill in pfdev info */
 	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
-	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
-	pfdev_info->indices_per_sb = PIS_PER_SB;
+	pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
+	pfdev_info->indices_per_sb = ECORE_IS_E4(p_hwfn->p_dev) ? PIS_PER_SB_E4
+								: PIS_PER_SB_E5;
 
 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index db03bc494..e00caf646 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -874,7 +874,7 @@ ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
 
 		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
 			   MSTORM_QZONE_START(p_hwfn->p_dev) +
-			   (hw_qid) * MSTORM_QZONE_SIZE;
+			   hw_qid * MSTORM_QZONE_SIZE(p_hwfn->p_dev);
 
 		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
 		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 4611d86d9..1ca11c473 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -24,7 +24,7 @@
  */
 #define ETH_HSI_VER_NO_PKT_LEN_TUNN         5
 
-/* Maximum number of pinned L2 connections (CIDs)*/
+/* Maximum number of pinned L2 connections (CIDs) */
 #define ETH_PINNED_CONN_MAX_NUM             32
 
 #define ETH_CACHE_LINE_SIZE                 64
@@ -100,12 +100,15 @@
 #define ETH_FILTER_RULES_COUNT              10
 /* number of RSS indirection table entries, per Vport) */
 #define ETH_RSS_IND_TABLE_ENTRIES_NUM       128
+/* RSS indirection table mask size in registers */
+#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS    (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
 /* Length of RSS key (in regs) */
 #define ETH_RSS_KEY_SIZE_REGS               10
 /* number of available RSS engines in AH */
 #define ETH_RSS_ENGINE_NUM_K2               207
 /* number of available RSS engines in BB */
 #define ETH_RSS_ENGINE_NUM_BB               127
+#define ETH_RSS_ENGINE_NUM_E5               256 /* number of available RSS engines in E5 */
 
 /* TPA constants */
 /* Maximum number of open TPA aggregations */
@@ -123,6 +126,7 @@
 
 /* GFS constants */
 #define ETH_GFT_TRASHCAN_VPORT         0x1FF /* GFT drop flow vport number */
+#define ETH_GFS_NUM_OF_ACTIONS         10           /* Maximum number of GFS actions supported */
 
 
 
diff --git a/drivers/net/qede/qede_debug.c b/drivers/net/qede/qede_debug.c
index 180d6f92c..d46727c45 100644
--- a/drivers/net/qede/qede_debug.c
+++ b/drivers/net/qede/qede_debug.c
@@ -2719,9 +2719,13 @@ static u32 qed_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
 static u32 qed_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
 			    struct ecore_ptt *p_ptt, u32 *dump_buf, bool dump)
 {
-	u32 offset = 0;
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	u32 offset = 0, num_ltids;
 	u8 storm_id;
 
+	num_ltids = dev_data->chip_id >=
+		CHIP_E5 ? NUM_OF_LTIDS_E5 : NUM_OF_LTIDS_E4;
+
 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
 		if (!qed_grc_is_storm_included(p_hwfn,
 					       (enum dbg_storms)storm_id))
@@ -2751,7 +2755,7 @@ static u32 qed_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
 						dump_buf + offset,
 						dump,
 						"TASK_AG_CTX",
-						NUM_OF_LTIDS,
+						num_ltids,
 						CM_CTX_TASK_AG, storm_id);
 
 		/* Dump Task ST context size */
@@ -2760,7 +2764,7 @@ static u32 qed_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
 						dump_buf + offset,
 						dump,
 						"TASK_ST_CTX",
-						NUM_OF_LTIDS,
+						num_ltids,
 						CM_CTX_TASK_ST, storm_id);
 	}
 
@@ -3398,10 +3402,12 @@ static enum dbg_status qed_grc_dump(struct ecore_hwfn *p_hwfn,
 				    bool dump, u32 *num_dumped_dwords)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 dwords_read, offset = 0;
+	u32 dwords_read, offset = 0, num_ltids;
 	bool parities_masked = false;
 	u8 i;
 
+	num_ltids = dev_data->chip_id >=
+		CHIP_E5 ? NUM_OF_LTIDS_E5 : NUM_OF_LTIDS_E4;
 	*num_dumped_dwords = 0;
 	dev_data->num_regs_read = 0;
 
@@ -3422,7 +3428,7 @@ static enum dbg_status qed_grc_dump(struct ecore_hwfn *p_hwfn,
 	offset += qed_dump_num_param(dump_buf + offset,
 				     dump,
 				     "num-ltids",
-				     NUM_OF_LTIDS);
+				     num_ltids);
 	offset += qed_dump_num_param(dump_buf + offset,
 				     dump, "num-ports", dev_data->num_ports);
 
@@ -4775,9 +4781,9 @@ static u32 qed_ilt_dump(struct ecore_hwfn *p_hwfn,
 	offset += qed_dump_section_hdr(dump_buf + offset,
 				       dump, "num_pf_cids_per_conn_type", 1);
 	offset += qed_dump_num_param(dump_buf + offset,
-				     dump, "size", NUM_OF_CONNECTION_TYPES);
+				     dump, "size", NUM_OF_CONNECTION_TYPES_E4);
 	for (conn_type = 0, valid_conn_pf_cids = 0;
-	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+	     conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
 		u32 num_pf_cids =
 		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
 
@@ -4790,9 +4796,9 @@ static u32 qed_ilt_dump(struct ecore_hwfn *p_hwfn,
 	offset += qed_dump_section_hdr(dump_buf + offset,
 				       dump, "num_vf_cids_per_conn_type", 1);
 	offset += qed_dump_num_param(dump_buf + offset,
-				     dump, "size", NUM_OF_CONNECTION_TYPES);
+				     dump, "size", NUM_OF_CONNECTION_TYPES_E4);
 	for (conn_type = 0, valid_conn_vf_cids = 0;
-	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+	     conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
 		u32 num_vf_cids =
 		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
 
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index caa9d1d4f..e865d988f 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -19,7 +19,7 @@
 char qede_fw_file[PATH_MAX];
 
 static const char * const QEDE_DEFAULT_FIRMWARE =
-	"/lib/firmware/qed/qed_init_values-8.40.33.0.bin";
+	"/lib/firmware/qed/qed_init_values-8.62.0.0.bin";
 
 static void
 qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 75d78cebb..6fc67d878 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -569,12 +569,14 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
 		  uint16_t sb_id)
 {
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct status_block *sb_virt;
 	dma_addr_t sb_phys;
+	void *sb_virt;
+	u32 sb_size;
 	int rc;
 
-	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
-					  sizeof(struct status_block));
+	sb_size = ECORE_IS_E5(edev) ? sizeof(struct status_block_e5) :
+				      sizeof(struct status_block_e4);
+	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sb_size);
 	if (!sb_virt) {
 		DP_ERR(edev, "Status block allocation failed\n");
 		return -ENOMEM;
@@ -583,8 +585,7 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
 					sb_phys, sb_id);
 	if (rc) {
 		DP_ERR(edev, "Status block initialization failed\n");
-		OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
-				       sizeof(struct status_block));
+		OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys, sb_size);
 		return rc;
 	}
 
@@ -680,8 +681,8 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 			DP_INFO(edev, "Free sb_info index 0x%x\n",
 					fp->sb_info->igu_sb_id);
 			OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
-				fp->sb_info->sb_phys,
-				sizeof(struct status_block));
+					       fp->sb_info->sb_phys,
+					       fp->sb_info->sb_size);
 			rte_free(fp->sb_info);
 			fp->sb_info = NULL;
 		}
-- 
2.18.0


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH 3/7] net/qede/base: add OS abstracted changes
  2021-02-19 10:14 [dpdk-dev] [PATCH 0/7] net/qede: add support for new HW Rasesh Mody
  2021-02-19 10:14 ` [dpdk-dev] [PATCH 2/7] net/qede/base: changes for HSI to support " Rasesh Mody
@ 2021-02-19 10:14 ` Rasesh Mody
  2021-02-19 10:14 ` [dpdk-dev] [PATCH 4/7] net/qede/base: update base driver to 8.62.4.0 Rasesh Mody
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Rasesh Mody @ 2021-02-19 10:14 UTC (permalink / raw)
  To: jerinj, ferruh.yigit; +Cc: Rasesh Mody, dev, GR-Everest-DPDK-Dev, Igor Russkikh

This patch includes the OS-abstracted changes required to support the new
hardware and the new features it provides. It also adds new bit
operations to the RTE library.

Signed-off-by: Rasesh Mody <rmody@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
---
 drivers/net/qede/base/bcm_osal.c    |  2 +-
 drivers/net/qede/base/bcm_osal.h    | 39 ++++++++++++++++++---
 lib/librte_eal/include/rte_bitops.h | 54 ++++++++++++++++++++++++++++-
 3 files changed, 88 insertions(+), 7 deletions(-)

diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2c59397e0..23a84795f 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -121,7 +121,7 @@ void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
 			      struct ecore_vf_acquire_sw_info *vf_sw_info)
 {
 	vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
-	vf_sw_info->override_fw_version = 1;
+	/* TODO - fill driver version */
 }
 
 void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index c5b539928..38b7fff67 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -47,9 +47,10 @@ void qed_link_update(struct ecore_hwfn *hwfn);
 #endif
 #endif
 
-#define OSAL_WARN(arg1, arg2, arg3, ...) (0)
-
-#define UNUSED(x)	(void)(x)
+#define UNUSED1(a)		(void)(a)
+#define UNUSED2(a, b)		((void)(a), UNUSED1(b))
+#define UNUSED3(a, b, c)	((void)(a), UNUSED2(b, c))
+#define UNUSED4(a, b, c, d)	((void)(a), UNUSED3(b, c, d))
 
 /* Memory Types */
 typedef uint8_t u8;
@@ -167,9 +168,8 @@ typedef pthread_mutex_t osal_mutex_t;
 #define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
 #define OSAL_SPIN_LOCK_IRQSAVE(lock, flags)	\
 	do {					\
-		UNUSED(lock);			\
 		flags = 0;			\
-		UNUSED(flags);			\
+		UNUSED2(lock, flags);		\
 	} while (0)
 #define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
 #define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
@@ -326,6 +326,18 @@ typedef struct osal_list_t {
 #define OSAL_GET_BIT(bit, bitmap) \
 	rte_bit_relaxed_get32(bit, bitmap)
 
+#define OSAL_TEST_BIT(bit, bitmap) \
+	OSAL_GET_BIT(bit, bitmap)
+
+#define OSAL_TEST_AND_CLEAR_BIT(bit, bitmap) \
+	rte_bit_relaxed_test_and_clear32(bit, bitmap)
+
+#define OSAL_TEST_AND_FLIP_BIT(bit, bitmap) \
+	rte_bit_relaxed_test_and_flip32(bit, bitmap)
+
+#define OSAL_NON_ATOMIC_SET_BIT(bit, bitmap) \
+	rte_bit_relaxed_set32(bit, bitmap)
+
 u32 qede_find_first_bit(unsigned long *, u32);
 #define OSAL_FIND_FIRST_BIT(bitmap, length) \
 	qede_find_first_bit(bitmap, length)
@@ -342,7 +354,10 @@ u32 qede_find_first_zero_bit(u32 *bitmap, u32 length);
 #define OSAL_BITMAP_WEIGHT(bitmap, count) 0
 
 #define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
+#define OSAL_BW_UPDATE(hwfn, ptt) nothing
 #define OSAL_TRANSCEIVER_UPDATE(hwfn) nothing
+#define OSAL_TRANSCEIVER_TX_FAULT(hwfn) nothing
+#define OSAL_TRANSCEIVER_RX_LOS(hwfn) nothing
 #define OSAL_DCBX_AEN(hwfn, mib_type) nothing
 
 /* SR-IOV channel */
@@ -366,6 +381,8 @@ void osal_vf_flr_update(struct ecore_hwfn *p_hwfn);
 #define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) nothing
 #define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) nothing
 #define OSAL_IOV_VF_VPORT_STOP(hwfn, vf) nothing
+#define OSAL_IOV_DB_REC_HANDLER(hwfn) nothing
+#define OSAL_IOV_BULLETIN_UPDATE(hwfn) nothing
 
 u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
 		   u8 *input_buf, u32 max_size, u8 *unzip_buf);
@@ -412,16 +429,20 @@ u32 qede_osal_log2(u32);
 
 #define OFFSETOF(str, field) __builtin_offsetof(str, field)
 #define OSAL_ASSERT(is_assert) assert(is_assert)
+#define OSAL_WARN(condition, format, ...) (0)
 #define OSAL_BEFORE_PF_START(file, engine) nothing
 #define OSAL_AFTER_PF_STOP(file, engine) nothing
+#define OSAL_GCD(a, b)  (1)
 
 /* Endian macros */
 #define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val)
+#define OSAL_CPU_TO_BE16(val) rte_cpu_to_be_16(val)
 #define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val)
 #define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val)
 #define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val)
 #define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val)
 #define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val)
+#define OSAL_BE16_TO_CPU(val) rte_be_to_cpu_16(val)
 #define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val)
 
 #define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr)
@@ -432,6 +453,7 @@ u32 qede_osal_log2(u32);
 #define OSAL_STRLEN(string) strlen(string)
 #define OSAL_STRCPY(dst, string) strcpy(dst, string)
 #define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
+#define OSAL_STRLCPY(dst, string, len) strlcpy(dst, string, len)
 #define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
 #define OSAL_STRTOUL(str, base, res) 0
 
@@ -463,6 +485,7 @@ u32 qede_crc32(u32 crc, u8 *ptr, u32 length);
 #define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
 #define OSAL_HW_INFO_CHANGE(p_hwfn, change) nothing
 #define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing
+#define OSAL_NUM_FUNCS_IS_SET(p_hwfn) nothing
 #define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
 
 #define OSAL_DIV_S64(a, b)	((a) / (b))
@@ -478,4 +501,10 @@ enum dbg_status	qed_dbg_alloc_user_data(struct ecore_hwfn *p_hwfn,
 	qed_dbg_alloc_user_data(p_hwfn, user_data_ptr)
 #define OSAL_DB_REC_OCCURRED(p_hwfn) nothing
 
+typedef int osal_va_list;
+#define OSAL_VA_START(A, B) UNUSED2(A, B)
+#define OSAL_VA_END(A) UNUSED1(A)
+#define OSAL_VSNPRINTF(A, ...) 0
+#define OSAL_INT_DBG_STORE(P_DEV, ...) nothing
+
 #endif /* __BCM_OSAL_H */
diff --git a/lib/librte_eal/include/rte_bitops.h b/lib/librte_eal/include/rte_bitops.h
index 141e8ea73..9a39dd13c 100644
--- a/lib/librte_eal/include/rte_bitops.h
+++ b/lib/librte_eal/include/rte_bitops.h
@@ -54,7 +54,7 @@ rte_bit_relaxed_get32(unsigned int nr, volatile uint32_t *addr)
 {
 	RTE_ASSERT(nr < 32);
 
-	uint32_t mask = UINT32_C(1) << nr;
+	uint32_t mask = RTE_BIT32(nr);
 	return (*addr) & mask;
 }
 
@@ -152,6 +152,32 @@ rte_bit_relaxed_test_and_clear32(unsigned int nr, volatile uint32_t *addr)
 	return val & mask;
 }
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Return the original bit from a 32-bit value, then flip (toggle) it,
+ * without memory ordering.
+ *
+ * @param nr
+ *   The target bit to get and flip.
+ * @param addr
+ *   The address holding the bit.
+ * @return
+ *   The original bit.
+ */
+__rte_experimental
+static inline uint32_t
+rte_bit_relaxed_test_and_flip32(unsigned int nr, volatile uint32_t *addr)
+{
+	RTE_ASSERT(nr < 32);
+
+	uint32_t mask = RTE_BIT32(nr);
+	uint32_t val = *addr;
+	*addr = val ^ mask;
+	return val & mask;
+}
+
 /*------------------------ 64-bit relaxed operations ------------------------*/
 
 /**
@@ -271,4 +297,30 @@ rte_bit_relaxed_test_and_clear64(unsigned int nr, volatile uint64_t *addr)
 	return val & mask;
 }
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Return the original bit from a 64-bit value, then flip (toggle) it,
+ * without memory ordering.
+ *
+ * @param nr
+ *   The target bit to get and flip.
+ * @param addr
+ *   The address holding the bit.
+ * @return
+ *   The original bit.
+ */
+__rte_experimental
+static inline uint64_t
+rte_bit_relaxed_test_and_flip64(unsigned int nr, volatile uint64_t *addr)
+{
+	RTE_ASSERT(nr < 64);
+
+	uint64_t mask = RTE_BIT64(nr);
+	uint64_t val = *addr;
+	*addr = val ^ mask;
+	return val & mask;
+}
+
 #endif /* _RTE_BITOPS_H_ */
-- 
2.18.0


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH 4/7] net/qede/base: update base driver to 8.62.4.0
  2021-02-19 10:14 [dpdk-dev] [PATCH 0/7] net/qede: add support for new HW Rasesh Mody
  2021-02-19 10:14 ` [dpdk-dev] [PATCH 2/7] net/qede/base: changes for HSI to support " Rasesh Mody
  2021-02-19 10:14 ` [dpdk-dev] [PATCH 3/7] net/qede/base: add OS abstracted changes Rasesh Mody
@ 2021-02-19 10:14 ` Rasesh Mody
  2021-02-19 10:14 ` [dpdk-dev] [PATCH 5/7] net/qede: changes for DMA page chain allocation and free Rasesh Mody
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Rasesh Mody @ 2021-02-19 10:14 UTC (permalink / raw)
  To: jerinj, ferruh.yigit; +Cc: Rasesh Mody, dev, GR-Everest-DPDK-Dev, Igor Russkikh

This patch updates the base driver to support the new hardware and the
new firmware it runs, while retaining support for existing hardware.
The base driver is updated to version 8.62.4.0. There are also some
whitespace changes adhering to the new policy.

Signed-off-by: Rasesh Mody <rmody@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
---
 drivers/net/qede/base/ecore.h               |  550 +-
 drivers/net/qede/base/ecore_attn_values.h   |    3 +-
 drivers/net/qede/base/ecore_cxt.c           | 1184 +++--
 drivers/net/qede/base/ecore_cxt.h           |  140 +-
 drivers/net/qede/base/ecore_cxt_api.h       |   31 +-
 drivers/net/qede/base/ecore_dcbx.c          |  524 +-
 drivers/net/qede/base/ecore_dcbx.h          |   16 +-
 drivers/net/qede/base/ecore_dcbx_api.h      |   41 +-
 drivers/net/qede/base/ecore_dev.c           | 3865 ++++++++++----
 drivers/net/qede/base/ecore_dev_api.h       |  354 +-
 drivers/net/qede/base/ecore_gtt_reg_addr.h  |   93 +-
 drivers/net/qede/base/ecore_gtt_values.h    |    4 +-
 drivers/net/qede/base/ecore_hsi_common.h    |    2 +-
 drivers/net/qede/base/ecore_hw.c            |  381 +-
 drivers/net/qede/base/ecore_hw.h            |   55 +-
 drivers/net/qede/base/ecore_hw_defs.h       |   45 +-
 drivers/net/qede/base/ecore_init_fw_funcs.c | 1224 +++--
 drivers/net/qede/base/ecore_init_fw_funcs.h |  457 +-
 drivers/net/qede/base/ecore_init_ops.c      |  143 +-
 drivers/net/qede/base/ecore_init_ops.h      |   19 +-
 drivers/net/qede/base/ecore_int.c           | 1313 +++--
 drivers/net/qede/base/ecore_int.h           |   60 +-
 drivers/net/qede/base/ecore_int_api.h       |  125 +-
 drivers/net/qede/base/ecore_iov_api.h       |  115 +-
 drivers/net/qede/base/ecore_iro.h           |  427 +-
 drivers/net/qede/base/ecore_iro_values.h    |  463 +-
 drivers/net/qede/base/ecore_l2.c            |  485 +-
 drivers/net/qede/base/ecore_l2.h            |   18 +-
 drivers/net/qede/base/ecore_l2_api.h        |  148 +-
 drivers/net/qede/base/ecore_mcp.c           | 2610 +++++++---
 drivers/net/qede/base/ecore_mcp.h           |  124 +-
 drivers/net/qede/base/ecore_mcp_api.h       |  467 +-
 drivers/net/qede/base/ecore_mng_tlv.c       |  910 ++--
 drivers/net/qede/base/ecore_proto_if.h      |   69 +-
 drivers/net/qede/base/ecore_rt_defs.h       |  895 ++--
 drivers/net/qede/base/ecore_sp_api.h        |    6 +-
 drivers/net/qede/base/ecore_sp_commands.c   |  138 +-
 drivers/net/qede/base/ecore_sp_commands.h   |   18 +-
 drivers/net/qede/base/ecore_spq.c           |  330 +-
 drivers/net/qede/base/ecore_spq.h           |   63 +-
 drivers/net/qede/base/ecore_sriov.c         | 1660 ++++--
 drivers/net/qede/base/ecore_sriov.h         |  146 +-
 drivers/net/qede/base/ecore_status.h        |    4 +-
 drivers/net/qede/base/ecore_utils.h         |   18 +-
 drivers/net/qede/base/ecore_vf.c            |  546 +-
 drivers/net/qede/base/ecore_vf.h            |   54 +-
 drivers/net/qede/base/ecore_vf_api.h        |   72 +-
 drivers/net/qede/base/ecore_vfpf_if.h       |  117 +-
 drivers/net/qede/base/eth_common.h          |  294 +-
 drivers/net/qede/base/mcp_public.h          | 2341 ++++++---
 drivers/net/qede/base/nvm_cfg.h             | 5059 ++++++++++---------
 drivers/net/qede/qede_debug.c               |   31 +-
 drivers/net/qede/qede_ethdev.c              |    2 +-
 drivers/net/qede/qede_rxtx.c                |    2 +-
 54 files changed, 18340 insertions(+), 9921 deletions(-)

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 3fb8fd2ce..9801d5348 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -1,19 +1,28 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_H
 #define __ECORE_H
 
+#include <linux/if_ether.h>
+
+#define ECORE_ETH_ALEN ETH_ALEN
+#define ECORE_ETH_P_8021Q ETH_P_8021Q
+#define ECORE_ETH_P_8021AD ETH_P_8021AD
+#define UEFI
+
 /* @DPDK */
+#define CONFIG_ECORE_BINARY_FW
+#undef CONFIG_ECORE_ZIPPED_FW
+
+#ifdef CONFIG_ECORE_BINARY_FW
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <unistd.h>
-
-#define CONFIG_ECORE_BINARY_FW
-#undef CONFIG_ECORE_ZIPPED_FW
+#endif
 
 #ifdef CONFIG_ECORE_ZIPPED_FW
 #include <zlib.h>
@@ -29,8 +38,8 @@
 #include "mcp_public.h"
 
 #define ECORE_MAJOR_VERSION		8
-#define ECORE_MINOR_VERSION		40
-#define ECORE_REVISION_VERSION		26
+#define ECORE_MINOR_VERSION		62
+#define ECORE_REVISION_VERSION		4
 #define ECORE_ENGINEERING_VERSION	0
 
 #define ECORE_VERSION							\
@@ -49,14 +58,23 @@
 #define ECORE_WFQ_UNIT	100
 #include "../qede_logs.h" /* @DPDK */
 
-#define ISCSI_BDQ_ID(_port_id) (_port_id)
-#define FCOE_BDQ_ID(_port_id) (_port_id + 2)
 /* Constants */
 #define ECORE_WID_SIZE		(1024)
 #define ECORE_MIN_WIDS		(4)
 
 /* Configurable */
 #define ECORE_PF_DEMS_SIZE	(4)
+#define ECORE_VF_DEMS_SIZE	(32)
+#define ECORE_MIN_DPIS		(4)  /* The minimal number of DPIs required to
+				      * load the driver. The number was
+				      * arbitrarily set.
+				      */
+/* Derived */
+#define ECORE_MIN_PWM_REGION	(ECORE_WID_SIZE * ECORE_MIN_DPIS)
+
+#define ECORE_CXT_PF_CID (0xff)
+
+#define ECORE_HW_STOP_RETRY_LIMIT	(10)
 
 /* cau states */
 enum ecore_coalescing_mode {
@@ -71,23 +89,29 @@ enum ecore_nvm_cmd {
 	ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
 	ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
 	ECORE_EXT_PHY_FW_UPGRADE = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE,
-	ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
 	ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
 	ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
 	ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
 	ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
+	ECORE_ENCRYPT_PASSWORD = DRV_MSG_CODE_ENCRYPT_PASSWORD,
 	ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
 };
 
-#ifndef LINUX_REMOVE
-#if !defined(CONFIG_ECORE_L2)
+#if !defined(CONFIG_ECORE_L2) && !defined(CONFIG_ECORE_ROCE) && \
+	!defined(CONFIG_ECORE_FCOE) && !defined(CONFIG_ECORE_ISCSI) && \
+	!defined(CONFIG_ECORE_IWARP) && !defined(CONFIG_ECORE_OOO)
 #define CONFIG_ECORE_L2
 #define CONFIG_ECORE_SRIOV
-#endif
+#define CONFIG_ECORE_ROCE
+#define CONFIG_ECORE_IWARP
+#define CONFIG_ECORE_FCOE
+#define CONFIG_ECORE_ISCSI
+#define CONFIG_ECORE_LL2
+#define CONFIG_ECORE_OOO
 #endif
 
 /* helpers */
-#ifndef __EXTRACT__LINUX__
+#ifndef __EXTRACT__LINUX__IF__
 #define MASK_FIELD(_name, _value)					\
 		((_value) &= (_name##_MASK))
 
@@ -103,14 +127,16 @@ do {									\
 #define GET_FIELD(value, name)						\
 	(((value) >> (name##_SHIFT)) & name##_MASK)
 
-#define GET_MFW_FIELD(name, field)				\
+#define GET_MFW_FIELD(name, field)					\
 	(((name) & (field ## _MASK)) >> (field ## _OFFSET))
 
 #define SET_MFW_FIELD(name, field, value)				\
 do {									\
-	(name) &= ~((field ## _MASK));		\
+	(name) &= ~(field ## _MASK);					\
 	(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));	\
 } while (0)
+
+#define DB_ADDR_SHIFT(addr)             ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
 #endif
 
 static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
@@ -121,7 +147,7 @@ static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
 	return db_addr;
 }
 
-static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
+static OSAL_INLINE u32 DB_ADDR_VF_E4(u32 cid, u32 DEMS)
 {
 	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
 		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
@@ -129,6 +155,17 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
 	return db_addr;
 }
 
+static OSAL_INLINE u32 DB_ADDR_VF_E5(u32 cid, u32 DEMS)
+{
+	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+		      (cid * ECORE_VF_DEMS_SIZE);
+
+	return db_addr;
+}
+
+#define DB_ADDR_VF(dev, cid, DEMS) \
+	(ECORE_IS_E4(dev) ? DB_ADDR_VF_E4(cid, DEMS) : DB_ADDR_VF_E5(cid, DEMS))
+
 #define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				  \
 	((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
 	 ~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
@@ -143,7 +180,84 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
 #endif
 #endif
 
-#ifndef __EXTRACT__LINUX__
+#ifndef __EXTRACT__LINUX__IF__
+#define ECORE_INT_DEBUG_SIZE_DEF     _MB(2)
+struct ecore_internal_trace {
+	char *buf;
+	u32 size;
+	u64 prod;
+	osal_spinlock_t lock;
+};
+
+#define ECORE_DP_INT_LOG_MAX_STR_SIZE 256
+#define ECORE_DP_INT_LOG_DEFAULT_MASK (0xffffc3ff)
+
+#ifndef UEFI
+/* Debug print definitions */
+#define DP_INT_LOG(P_DEV, LEVEL, MODULE, fmt, ...)		\
+do {								\
+	if (OSAL_UNLIKELY((P_DEV)->dp_int_level > (LEVEL)))	\
+		break;						\
+	if (OSAL_UNLIKELY((P_DEV)->dp_int_level == ECORE_LEVEL_VERBOSE) && \
+	    ((LEVEL) == ECORE_LEVEL_VERBOSE) &&			\
+	    ((P_DEV)->dp_int_module & (MODULE)) == 0)		\
+		break;						\
+								\
+	OSAL_INT_DBG_STORE(P_DEV, fmt,				\
+				__func__, __LINE__,		\
+				(P_DEV)->name ? (P_DEV)->name : "",	\
+				##__VA_ARGS__);		\
+} while (0)
+
+#define DP_ERR(P_DEV, fmt, ...)				\
+do {							\
+	DP_INT_LOG((P_DEV), ECORE_LEVEL_ERR, 0,		\
+		"ERR: [%s:%d(%s)]" fmt, ##__VA_ARGS__);	\
+	PRINT_ERR((P_DEV)->dp_ctx, "[%s:%d(%s)]" fmt,	\
+		  __func__, __LINE__,			\
+		  (P_DEV)->name ? (P_DEV)->name : "",	\
+		  ##__VA_ARGS__);			\
+} while (0)
+
+#define DP_NOTICE(P_DEV, is_assert, fmt, ...)				\
+do {									\
+	DP_INT_LOG((P_DEV), ECORE_LEVEL_NOTICE,	0,			\
+		 "NOTICE: [%s:%d(%s)]" fmt, ##__VA_ARGS__);		\
+	if (OSAL_UNLIKELY((P_DEV)->dp_level <= ECORE_LEVEL_NOTICE)) {	\
+		PRINT((P_DEV)->dp_ctx, "[%s:%d(%s)]" fmt,		\
+		      __func__, __LINE__,				\
+		      (P_DEV)->name ? (P_DEV)->name : "",		\
+		      ##__VA_ARGS__);					\
+		OSAL_ASSERT(!(is_assert));				\
+	}								\
+} while (0)
+
+#define DP_INFO(P_DEV, fmt, ...)				      \
+do {								      \
+	DP_INT_LOG((P_DEV), ECORE_LEVEL_INFO, 0,		      \
+		"INFO: [%s:%d(%s)]" fmt, ##__VA_ARGS__);	      \
+	if (OSAL_UNLIKELY((P_DEV)->dp_level <= ECORE_LEVEL_INFO)) {   \
+		PRINT((P_DEV)->dp_ctx, "[%s:%d(%s)]" fmt,	      \
+		      __func__, __LINE__,			      \
+		      (P_DEV)->name ? (P_DEV)->name : "",	      \
+		      ##__VA_ARGS__);				      \
+	}							      \
+} while (0)
+
+#define DP_VERBOSE(P_DEV, module, fmt, ...)				\
+do {									\
+	DP_INT_LOG((P_DEV), ECORE_LEVEL_VERBOSE, module,		\
+		"VERBOSE: [%s:%d(%s)]" fmt, ##__VA_ARGS__);		\
+	if (OSAL_UNLIKELY(((P_DEV)->dp_level <= ECORE_LEVEL_VERBOSE) &&	\
+	    ((P_DEV)->dp_module & module))) {				\
+		PRINT((P_DEV)->dp_ctx, "[%s:%d(%s)]" fmt,		\
+		      __func__, __LINE__,				\
+		      (P_DEV)->name ? (P_DEV)->name : "",		\
+		      ##__VA_ARGS__);					\
+	}								\
+} while (0)
+#endif
+
 enum DP_LEVEL {
 	ECORE_LEVEL_VERBOSE	= 0x0,
 	ECORE_LEVEL_INFO	= 0x1,
@@ -181,6 +295,7 @@ enum DP_MODULE {
 	ECORE_MSG_SP		= 0x100000,
 	ECORE_MSG_STORAGE	= 0x200000,
 	ECORE_MSG_OOO		= 0x200000,
+	ECORE_MSG_FS		= 0x400000,
 	ECORE_MSG_CXT		= 0x800000,
 	ECORE_MSG_LL2		= 0x1000000,
 	ECORE_MSG_ILT		= 0x2000000,
@@ -188,13 +303,49 @@ enum DP_MODULE {
 	ECORE_MSG_DEBUG		= 0x8000000,
 	/* to be added...up to 0x8000000 */
 };
+
+/**
+ * @brief Convert from 32b debug param to two params of level and module
+ *
+ * @param debug
+ * @param p_dp_module
+ * @param p_dp_level
+ * @return void
+ *
+ * @note Input 32b decoding:
+ *	 b31 - enable all NOTICE prints. NOTICE prints are for deviation from
+ *	 the 'happy' flow, e.g. memory allocation failed.
+ *	 b30 - enable all INFO prints. INFO prints are for major steps in the
+ *	 flow and provide important parameters.
+ *	 b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of
+ *	 that module. VERBOSE prints are for tracking the specific flow in low
+ *	 level.
+ *
+ *	 Notice that the level should be that of the lowest required logs.
+ */
+static OSAL_INLINE void ecore_config_debug(u32 debug, u32 *p_dp_module,
+					   u8 *p_dp_level)
+{
+	*p_dp_level = ECORE_LEVEL_NOTICE;
+	*p_dp_module = 0;
+
+	if (debug & ECORE_LOG_VERBOSE_MASK) {
+		*p_dp_level = ECORE_LEVEL_VERBOSE;
+		*p_dp_module = (debug & 0x3FFFFFFF);
+	} else if (debug & ECORE_LOG_INFO_MASK) {
+		*p_dp_level = ECORE_LEVEL_INFO;
+	} else if (debug & ECORE_LOG_NOTICE_MASK) {
+		*p_dp_level = ECORE_LEVEL_NOTICE;
+	}
+}
+
 #endif
 
 #define for_each_hwfn(p_dev, i)	for (i = 0; i < p_dev->num_hwfns; i++)
 
 #define D_TRINE(val, cond1, cond2, true1, true2, def) \
-	(val == (cond1) ? true1 : \
-	 (val == (cond2) ? true2 : def))
+	((val) == (cond1) ? (true1) : \
+	 ((val) == (cond2) ? (true2) : (def)))
 
 /* forward */
 struct ecore_ptt_pool;
@@ -210,6 +361,8 @@ struct ecore_igu_info;
 struct ecore_mcp_info;
 struct ecore_dcbx_info;
 struct ecore_llh_info;
+struct ecore_fs_info_e4;
+struct ecore_fs_info_e5;
 
 struct ecore_rt_data {
 	u32	*init_val;
@@ -233,6 +386,13 @@ enum ecore_tunn_clss {
 	MAX_ECORE_TUNN_CLSS,
 };
 
+#ifndef __EXTRACT__LINUX__IF__
+enum ecore_tcp_ip_version {
+	ECORE_TCP_IPV4,
+	ECORE_TCP_IPV6,
+};
+#endif
+
 struct ecore_tunn_update_type {
 	bool b_update_mode;
 	bool b_mode_enabled;
@@ -256,6 +416,9 @@ struct ecore_tunnel_info {
 
 	bool b_update_rx_cls;
 	bool b_update_tx_cls;
+
+	bool update_non_l2_vxlan;
+	bool non_l2_vxlan_enable;
 };
 
 /* The PCI personality is not quite synonymous to protocol ID:
@@ -279,6 +442,15 @@ struct ecore_qm_iids {
 	u32 tids;
 };
 
+/* The PCI relax ordering is either taken care by management FW or can be
+ * enable/disable by ecore client.
+ */
+enum ecore_pci_rlx_odr {
+	ECORE_DEFAULT_RLX_ODR,
+	ECORE_ENABLE_RLX_ODR,
+	ECORE_DISABLE_RLX_ODR
+};
+
 #define MAX_PF_PER_PORT 8
 
 /* HW / FW resources, output of features supported below, most information
@@ -292,12 +464,16 @@ enum ecore_resources {
 	ECORE_RL,
 	ECORE_MAC,
 	ECORE_VLAN,
+	ECORE_VF_RDMA_CNQ_RAM,
 	ECORE_RDMA_CNQ_RAM,
 	ECORE_ILT,
-	ECORE_LL2_QUEUE,
+	ECORE_LL2_RAM_QUEUE,
+	ECORE_LL2_CTX_QUEUE,
 	ECORE_CMDQS_CQS,
 	ECORE_RDMA_STATS_QUEUE,
 	ECORE_BDQ,
+	ECORE_VF_MAC_ADDR,
+	ECORE_GFS_PROFILE,
 
 	/* This is needed only internally for matching against the IGU.
 	 * In case of legacy MFW, would be set to `0'.
@@ -317,6 +493,7 @@ enum ecore_feature {
 	ECORE_EXTRA_VF_QUE,
 	ECORE_VMQ,
 	ECORE_RDMA_CNQ,
+	ECORE_VF_RDMA_CNQ,
 	ECORE_ISCSI_CQ,
 	ECORE_FCOE_CQ,
 	ECORE_VF_L2_QUE,
@@ -335,6 +512,11 @@ enum ecore_port_mode {
 	ECORE_PORT_MODE_DE_1X25G,
 	ECORE_PORT_MODE_DE_4X25G,
 	ECORE_PORT_MODE_DE_2X10G,
+	ECORE_PORT_MODE_DE_2X50G_R1,
+	ECORE_PORT_MODE_DE_4X50G_R1,
+	ECORE_PORT_MODE_DE_1X100G_R2,
+	ECORE_PORT_MODE_DE_2X100G_R2,
+	ECORE_PORT_MODE_DE_1X100G_R4,
 };
 
 enum ecore_dev_cap {
@@ -345,7 +527,7 @@ enum ecore_dev_cap {
 	ECORE_DEV_CAP_IWARP
 };
 
-#ifndef __EXTRACT__LINUX__
+#ifndef __EXTRACT__LINUX__IF__
 enum ecore_hw_err_type {
 	ECORE_HW_ERR_FAN_FAIL,
 	ECORE_HW_ERR_MFW_RESP_FAIL,
@@ -356,10 +538,9 @@ enum ecore_hw_err_type {
 };
 #endif
 
-enum ecore_db_rec_exec {
-	DB_REC_DRY_RUN,
-	DB_REC_REAL_DEAL,
-	DB_REC_ONCE,
+enum ecore_wol_support {
+	ECORE_WOL_SUPPORT_NONE,
+	ECORE_WOL_SUPPORT_PME,
 };
 
 struct ecore_hw_info {
@@ -382,7 +563,9 @@ struct ecore_hw_info {
 	((dev)->hw_info.personality == ECORE_PCI_FCOE)
 #define ECORE_IS_ISCSI_PERSONALITY(dev) \
 	((dev)->hw_info.personality == ECORE_PCI_ISCSI)
-
+#define ECORE_IS_NVMETCP_PERSONALITY(dev) \
+	((dev)->hw_info.personality == ECORE_PCI_ISCSI && \
+	 (dev)->is_nvmetcp)
 	/* Resource Allocation scheme results */
 	u32 resc_start[ECORE_MAX_RESC];
 	u32 resc_num[ECORE_MAX_RESC];
@@ -397,23 +580,22 @@ struct ecore_hw_info {
 	/* Amount of traffic classes HW supports */
 	u8 num_hw_tc;
 
-/* Amount of TCs which should be active according to DCBx or upper layer driver
- * configuration
- */
-
+/* Amount of TCs which should be active according to DCBx or upper layer driver configuration */
 	u8 num_active_tc;
 
 	/* The traffic class used by PF for it's offloaded protocol */
 	u8 offload_tc;
+	bool offload_tc_set;
+
+	bool multi_tc_roce_en;
+#define IS_ECORE_MULTI_TC_ROCE(p_hwfn) (!!((p_hwfn)->hw_info.multi_tc_roce_en))
 
 	u32 concrete_fid;
 	u16 opaque_fid;
 	u16 ovlan;
 	u32 part_num[4];
 
-	unsigned char hw_mac_addr[ETH_ALEN];
-	u64 node_wwn; /* For FCoE only */
-	u64 port_wwn; /* For FCoE only */
+	unsigned char hw_mac_addr[ECORE_ETH_ALEN];
 
 	u16 num_iscsi_conns;
 	u16 num_fcoe_conns;
@@ -424,12 +606,16 @@ struct ecore_hw_info {
 
 	u32 port_mode;
 	u32 hw_mode;
-	u32 device_capabilities;
+	u32 device_capabilities; /* @DPDK */
 
+#ifndef __EXTRACT__LINUX__THROW__
 	/* Default DCBX mode */
 	u8 dcbx_mode;
+#endif
 
 	u16 mtu;
+
+	enum ecore_wol_support		b_wol_support;
 };
 
 /* maximun size of read/write commands (HW limit) */
@@ -470,38 +656,71 @@ struct ecore_wfq_data {
 
 #define OFLD_GRP_SIZE 4
 
+struct ecore_offload_pq {
+	u8 port;
+	u8 tc;
+};
+
 struct ecore_qm_info {
 	struct init_qm_pq_params    *qm_pq_params;
 	struct init_qm_vport_params *qm_vport_params;
 	struct init_qm_port_params  *qm_port_params;
 	u16			start_pq;
-	u8			start_vport;
+	u16			start_vport;
+	u16			start_rl;
 	u16			pure_lb_pq;
-	u16			offload_pq;
+	u16			first_ofld_pq;
+	u16			first_llt_pq;
 	u16			pure_ack_pq;
 	u16			ooo_pq;
+	u16			single_vf_rdma_pq;
 	u16			first_vf_pq;
 	u16			first_mcos_pq;
 	u16			first_rl_pq;
+	u16			first_ofld_grp_pq;
 	u16			num_pqs;
 	u16			num_vf_pqs;
+	u16			ilt_pf_pqs;
 	u8			num_vports;
+	u8			num_rls;
 	u8			max_phys_tcs_per_port;
 	u8			ooo_tc;
+	bool			pq_overflow;
 	bool			pf_rl_en;
 	bool			pf_wfq_en;
 	bool			vport_rl_en;
 	bool			vport_wfq_en;
+	bool			vf_rdma_en;
+#define IS_ECORE_QM_VF_RDMA(_p_hwfn) ((_p_hwfn)->qm_info.vf_rdma_en)
 	u8			pf_wfq;
 	u32			pf_rl;
 	struct ecore_wfq_data	*wfq_data;
 	u8			num_pf_rls;
+	struct ecore_offload_pq offload_group[OFLD_GRP_SIZE];
+	u8			offload_group_count;
+#define IS_ECORE_OFLD_GRP(p_hwfn) ((p_hwfn)->qm_info.offload_group_count > 0)
+
+	/* Locks PQ getters against QM info initialization */
+	osal_spinlock_t		qm_info_lock;
 };
 
+#define ECORE_OVERFLOW_BIT	1
+
 struct ecore_db_recovery_info {
-	osal_list_t list;
-	osal_spinlock_t lock;
-	u32 db_recovery_counter;
+	osal_list_t	list;
+	osal_spinlock_t	lock;
+	u32		count;
+
+	/* PF doorbell overflow sticky indicator was cleared in the DORQ
+	 * attention callback, but still needs to execute doorbell recovery.
+	 * Full (REAL_DEAL) doorbell recovery is executed in the periodic
+	 * handler.
+	 * This value doesn't require a lock but must use atomic operations.
+	 */
+	u32		overflow; /* @DPDK */
+
+	/* Indicates that DORQ attention was handled in ecore_int_deassertion */
+	bool		dorq_attn;
 };
 
 struct storm_stats {
@@ -553,18 +772,32 @@ enum ecore_mf_mode_bit {
 	/* Use stag for steering */
 	ECORE_MF_8021AD_TAGGING,
 
+	/* Allow DSCP to TC mapping */
+	ECORE_MF_DSCP_TO_TC_MAP,
+
 	/* Allow FIP discovery fallback */
 	ECORE_MF_FIP_SPECIAL,
+
+	/* Do not insert a vlan tag with id 0 */
+	ECORE_MF_DONT_ADD_VLAN0_TAG,
+
+	/* Allow VF RDMA */
+	ECORE_MF_VF_RDMA,
+
+	/* Allow RoCE LAG */
+	ECORE_MF_ROCE_LAG,
 };
 
 enum ecore_ufp_mode {
 	ECORE_UFP_MODE_ETS,
 	ECORE_UFP_MODE_VNIC_BW,
+	ECORE_UFP_MODE_UNKNOWN
 };
 
 enum ecore_ufp_pri_type {
 	ECORE_UFP_PRI_OS,
-	ECORE_UFP_PRI_VNIC
+	ECORE_UFP_PRI_VNIC,
+	ECORE_UFP_PRI_UNKNOWN
 };
 
 struct ecore_ufp_info {
@@ -578,16 +811,85 @@ enum BAR_ID {
 	BAR_ID_1	/* Used for doorbells */
 };
 
+#ifndef __EXTRACT__LINUX__IF__
+enum ecore_lag_type {
+	ECORE_LAG_TYPE_NONE,
+	ECORE_LAG_TYPE_ACTIVEACTIVE,
+	ECORE_LAG_TYPE_ACTIVEBACKUP
+};
+#endif
+
 struct ecore_nvm_image_info {
 	u32				num_images;
 	struct bist_nvm_image_att	*image_att;
 	bool				valid;
 };
 
+#define LAG_MAX_PORT_NUM	2
+
+struct ecore_lag_info {
+	enum ecore_lag_type	lag_type;
+	void			(*link_change_cb)(void *cxt);
+	void			*cxt;
+	u8			port_num;
+	u32			active_ports; /* @DPDK */
+	u8			first_port;
+	u8			second_port;
+	bool			is_master;
+	u8			master_pf;
+};
+
+/* PWM region specific data */
+struct ecore_dpi_info {
+	u16			wid_count;
+	u32			dpi_size;
+	u32			dpi_count;
+	u32			dpi_start_offset; /* this is used to calculate
+						   * the doorbell address
+						   */
+	u32			dpi_bit_shift_addr;
+};
+
+struct ecore_common_dpm_info {
+	u8			db_bar_no_edpm;
+	u8			mfw_no_edpm;
+	bool			vf_cfg;
+};
+
+enum ecore_hsi_def_type {
+	ECORE_HSI_DEF_MAX_NUM_VFS,
+	ECORE_HSI_DEF_MAX_NUM_L2_QUEUES,
+	ECORE_HSI_DEF_MAX_NUM_PORTS,
+	ECORE_HSI_DEF_MAX_SB_PER_PATH,
+	ECORE_HSI_DEF_MAX_NUM_PFS,
+	ECORE_HSI_DEF_MAX_NUM_VPORTS,
+	ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE,
+	ECORE_HSI_DEF_MAX_QM_TX_QUEUES,
+	ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS,
+	ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+	ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS,
+	ECORE_HSI_DEF_MAX_PBF_CMD_LINES,
+	ECORE_HSI_DEF_MAX_BTB_BLOCKS,
+	ECORE_NUM_HSI_DEFS
+};
+
+enum ecore_rx_config_flags {
+	ECORE_RX_CONF_SKIP_ACCEPT_FLAGS_UPDATE,
+	ECORE_RX_CONF_SKIP_UCAST_FILTER_UPDATE,
+	ECORE_RX_CONF_SET_LB_VPORT
+};
+
+struct ecore_rx_config {
+	u32			flags; /* @DPDK */
+	u8			loopback_dst_vport_id;
+};
+
 struct ecore_hwfn {
 	struct ecore_dev		*p_dev;
 	u8				my_id;		/* ID inside the PF */
-#define IS_LEAD_HWFN(edev)		(!((edev)->my_id))
+#define IS_LEAD_HWFN(_p_hwfn)		(!((_p_hwfn)->my_id))
+#define IS_AFFIN_HWFN(_p_hwfn) \
+	((_p_hwfn) == ECORE_AFFIN_HWFN((_p_hwfn)->p_dev))
 	u8				rel_pf_id;	/* Relative to engine*/
 	u8				abs_pf_id;
 #define ECORE_PATH_ID(_p_hwfn) \
@@ -597,10 +899,11 @@ struct ecore_hwfn {
 
 	u32				dp_module;
 	u8				dp_level;
+	u32				dp_int_module;
+	u8				dp_int_level;
 	char				name[NAME_SIZE];
 	void				*dp_ctx;
 
-	bool				first_on_engine;
 	bool				hw_init_done;
 
 	u8				num_funcs_on_engine;
@@ -612,6 +915,8 @@ struct ecore_hwfn {
 	void OSAL_IOMEM			*doorbells;
 	u64				db_phys_addr;
 	unsigned long			db_size;
+	u64				reg_offset;
+	u64				db_offset;
 
 	/* PTT pool */
 	struct ecore_ptt_pool		*p_ptt_pool;
@@ -638,6 +943,11 @@ struct ecore_hwfn {
 	struct ecore_ptt		*p_main_ptt;
 	struct ecore_ptt		*p_dpc_ptt;
 
+	/* PTP will be used only by the leading function.
+	 * Usage of all PTP-apis should be synchronized as result.
+	 */
+	struct ecore_ptt		*p_ptp_ptt;
+
 	struct ecore_sb_sp_info		*p_sp_sb;
 	struct ecore_sb_attn_info	*p_sb_attn;
 
@@ -649,6 +959,7 @@ struct ecore_hwfn {
 	struct ecore_fcoe_info		*p_fcoe_info;
 	struct ecore_rdma_info		*p_rdma_info;
 	struct ecore_pf_params		pf_params;
+	bool				is_nvmetcp;
 
 	bool				b_rdma_enabled_in_prs;
 	u32				rdma_prs_search_reg;
@@ -673,8 +984,8 @@ struct ecore_hwfn {
 	/* QM init */
 	struct ecore_qm_info		qm_info;
 
-#ifdef CONFIG_ECORE_ZIPPED_FW
 	/* Buffer for unzipping firmware data */
+#ifdef CONFIG_ECORE_ZIPPED_FW
 	void *unzip_buf;
 #endif
 
@@ -682,19 +993,12 @@ struct ecore_hwfn {
 	void				*dbg_user_info;
 	struct virt_mem_desc		dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
 
-	struct z_stream_s		*stream;
-
-	/* PWM region specific data */
-	u32				dpi_size;
-	u32				dpi_count;
-	u32				dpi_start_offset; /* this is used to
-							   * calculate th
-							   * doorbell address
-							   */
+	struct ecore_dpi_info		dpi_info;
 
-	/* If one of the following is set then EDPM shouldn't be used */
+	struct ecore_common_dpm_info	dpm_info;
+	u8				roce_edpm_mode;
 	u8				dcbx_no_edpm;
-	u8				db_bar_no_edpm;
+	u8				num_vf_cnqs;
 
 	/* L2-related */
 	struct ecore_l2_info		*p_l2_info;
@@ -708,11 +1012,26 @@ struct ecore_hwfn {
 	 * struct ecore_hw_prepare_params by ecore client.
 	 */
 	bool b_en_pacing;
+	struct ecore_lag_info		lag_info;
 
 	/* Nvm images number and attributes */
-	struct ecore_nvm_image_info     nvm_info;
+	struct ecore_nvm_image_info	nvm_info;
+
+	/* Flow steering info */
+	union {
+		struct ecore_fs_info_e4	*e4;
+		struct ecore_fs_info_e5	*e5;
+		void			*info;
+	} fs_info;
 
-	struct phys_mem_desc            *fw_overlay_mem;
+	/* Flow steering statistics accuracy */
+	u8				fs_accuracy;
+
+	struct phys_mem_desc		*fw_overlay_mem;
+	enum _ecore_status_t		(*p_dummy_cb)
+					(struct ecore_hwfn *p_hwfn, void *cookie);
+	/* Rx configuration */
+	struct ecore_rx_config		rx_conf;
 
 	/* @DPDK */
 	struct ecore_ptt		*p_arfs_ptt;
@@ -722,18 +1041,22 @@ struct ecore_hwfn {
 	u32 iov_task_flags;
 };
 
+#ifndef __EXTRACT__LINUX__THROW__
 enum ecore_mf_mode {
 	ECORE_MF_DEFAULT,
 	ECORE_MF_OVLAN,
 	ECORE_MF_NPAR,
 	ECORE_MF_UFP,
 };
+#endif
 
+#ifndef __EXTRACT__LINUX__IF__
 enum ecore_dev_type {
 	ECORE_DEV_TYPE_BB,
 	ECORE_DEV_TYPE_AH,
 	ECORE_DEV_TYPE_E5,
 };
+#endif
 
 /* @DPDK */
 enum ecore_dbg_features {
@@ -765,7 +1088,12 @@ struct ecore_dev {
 	u8				dp_level;
 	char				name[NAME_SIZE];
 	void				*dp_ctx;
+	struct ecore_internal_trace	internal_trace;
+	u8				dp_int_level;
+	u32				dp_int_module;
 
+/* Allows the DP_* macros to work with cdev, hwfn, etc. */
+	struct ecore_dev		*p_dev;
 	enum ecore_dev_type		type;
 /* Translate type/revision combo into the proper conditions */
 #define ECORE_IS_BB(dev)	((dev)->type == ECORE_DEV_TYPE_BB)
@@ -781,6 +1109,8 @@ struct ecore_dev {
 #define ECORE_IS_E4(dev)	(ECORE_IS_BB(dev) || ECORE_IS_AH(dev))
 #define ECORE_IS_E5(dev)	((dev)->type == ECORE_DEV_TYPE_E5)
 
+#define ECORE_E5_MISSING_CODE	OSAL_BUILD_BUG_ON(false)
+
 	u16 vendor_id;
 	u16 device_id;
 #define ECORE_DEV_ID_MASK	0xff00
@@ -829,21 +1159,18 @@ struct ecore_dev {
 #define CHIP_BOND_ID_MASK		0xff
 #define CHIP_BOND_ID_SHIFT		0
 
-	u8				num_engines;
 	u8				num_ports;
 	u8				num_ports_in_engine;
-	u8				num_funcs_in_port;
 
 	u8				path_id;
 
-	u32				mf_bits;
+	u32				mf_bits; /* @DPDK */
+#ifndef __EXTRACT__LINUX__THROW__
 	enum ecore_mf_mode		mf_mode;
-#define IS_MF_DEFAULT(_p_hwfn)	\
-	(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
-#define IS_MF_SI(_p_hwfn)	\
-	(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
-#define IS_MF_SD(_p_hwfn)	\
-	(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+#define IS_MF_DEFAULT(_p_hwfn)	(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn)	(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn)	(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+#endif
 
 	int				pcie_width;
 	int				pcie_speed;
@@ -852,7 +1179,9 @@ struct ecore_dev {
 	u8				mcp_rev;
 	u8				boot_mode;
 
-	u8				wol;
+	/* WoL related configurations */
+	u8				wol_config;
+	u8				wol_mac[ECORE_ETH_ALEN];
 
 	u32				int_mode;
 	enum ecore_coalescing_mode	int_coalescing_mode;
@@ -907,6 +1236,7 @@ struct ecore_dev {
 	u32				rdma_max_sge;
 	u32				rdma_max_inline;
 	u32				rdma_max_srq_sge;
+	u8				ilt_page_size;
 
 	struct ecore_eth_stats		*reset_stats;
 	struct ecore_fw_data		*fw_data;
@@ -916,8 +1246,7 @@ struct ecore_dev {
 	/* Recovery */
 	bool				recov_in_prog;
 
-/* Indicates whether should prevent attentions from being reasserted */
-
+	/* Indicates whether should prevent attentions from being reasserted */
 	bool				attn_clr_en;
 
 	/* Indicates whether allowing the MFW to collect a crash dump */
@@ -926,6 +1255,9 @@ struct ecore_dev {
 	/* Indicates if the reg_fifo is checked after any register access */
 	bool				chk_reg_fifo;
 
+	/* Indicates the monitored address by ecore_rd()/ecore_wr() */
+	u32				monitored_hw_addr;
+
 #ifndef ASIC_ONLY
 	bool				b_is_emul_full;
 	bool				b_is_emul_mac;
@@ -937,6 +1269,9 @@ struct ecore_dev {
 	/* Indicates whether this PF serves a storage target */
 	bool				b_is_target;
 
+	/* Instruct driver to read statistics from the specified bin id */
+	u16				stats_bin_id;
+
 #ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
 	void				*firmware;
 	u64				fw_len;
@@ -952,23 +1287,6 @@ struct ecore_dev {
 	struct rte_pci_device		*pci_dev;
 };
 
-enum ecore_hsi_def_type {
-	ECORE_HSI_DEF_MAX_NUM_VFS,
-	ECORE_HSI_DEF_MAX_NUM_L2_QUEUES,
-	ECORE_HSI_DEF_MAX_NUM_PORTS,
-	ECORE_HSI_DEF_MAX_SB_PER_PATH,
-	ECORE_HSI_DEF_MAX_NUM_PFS,
-	ECORE_HSI_DEF_MAX_NUM_VPORTS,
-	ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE,
-	ECORE_HSI_DEF_MAX_QM_TX_QUEUES,
-	ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS,
-	ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
-	ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS,
-	ECORE_HSI_DEF_MAX_PBF_CMD_LINES,
-	ECORE_HSI_DEF_MAX_BTB_BLOCKS,
-	ECORE_NUM_HSI_DEFS
-};
-
 u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev,
 			  enum ecore_hsi_def_type type);
 
@@ -1039,6 +1357,11 @@ int ecore_device_num_ports(struct ecore_dev *p_dev);
 void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
 			   u8 *mac);
 
+#define ECORE_TOS_ECN_SHIFT	0
+#define ECORE_TOS_ECN_MASK	0x3
+#define ECORE_TOS_DSCP_SHIFT	2
+#define ECORE_TOS_DSCP_MASK	0x3f
+
 /* Flags for indication of required queues */
 #define PQ_FLAGS_RLS	(1 << 0)
 #define PQ_FLAGS_MCOS	(1 << 1)
@@ -1046,26 +1369,50 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
 #define PQ_FLAGS_OOO	(1 << 3)
 #define PQ_FLAGS_ACK	(1 << 4)
 #define PQ_FLAGS_OFLD	(1 << 5)
-#define PQ_FLAGS_VFS	(1 << 6)
-#define PQ_FLAGS_LLT	(1 << 7)
+#define PQ_FLAGS_GRP	(1 << 6)
+#define PQ_FLAGS_VFS	(1 << 7)
+#define PQ_FLAGS_LLT	(1 << 8)
+#define PQ_FLAGS_MTC	(1 << 9)
+#define PQ_FLAGS_VFR	(1 << 10)
+#define PQ_FLAGS_VSR	(1 << 11)
 
 /* physical queue index for cm context intialization */
 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_cm_pq_idx_vf_rdma(struct ecore_hwfn *p_hwfn, u16 vf);
+
 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+u16 ecore_get_cm_pq_idx_grp(struct ecore_hwfn *p_hwfn, u8 idx);
+u16 ecore_get_cm_pq_idx_ofld_mtc(struct ecore_hwfn *p_hwfn, u16 idx, u8 tc);
+u16 ecore_get_cm_pq_idx_llt_mtc(struct ecore_hwfn *p_hwfn, u16 idx, u8 tc);
+u16 ecore_get_cm_pq_idx_ll2(struct ecore_hwfn *p_hwfn, u8 tc);
 
-/* qm vport for rate limit configuration */
-u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+/* qm vport/rl for rate limit configuration */
+u16 ecore_get_pq_vport_id_from_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+u16 ecore_get_pq_vport_id_from_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_pq_rl_id_from_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+u16 ecore_get_pq_rl_id_from_vf(struct ecore_hwfn *p_hwfn, u16 vf);
 
 const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
 
 /* doorbell recovery mechanism */
 void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
-void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
-			       enum ecore_db_rec_exec);
-
-bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn);
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t
+ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			 u32 usage_cnt_reg, u32 *count);
+#define ECORE_DB_REC_COUNT	1000
+#define ECORE_DB_REC_INTERVAL	100
+
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn,
+			struct ecore_common_dpm_info *dpm_info);
+
+enum _ecore_status_t ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
+					    struct ecore_ptt *p_ptt,
+					    struct ecore_dpi_info *dpi_info,
+					    u32 pwm_region_size,
+					    u32 n_cpus);
 
 /* amount of resources used in qm init */
 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
@@ -1074,9 +1421,18 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
 
+void ecore_hw_info_set_offload_tc(struct ecore_hw_info *p_info, u8 tc);
+u8 ecore_get_offload_tc(struct ecore_hwfn *p_hwfn);
+
 #define MFW_PORT(_p_hwfn)	((_p_hwfn)->abs_pf_id % \
 				 ecore_device_num_ports((_p_hwfn)->p_dev))
 
+enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev, u8 rel_ppfid,
+				     u8 *p_abs_ppfid);
+enum _ecore_status_t ecore_llh_map_ppfid_to_pfid(struct ecore_hwfn *p_hwfn,
+						 struct ecore_ptt *p_ptt,
+						 u8 ppfid, u8 pfid);
+
 /* The PFID<->PPFID calculation is based on the relative index of a PF on its
  * port. In BB there is a bug in the LLH in which the PPFID is actually engine
  * based, and thus it equals the PFID.
@@ -1110,6 +1466,12 @@ enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);
 void ecore_set_platform_str(struct ecore_hwfn *p_hwfn,
 			    char *buf_str, u32 buf_size);
 
+#define LNX_STATIC
+#define IFDEF_HAS_IFLA_VF_RATE
+#define ENDIF_HAS_IFLA_VF_RATE
+#define IFDEF_DEFINE_IFLA_VF_SPOOFCHK
+#define ENDIF_DEFINE_IFLA_VF_SPOOFCHK
+
 #define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
 #define TSTORM_QZONE_SIZE(dev) \
 	(ECORE_IS_E4(dev) ? TSTORM_QZONE_SIZE_E4 : TSTORM_QZONE_SIZE_E5)
diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h
index ec773fbdd..ff4fc5c38 100644
--- a/drivers/net/qede/base/ecore_attn_values.h
+++ b/drivers/net/qede/base/ecore_attn_values.h
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
 
 #ifndef __ATTN_VALUES_H__
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 221cec22b..c22f97f7d 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #include "bcm_osal.h"
 #include "reg_addr.h"
 #include "common_hsi.h"
@@ -37,13 +37,11 @@
 #define TM_ELEM_SIZE	4
 
 /* ILT constants */
-#define ILT_DEFAULT_HW_P_SIZE	4
-
 #define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg)		PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
 
 /* ILT entry structure */
-#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
 #define ILT_ENTRY_PHY_ADDR_SHIFT	0
 #define ILT_ENTRY_VALID_MASK		0x1ULL
 #define ILT_ENTRY_VALID_SHIFT		52
@@ -81,11 +79,11 @@ union e5_type1_task_context {
 };
 
 struct src_ent {
-	u8 opaque[56];
+	u8  opaque[56];
 	u64 next;
 };
 
-#define CDUT_SEG_ALIGNMET 3	/* in 4k chunks */
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
 #define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
 
 #define CONN_CXT_SIZE(p_hwfn) \
@@ -93,8 +91,6 @@ struct src_ent {
 	 ALIGNED_TYPE_SIZE(union e4_conn_context, (p_hwfn)) : \
 	 ALIGNED_TYPE_SIZE(union e5_conn_context, (p_hwfn)))
 
-#define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */
-
 #define TYPE0_TASK_CXT_SIZE(p_hwfn) \
 	(ECORE_IS_E4(((p_hwfn)->p_dev)) ? \
 	 ALIGNED_TYPE_SIZE(union e4_type0_task_context, (p_hwfn)) : \
@@ -117,9 +113,12 @@ static bool src_proto(enum protocol_type type)
 		type == PROTOCOLID_IWARP;
 }
 
-static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
+static bool tm_cid_proto(enum protocol_type type)
 {
-	return type == PROTOCOLID_TOE;
+	return type == PROTOCOLID_ISCSI ||
+	       type == PROTOCOLID_FCOE  ||
+	       type == PROTOCOLID_ROCE  ||
+	       type == PROTOCOLID_IWARP;
 }
 
 static bool tm_tid_proto(enum protocol_type type)
@@ -133,8 +132,8 @@ struct ecore_cdu_iids {
 	u32 per_vf_cids;
 };
 
-static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
-			       struct ecore_cdu_iids *iids)
+static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr   *p_mngr,
+			       struct ecore_cdu_iids	*iids)
 {
 	u32 type;
 
@@ -146,8 +145,8 @@ static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
 
 /* counts the iids for the Searcher block configuration */
 struct ecore_src_iids {
-	u32 pf_cids;
-	u32 per_vf_cids;
+	u32			pf_cids;
+	u32			per_vf_cids;
 };
 
 static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
@@ -156,6 +155,9 @@ static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
 	u32 i;
 
 	for (i = 0; i < MAX_CONN_TYPES; i++) {
+		if (!src_proto(i))
+			continue;
+
 		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
 		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
 	}
@@ -167,24 +169,39 @@ static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
 /* counts the iids for the Timers block configuration */
 struct ecore_tm_iids {
 	u32 pf_cids;
-	u32 pf_tids[NUM_TASK_PF_SEGMENTS];	/* per segment */
+	u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
 	u32 pf_tids_total;
 	u32 per_vf_cids;
 	u32 per_vf_tids;
 };
 
 static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
-			      struct ecore_cxt_mngr *p_mngr,
 			      struct ecore_tm_iids *iids)
 {
+	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct ecore_conn_type_cfg *p_cfg;
 	bool tm_vf_required = false;
 	bool tm_required = false;
-	u32 i, j;
+	int i, j;
 
-	for (i = 0; i < MAX_CONN_TYPES; i++) {
+	/* Timers is a special case -> we don't count how many cids require
+	 * timers but what's the max cid that will be used by the timer block.
+	 * Therefore we traverse in reverse order, and once we hit a protocol
+	 * that requires the timers memory, we'll sum all the protocols up
+	 * to that one.
+	 */
+	for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
 		p_cfg = &p_mngr->conn_cfg[i];
 
+		/* In E5 the CORE CIDs are allocated first, and not according to
+		 * its 'enum protocol_type' value. To not miss the count of the
+		 * CORE CIDs by a protocol that requires the timers memory, but
+		 * with a lower 'enum protocol_type' value - the addition of the
+		 * CORE CIDs is done outside the loop.
+		 */
+		if (ECORE_IS_E5(p_hwfn->p_dev) && (i == PROTOCOLID_CORE))
+			continue;
+
 		if (tm_cid_proto(i) || tm_required) {
 			if (p_cfg->cid_count)
 				tm_required = true;
@@ -196,6 +213,7 @@ static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
 			if (p_cfg->cids_per_vf)
 				tm_vf_required = true;
 
+			iids->per_vf_cids += p_cfg->cids_per_vf;
 		}
 
 		if (tm_tid_proto(i)) {
@@ -215,6 +233,15 @@ static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
 		}
 	}
 
+	if (ECORE_IS_E5(p_hwfn->p_dev)) {
+		p_cfg = &p_mngr->conn_cfg[PROTOCOLID_CORE];
+
+		if (tm_required)
+			iids->pf_cids += p_cfg->cid_count;
+		if (tm_vf_required)
+			iids->per_vf_cids += p_cfg->cids_per_vf;
+	}
+
 	iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
 	iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
 	iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
@@ -251,7 +278,7 @@ static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
 		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
 	}
 
-	iids->vf_cids += vf_cids * p_mngr->vf_count;
+	iids->vf_cids = vf_cids;
 	iids->tids += vf_tids * p_mngr->vf_count;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
@@ -259,8 +286,8 @@ static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
 		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
 }
 
-static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
-						    u32 seg)
+static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn   *p_hwfn,
+						    u32			seg)
 {
 	struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
 	u32 i;
@@ -275,24 +302,18 @@ static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
 	return OSAL_NULL;
 }
 
-static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
-{
-	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
-	p_mgr->srq_count = num_srqs;
-}
-
-u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
+/* This function was written under the assumption that all the ILT clients
+ * share the same ILT page size (although it is not required).
+ */
+u32 ecore_cxt_get_ilt_page_size(struct ecore_hwfn *p_hwfn)
 {
-	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
-	return p_mgr->srq_count;
+	return ILT_PAGE_IN_BYTES(p_hwfn->p_dev->ilt_page_size);
 }
 
 /* set the iids (cid/tid) count per protocol */
 static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
-				   enum protocol_type type,
-				   u32 cid_count, u32 vf_cid_cnt)
+					  enum protocol_type type,
+					  u32 cid_count, u32 vf_cid_cnt)
 {
 	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 	struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
@@ -301,8 +322,9 @@ static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
 	p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
 }
 
-u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
-				  enum protocol_type type, u32 *vf_cid)
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn	*p_hwfn,
+				  enum protocol_type	type,
+				  u32			*vf_cid)
 {
 	if (vf_cid)
 		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
@@ -310,28 +332,41 @@ u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
 	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
 }
 
-u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
-				  enum protocol_type type)
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn	*p_hwfn,
+				  enum protocol_type	type,
+				  u8			vf_id)
 {
-	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+	if (vf_id != ECORE_CXT_PF_CID)
+		return p_hwfn->p_cxt_mngr->acquired_vf[type][vf_id].start_cid;
+	else
+		return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
 }
 
 u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
-					 enum protocol_type type)
+				  enum protocol_type type,
+				  u8		     vf_id)
 {
+	struct ecore_conn_type_cfg *p_conn_cfg;
 	u32 cnt = 0;
 	int i;
 
-	for (i = 0; i < TASK_SEGMENTS; i++)
-		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+	p_conn_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[type];
+
+	if (vf_id != ECORE_CXT_PF_CID)
+		return p_conn_cfg->tid_seg[TASK_SEGMENT_VF].count;
+
+	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++)
+		cnt += p_conn_cfg->tid_seg[i].count;
 
 	return cnt;
 }
 
-static OSAL_INLINE void
-ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
-			      enum protocol_type proto,
-			      u8 seg, u8 seg_type, u32 count, bool has_fl)
+static void ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
+					  enum protocol_type proto,
+					  u8 seg,
+					  u8 seg_type,
+					  u32 count,
+					  bool has_fl)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
@@ -342,12 +377,13 @@ ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
 }
 
 /* the *p_line parameter must be either 0 for the first invocation or the
- * value returned in the previous invocation.
+ * value returned in the previous invocation.
  */
-static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
-				   struct ecore_ilt_cli_blk *p_blk,
-				   u32 start_line,
-				   u32 total_size, u32 elem_size)
+static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg	*p_cli,
+				   struct ecore_ilt_cli_blk	*p_blk,
+				   u32				start_line,
+				   u32				total_size,
+				   u32				elem_size)
 {
 	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
 
@@ -362,10 +398,11 @@ static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
 	p_blk->start_line = start_line;
 }
 
-static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
-				   struct ecore_ilt_client_cfg *p_cli,
-				   struct ecore_ilt_cli_blk *p_blk,
-				   u32 *p_line, enum ilt_clients client_id)
+static void ecore_ilt_cli_adv_line(struct ecore_hwfn		*p_hwfn,
+				    struct ecore_ilt_client_cfg	*p_cli,
+				    struct ecore_ilt_cli_blk	*p_blk,
+				    u32				*p_line,
+				    enum ilt_clients		client_id)
 {
 	if (!p_blk->total_size)
 		return;
@@ -378,8 +415,7 @@ static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
 	p_cli->last.val = *p_line - 1;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
-		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
-		   " [Real %08x] Start line %d\n",
+		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
 		   client_id, p_cli->first.val, p_cli->last.val,
 		   p_blk->total_size, p_blk->real_size_in_page,
 		   p_blk->start_line);
@@ -388,7 +424,7 @@ static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
 static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,
 					     enum ilt_clients ilt_client,
 					     u32 *dynamic_line_offset,
-					     u32 *dynamic_line_cnt)
+					     u32 *dynamic_line_cnt, u8 is_vf)
 {
 	struct ecore_ilt_client_cfg *p_cli;
 	struct ecore_conn_type_cfg *p_cfg;
@@ -404,9 +440,23 @@ static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,
 		p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE];
 
 		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
-		    (u32)CONN_CXT_SIZE(p_hwfn);
-
-		*dynamic_line_cnt = p_cfg->cid_count / cxts_per_p;
+			     (u32)CONN_CXT_SIZE(p_hwfn);
+
+		*dynamic_line_cnt = is_vf ? p_cfg->cids_per_vf / cxts_per_p :
+					    p_cfg->cid_count / cxts_per_p;
+
+		/* In E5 the CORE CIDs are allocated before the ROCE CIDs */
+		if (*dynamic_line_cnt && ECORE_IS_E5(p_hwfn->p_dev)) {
+			u32 roce_cid_cnt = is_vf ? p_cfg->cids_per_vf :
+					   p_cfg->cid_count;
+			u32 core_cid_cnt;
+
+			p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_CORE];
+			core_cid_cnt = p_cfg->cid_count;
+			*dynamic_line_offset = 1 + (core_cid_cnt / cxts_per_p);
+			*dynamic_line_cnt = ((core_cid_cnt + roce_cid_cnt) /
+					     cxts_per_p) - *dynamic_line_offset;
+		}
 	}
 }
 
@@ -424,7 +474,7 @@ ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk)
 {
 	p_blk->total_size = 0;
 	return p_blk;
-	}
+}
 
 static u32
 ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
@@ -435,9 +485,9 @@ ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
 	OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
 	ecore_cxt_src_iids(p_mngr, &src_iids);
 
-	/* Both the PF and VFs searcher connections are stored in the per PF
-	 * database. Thus sum the PF searcher cids and all the VFs searcher
-	 * cids.
+	/* Both the PF and VFs searcher connections are stored
+	 * in the per PF database. Thus sum the PF searcher
+	 * cids and all the VFs searcher cids.
 	 */
 	elem_num = src_iids.pf_cids +
 		   src_iids.per_vf_cids * p_mngr->vf_count;
@@ -450,16 +500,34 @@ ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
 	return elem_num;
 }
 
-enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
+static void eocre_ilt_blk_reset(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+	u32 cli_idx, blk_idx;
+
+	for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+		for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+			clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+		for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+			clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+	}
+}
+
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
+					       u32 *line_count)
 {
-	u32 curr_line, total, i, task_size, line, total_size, elem_size;
+	u32 total, i, task_size, line, total_size, elem_size;
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u32 curr_line, prev_line, total_lines;
 	struct ecore_ilt_client_cfg *p_cli;
 	struct ecore_ilt_cli_blk *p_blk;
 	struct ecore_cdu_iids cdu_iids;
 	struct ecore_qm_iids qm_iids;
 	struct ecore_tm_iids tm_iids;
 	struct ecore_tid_seg *p_seg;
+	u16 num_vf_pqs;
+	int ret;
 
 	OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
 	OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
@@ -467,8 +535,14 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 
 	p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
 
+	/* Reset all the ILT blocks at the beginning of ILT compute - this
+	 * is done in order to prevent memory allocation for irrelevant blocks
+	 * afterwards (e.g. VF timer block after disabling VF-RDMA).
+	 */
+	eocre_ilt_blk_reset(p_hwfn);
+
 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
-		   "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
+		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
 		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
 
 	/* CDUC */
@@ -494,7 +568,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 
 	ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,
 					 &p_blk->dynamic_line_offset,
-					 &p_blk->dynamic_line_cnt);
+					 &p_blk->dynamic_line_cnt, IOV_PF);
 
 	/* CDUC VF */
 	p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
@@ -510,6 +584,10 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 				       ILT_CLI_CDUC);
 
+	ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,
+					 &p_blk->dynamic_line_offset,
+					 &p_blk->dynamic_line_cnt, IOV_VF);
+
 	/* CDUT PF */
 	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
 	p_cli->first.val = curr_line;
@@ -525,8 +603,39 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
 				       p_mngr->task_type_size[p_seg->type]);
 
+		prev_line = curr_line;
 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 				       ILT_CLI_CDUT);
+		total_lines = curr_line - prev_line;
+
+		switch (i) {
+		case ECORE_CXT_ISCSI_TID_SEG:
+			p_mngr->iscsi_task_pages = (u16)total_lines;
+			DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+				   "CDUT ILT Info: iscsi_task_pages %hu\n",
+				   p_mngr->iscsi_task_pages);
+			break;
+		case ECORE_CXT_FCOE_TID_SEG:
+			p_mngr->fcoe_task_pages = (u16)total_lines;
+			DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+				   "CDUT ILT Info: fcoe_task_pages %hu\n",
+				   p_mngr->fcoe_task_pages);
+			break;
+		case ECORE_CXT_ROCE_TID_SEG:
+			p_mngr->roce_task_pages = (u16)total_lines;
+			DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+				   "CDUT ILT Info: roce_task_pages %hu\n",
+				   p_mngr->roce_task_pages);
+			break;
+		case ECORE_CXT_ETH_TID_SEG:
+			p_mngr->eth_task_pages = (u16)total_lines;
+			DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+				   "CDUT ILT Info: eth_task_pages %hu\n",
+				   p_mngr->eth_task_pages);
+			break;
+		default:
+			break;
+		}
 	}
 
 	/* next the 'init' task memory (forced load memory) */
@@ -535,8 +644,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 		if (!p_seg || p_seg->count == 0)
 			continue;
 
-		p_blk =
-		     ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
+		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
 
 		if (!p_seg->has_fl_mem) {
 			/* The segment is active (total size pf 'working'
@@ -590,8 +698,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 				       ILT_CLI_CDUT);
 
 		/* 'init' memory */
-		p_blk =
-		     ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
+		p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
 		if (!p_seg->has_fl_mem) {
 			/* see comment above */
 			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
@@ -599,7 +706,8 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 		} else {
 			task_size = p_mngr->task_type_size[p_seg->type];
 			ecore_ilt_cli_blk_fill(p_cli, p_blk,
-					       curr_line, total, task_size);
+					       curr_line, total,
+					       task_size);
 			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 					       ILT_CLI_CDUT);
 		}
@@ -624,23 +732,29 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
 	p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
 
+	ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+
 	/* At this stage, after the first QM configuration, the PF PQs amount
 	 * is the highest possible. Save this value at qm_info->ilt_pf_pqs to
 	 * detect overflows in the future.
 	 * Even though VF PQs amount can be larger than VF count, use vf_count
 	 * because each VF requires only the full amount of CIDs.
 	 */
-	ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+	qm_info->ilt_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+	if (ECORE_IS_VF_RDMA(p_hwfn))
+		num_vf_pqs = RESC_NUM(p_hwfn, ECORE_PQ) - qm_info->ilt_pf_pqs;
+	else
+		num_vf_pqs = (u16)p_mngr->vf_count;
+
 	total = ecore_qm_pf_mem_size(p_hwfn, qm_iids.cids,
 				     qm_iids.vf_cids, qm_iids.tids,
-				     p_hwfn->qm_info.num_pqs + OFLD_GRP_SIZE,
-				     p_hwfn->qm_info.num_vf_pqs);
+				     qm_info->ilt_pf_pqs,
+				     num_vf_pqs);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
-		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
-		   " num_vf_pqs=%d, memory_size=%d)\n",
+		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, pf_pqs=%d, vf_pqs=%d, memory_size=%d)\n",
 		   qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
-		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+		   qm_info->ilt_pf_pqs, p_mngr->vf_count, total);
 
 	ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
 			       QM_PQ_ELEMENT_SIZE);
@@ -650,7 +764,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 
 	/* TM PF */
 	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
-	ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
+	ecore_cxt_tm_iids(p_hwfn, &tm_iids);
 	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
 	if (total) {
 		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
@@ -668,12 +782,14 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 	if (total) {
 		p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]);
 		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
-				       total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+				       total * TM_ELEM_SIZE,
+				       TM_ELEM_SIZE);
 
 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 				       ILT_CLI_TM);
 
 		p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
 		for (i = 1; i < p_mngr->vf_count; i++) {
 			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 					       ILT_CLI_TM);
@@ -696,30 +812,54 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 		p_cli->pf_total_lines = curr_line - p_blk->start_line;
 	}
 
-	/* TSDM (SRQ CONTEXT) */
-	total = ecore_cxt_get_srq_count(p_hwfn);
-
-	if (total) {
-		p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
-		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
-		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
-				       total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
-
-		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
-				       ILT_CLI_TSDM);
-		p_cli->pf_total_lines = curr_line - p_blk->start_line;
-	}
+	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
 
 	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
 	    RESC_NUM(p_hwfn, ECORE_ILT)) {
-		DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
-		       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
 		return ECORE_INVAL;
 	}
 
 	return ECORE_SUCCESS;
 }
 
+u32 ecore_cxt_cfg_ilt_compute_excess(struct ecore_hwfn *p_hwfn, u32 used_lines)
+{
+	struct ecore_ilt_client_cfg *p_cli;
+	u32 excess_lines, available_lines;
+	struct ecore_cxt_mngr *p_mngr;
+	u32 ilt_page_size, elem_size;
+	struct ecore_tid_seg *p_seg;
+	int i;
+
+	available_lines = RESC_NUM(p_hwfn, ECORE_ILT);
+	excess_lines = used_lines - available_lines;
+
+	if (!excess_lines)
+		return 0;
+
+	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+		return 0;
+
+	p_mngr = p_hwfn->p_cxt_mngr;
+	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+		if (!p_seg || p_seg->count == 0)
+			continue;
+
+		elem_size = p_mngr->task_type_size[p_seg->type];
+		if (!elem_size)
+			continue;
+
+		return (ilt_page_size / elem_size) * excess_lines;
+	}
+
+	DP_ERR(p_hwfn, "failed computing excess ILT lines\n");
+	return 0;
+}
+
 static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
@@ -810,6 +950,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
 	if (rc)
 		goto t2_fail;
 
+
 	/* Set the t2 pointers */
 
 	/* entries per page - must be a power of two */
@@ -829,7 +970,8 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
 		u32 j;
 
 		for (j = 0; j < ent_num - 1; j++) {
-			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+			val = p_ent_phys +
+			      (j + 1) * sizeof(struct src_ent);
 			entries[j].next = OSAL_CPU_TO_BE64(val);
 		}
 
@@ -849,11 +991,11 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
 	return rc;
 }
 
-#define for_each_ilt_valid_client(pos, clients)		\
-	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)		\
-		if (!clients[pos].active) {		\
-			continue;			\
-		} else					\
+#define for_each_ilt_valid_client(pos, clients)	\
+	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)	\
+		if (!(clients)[pos].active) {	\
+			continue;		\
+		} else				\
 
 
 /* Total number of ILT lines used by this PF */
@@ -885,24 +1027,26 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
 
 		if (p_dma->virt_addr)
 			OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-					       p_dma->p_virt,
-					       p_dma->phys_addr, p_dma->size);
+					       p_dma->virt_addr,
+					       p_dma->phys_addr,
+					       p_dma->size);
 		p_dma->virt_addr = OSAL_NULL;
 	}
 	OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
 	p_mngr->ilt_shadow = OSAL_NULL;
 }
 
-static enum _ecore_status_t
-ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
-		    struct ecore_ilt_cli_blk *p_blk,
-		    enum ilt_clients ilt_client, u32 start_line_offset)
+static enum _ecore_status_t ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
+						struct ecore_ilt_cli_blk *p_blk,
+						enum ilt_clients ilt_client,
+						u32 start_line_offset)
 {
 	struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
 	u32 lines, line, sz_left, lines_to_skip, first_skipped_line;
 
 	/* Special handling for RoCE that supports dynamic allocation */
-	if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
+	if (ECORE_IS_RDMA_PERSONALITY(p_hwfn) &&
+	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
 		return ECORE_SUCCESS;
 
 	if (!p_blk->total_size)
@@ -910,7 +1054,8 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
 
 	sz_left = p_blk->total_size;
 	lines_to_skip = p_blk->dynamic_line_cnt;
-	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) -
+		lines_to_skip;
 	line = p_blk->start_line + start_line_offset -
 	       p_hwfn->p_cxt_mngr->pf_start_line;
 	first_skipped_line = line + p_blk->dynamic_line_offset;
@@ -926,7 +1071,6 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
 		}
 
 		size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
-
 /* @DPDK */
 #define ILT_BLOCK_ALIGN_SIZE 0x1000
 		p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
@@ -941,9 +1085,8 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
 		ilt_shadow[line].size = size;
 
 		DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
-			   "ILT shadow: Line [%d] Physical 0x%lx"
-			   " Virtual %p Size %d\n",
-			   line, (unsigned long)p_phys, p_virt, size);
+			   "ILT shadow: Line [%d] Physical 0x%" PRIx64 " Virtual %p Size %d\n",
+			   line, (u64)p_phys, p_virt, size);
 
 		sz_left -= size;
 		line++;
@@ -955,7 +1098,7 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
 
 static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
 {
-	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct ecore_cxt_mngr *p_mngr  = p_hwfn->p_cxt_mngr;
 	struct ecore_ilt_client_cfg *clients = p_mngr->clients;
 	struct ecore_ilt_cli_blk *p_blk;
 	u32 size, i, j, k;
@@ -965,7 +1108,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
 	p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 					 size * sizeof(struct phys_mem_desc));
 
-	if (!p_mngr->ilt_shadow) {
+	if (p_mngr->ilt_shadow == OSAL_NULL) {
 		DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
 		rc = ECORE_NOMEM;
 		goto ilt_shadow_fail;
@@ -995,6 +1138,8 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
 		}
 	}
 
+	p_mngr->ilt_shadow_size = size;
+
 	return ECORE_SUCCESS;
 
 ilt_shadow_fail:
@@ -1013,6 +1158,9 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
 		p_mngr->acquired[type].max_count = 0;
 		p_mngr->acquired[type].start_cid = 0;
 
+		if (!p_mngr->acquired_vf[type])
+			continue;
+
 		for (vf = 0; vf < max_num_vfs; vf++) {
 			OSAL_FREE(p_hwfn->p_dev,
 				  p_mngr->acquired_vf[type][vf].cid_map);
@@ -1025,8 +1173,8 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
 
 static enum _ecore_status_t
 __ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
-			   u32 cid_start, u32 cid_count,
-			   struct ecore_cid_acquired_map *p_map)
+			     u32 cid_start, u32 cid_count,
+			     struct ecore_cid_acquired_map *p_map)
 {
 	u32 size;
 
@@ -1060,8 +1208,8 @@ ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, u32 start_cid,
 
 	p_cfg = &p_mngr->conn_cfg[type];
 
-		/* Handle PF maps */
-		p_map = &p_mngr->acquired[type];
+	/* Handle PF maps */
+	p_map = &p_mngr->acquired[type];
 	rc = __ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
 					  p_cfg->cid_count, p_map);
 	if (rc != ECORE_SUCCESS)
@@ -1086,7 +1234,23 @@ static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
 	u32 type;
 	enum _ecore_status_t rc;
 
+	/* Set the CORE CIDs to be first so it can have a global range ID */
+	if (ECORE_IS_E5(p_hwfn->p_dev)) {
+		rc = ecore_cid_map_alloc_single(p_hwfn, PROTOCOLID_CORE,
+						start_cid, vf_start_cid);
+		if (rc != ECORE_SUCCESS)
+			goto cid_map_fail;
+
+		start_cid = p_mngr->conn_cfg[PROTOCOLID_CORE].cid_count;
+
+		/* Add to VFs the required offset to be after the CORE CIDs */
+		vf_start_cid = start_cid;
+	}
+
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
+		if (ECORE_IS_E5(p_hwfn->p_dev) && (type == PROTOCOLID_CORE))
+			continue;
+
 		rc = ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
 						vf_start_cid);
 		if (rc != ECORE_SUCCESS)
@@ -1107,6 +1271,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cid_acquired_map *acquired_vf;
 	struct ecore_ilt_client_cfg *clients;
+	struct ecore_hw_sriov_info *p_iov;
 	struct ecore_cxt_mngr *p_mngr;
 	u32 i, max_num_vfs;
 
@@ -1116,6 +1281,9 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
 		return ECORE_NOMEM;
 	}
 
+	/* Set the cxt mngr pointer prior to further allocations */
+	p_hwfn->p_cxt_mngr = p_mngr;
+
 	/* Initialize ILT client registers */
 	clients = p_mngr->clients;
 	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
@@ -1144,16 +1312,22 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
 
 	/* default ILT page size for all clients is 64K */
 	for (i = 0; i < MAX_ILT_CLIENTS; i++)
-		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+		p_mngr->clients[i].p_size.val = p_hwfn->p_dev->ilt_page_size;
 
+	/* Initialize task sizes */
 	/* due to removal of ISCSI/FCoE files union type0_task_context
 	 * task_type_size will be 0. So hardcoded for now.
 	 */
 	p_mngr->task_type_size[0] = 512; /* @DPDK */
 	p_mngr->task_type_size[1] = 128; /* @DPDK */
 
-	if (p_hwfn->p_dev->p_iov_info)
-		p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
+	p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
+
+	p_iov = p_hwfn->p_dev->p_iov_info;
+	if (p_iov) {
+		p_mngr->vf_count = p_iov->total_vfs;
+		p_mngr->first_vf_in_pf = p_iov->first_vf_in_pf;
+	}
 
 	/* Initialize the dynamic ILT allocation mutex */
 #ifdef CONFIG_ECORE_LOCK_ALLOC
@@ -1164,9 +1338,6 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
 #endif
 	OSAL_MUTEX_INIT(&p_mngr->mutex);
 
-	/* Set the cxt mangr pointer prior to further allocations */
-	p_hwfn->p_cxt_mngr = p_mngr;
-
 	max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
 	for (i = 0; i < MAX_CONN_TYPES; i++) {
 		acquired_vf = OSAL_CALLOC(p_hwfn->p_dev, GFP_KERNEL,
@@ -1177,7 +1348,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
 			return ECORE_NOMEM;
 		}
 
-		p_mngr->acquired_vf[i] = acquired_vf;
+		p_hwfn->p_cxt_mngr->acquired_vf[i] = acquired_vf;
 	}
 
 	return ECORE_SUCCESS;
@@ -1185,7 +1356,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
 
 enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
 {
-	enum _ecore_status_t rc;
+	enum _ecore_status_t    rc;
 
 	/* Allocate the ILT shadow table */
 	rc = ecore_ilt_shadow_alloc(p_hwfn);
@@ -1194,10 +1365,11 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
 		goto tables_alloc_fail;
 	}
 
-	/* Allocate the T2  table */
+	/* Allocate the T2 tables */
 	rc = ecore_cxt_src_t2_alloc(p_hwfn);
 	if (rc) {
-		DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to allocate src T2 memory\n");
 		goto tables_alloc_fail;
 	}
 
@@ -1334,7 +1506,7 @@ void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
 
 static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
 {
-	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+	u32 page_sz, elems_per_page, block_waste,  cxt_size, cdu_params = 0;
 
 	/* CDUC - connection configuration */
 	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
@@ -1390,7 +1562,8 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
 		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
 		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
 		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
-		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET,
+		CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET
 	};
 
 	static const u32 rt_type_offset_fl_arr[] = {
@@ -1404,7 +1577,6 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
 
 	/* There are initializations only for CDUT during pf Phase */
 	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
-		/* Segment 0 */
 		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
 		if (!p_seg)
 			continue;
@@ -1415,22 +1587,40 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
 		 * Page size is larger than 32K!
 		 */
 		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
-			  (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
-			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+			 (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+			  p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
 
 		cdu_seg_params = 0;
 		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
 		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
-		STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+		STORE_RT_REG(p_hwfn, rt_type_offset_arr[i],
+			     cdu_seg_params);
 
 		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
-			  (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
-			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+			 (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+			  p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+		cdu_seg_params = 0;
+		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+		STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i],
+			     cdu_seg_params);
+	}
 
+	/* Init VF (single) segment */
+	p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+	if (p_seg) {
+		/* VF has a single segment so the offset is 0 by definition.
+		 * The offset expresses where this segment starts relative to
+		 * the VF section in CDUT. Since there is a single segment it
+		 * will be 0 by definition
+		 */
+		offset = 0;
 		cdu_seg_params = 0;
 		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
 		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
-		STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+		STORE_RT_REG(p_hwfn, rt_type_offset_arr[TASK_SEGMENT_VF],
+			     cdu_seg_params);
 	}
 }
 
@@ -1459,12 +1649,35 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 /* CM PF */
 static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
 {
-	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
-		     ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
+	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, ecore_get_cm_pq_idx(p_hwfn,
+		     PQ_FLAGS_LB));
+}
+
+#define GLB_MAX_ICID_RT_OFFSET(id) \
+	DORQ_REG_GLB_MAX_ICID_ ## id ## _RT_OFFSET
+#define GLB_RANGE2CONN_TYPE_RT_OFFSET(id) \
+	DORQ_REG_GLB_RANGE2CONN_TYPE_ ## id ## _RT_OFFSET
+
+static void ecore_dq_init_common(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 dq_core_max_cid;
+
+	if (!ECORE_IS_E5(p_hwfn->p_dev))
+		return;
+
+	dq_core_max_cid = p_mngr->conn_cfg[PROTOCOLID_CORE].cid_count >>
+			  DQ_RANGE_SHIFT;
+	STORE_RT_REG(p_hwfn, GLB_MAX_ICID_RT_OFFSET(0), dq_core_max_cid);
+
+	/* Range ID #1 is an empty range */
+	STORE_RT_REG(p_hwfn, GLB_MAX_ICID_RT_OFFSET(1), dq_core_max_cid);
+
+	STORE_RT_REG(p_hwfn, GLB_RANGE2CONN_TYPE_RT_OFFSET(0), PROTOCOLID_CORE);
 }
 
 /* DQ PF */
-static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+static void ecore_dq_init_pf_e4(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
@@ -1505,11 +1718,10 @@ static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
 	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
 
-	/* Connection types 6 & 7 are not in use, yet they must be configured
-	 * as the highest possible connection. Not configuring them means the
-	 * defaults will be  used, and with a large number of cids a bug may
-	 * occur, if the defaults will be smaller than dq_pf_max_cid /
-	 * dq_vf_max_cid.
+	/* Connection types 6 & 7 are not in use, but still must be configured
+	 * as the highest possible connection. Not configuring them means that
+	 * the defaults will be used, and with a large number of cids a bug may
+	 * occur, if the defaults are smaller than dq_pf_max_cid/dq_vf_max_cid.
 	 */
 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
@@ -1518,6 +1730,90 @@ static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
 }
 
+#define PRV_MAX_ICID_RT_OFFSET(pfvf, id) \
+	DORQ_REG_PRV_ ## pfvf ##  _MAX_ICID_ ## id ## _RT_OFFSET
+#define PRV_RANGE2CONN_TYPE_RT_OFFSET(pfvf, id) \
+	DORQ_REG_PRV_ ## pfvf ## _RANGE2CONN_TYPE_ ## id ## _RT_OFFSET
+
+static void ecore_dq_init_pf_e5(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 dq_pf_max_cid, dq_vf_max_cid, type;
+
+	/* The private ranges should start after the CORE's global range */
+	dq_pf_max_cid = p_mngr->conn_cfg[PROTOCOLID_CORE].cid_count >>
+			DQ_RANGE_SHIFT;
+	dq_vf_max_cid = dq_pf_max_cid;
+
+	/* Range ID #2 */
+	if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
+		type = PROTOCOLID_ISCSI;
+	else if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
+		type = PROTOCOLID_FCOE;
+	else if (ECORE_IS_ROCE_PERSONALITY(p_hwfn))
+		type = PROTOCOLID_ROCE;
+	else /* ETH or ETH_IWARP */
+		type = PROTOCOLID_ETH;
+
+	dq_pf_max_cid += p_mngr->conn_cfg[type].cid_count >> DQ_RANGE_SHIFT;
+	dq_vf_max_cid += p_mngr->conn_cfg[type].cids_per_vf >> DQ_RANGE_SHIFT;
+	STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 2), dq_pf_max_cid);
+	STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(PF, 2), type);
+	STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 2), dq_vf_max_cid);
+	STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(VF, 2), type);
+
+	/* Range ID #3 */
+	if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
+		dq_pf_max_cid += p_mngr->conn_cfg[PROTOCOLID_ETH].cid_count >>
+				 DQ_RANGE_SHIFT;
+		dq_vf_max_cid +=
+			p_mngr->conn_cfg[PROTOCOLID_ETH].cids_per_vf >>
+			DQ_RANGE_SHIFT;
+		STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 3),
+			     dq_pf_max_cid);
+		STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(PF, 3),
+			     PROTOCOLID_ETH);
+		STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 3),
+			     dq_vf_max_cid);
+		STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(VF, 3),
+			     PROTOCOLID_ETH);
+	} else if (ECORE_IS_IWARP_PERSONALITY(p_hwfn)) {
+		dq_pf_max_cid += p_mngr->conn_cfg[PROTOCOLID_IWARP].cid_count >>
+				 DQ_RANGE_SHIFT;
+		dq_vf_max_cid +=
+			p_mngr->conn_cfg[PROTOCOLID_IWARP].cids_per_vf >>
+			DQ_RANGE_SHIFT;
+		STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 3),
+			     dq_pf_max_cid);
+		STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(PF, 3),
+			     PROTOCOLID_IWARP);
+		STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 3),
+			     dq_vf_max_cid);
+		STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(VF, 3),
+			     PROTOCOLID_IWARP);
+	} else {
+		/* Range ID #3 is an empty range */
+		STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 3),
+			     dq_pf_max_cid);
+		STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 3),
+			     dq_vf_max_cid);
+	}
+
+	/* Range IDs #4 and #5 are empty ranges */
+	STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 4), dq_pf_max_cid);
+	STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 4), dq_vf_max_cid);
+	STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 5), dq_pf_max_cid);
+	STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 5), dq_vf_max_cid);
+}
+
+static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+{
+	if (ECORE_IS_E4(p_hwfn->p_dev))
+		ecore_dq_init_pf_e4(p_hwfn);
+	else
+		ecore_dq_init_pf_e5(p_hwfn);
+}
+
 static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_ilt_client_cfg *ilt_clients;
@@ -1529,7 +1825,8 @@ static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
 			     ilt_clients[i].first.reg,
 			     ilt_clients[i].first.val);
 		STORE_RT_REG(p_hwfn,
-			     ilt_clients[i].last.reg, ilt_clients[i].last.val);
+			     ilt_clients[i].last.reg,
+			     ilt_clients[i].last.val);
 		STORE_RT_REG(p_hwfn,
 			     ilt_clients[i].p_size.reg,
 			     ilt_clients[i].p_size.val);
@@ -1585,7 +1882,8 @@ static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
 	blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
 	if (p_cli->active) {
 		STORE_RT_REG(p_hwfn,
-			     PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+			     PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET,
+			     blk_factor);
 		STORE_RT_REG(p_hwfn,
 			     PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
 			     p_cli->pf_total_lines);
@@ -1606,8 +1904,8 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
 	ecore_ilt_bounds_init(p_hwfn);
 	ecore_ilt_vf_bounds_init(p_hwfn);
 
-	p_mngr = p_hwfn->p_cxt_mngr;
-	p_shdw = p_mngr->ilt_shadow;
+	p_mngr  = p_hwfn->p_cxt_mngr;
+	p_shdw  = p_mngr->ilt_shadow;
 	clients = p_hwfn->p_cxt_mngr->clients;
 
 	for_each_ilt_valid_client(i, clients) {
@@ -1616,13 +1914,13 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
 		 */
 		line = clients[i].first.val - p_mngr->pf_start_line;
 		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
-		    clients[i].first.val * ILT_ENTRY_IN_REGS;
+			   clients[i].first.val * ILT_ENTRY_IN_REGS;
 
 		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
 		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
 			u64 ilt_hw_entry = 0;
 
-			/** p_virt could be OSAL_NULL incase of dynamic
+			/** virt_addr could be OSAL_NULL in case of dynamic
 			 *  allocation
 			 */
 			if (p_shdw[line].virt_addr != OSAL_NULL) {
@@ -1631,12 +1929,10 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
 					  (p_shdw[line].phys_addr >> 12));
 
 				DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
-					"Setting RT[0x%08x] from"
-					" ILT[0x%08x] [Client is %d] to"
-					" Physical addr: 0x%lx\n",
+					"Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to "
+					"Physical addr: 0x%" PRIx64 "\n",
 					rt_offst, line, i,
-					(unsigned long)(p_shdw[line].
-							phys_addr >> 12));
+					(u64)(p_shdw[line].phys_addr >> 12));
 			}
 
 			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -1673,65 +1969,114 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
 		   conn_num);
 }
 
-/* Timers PF */
-#define TM_CFG_NUM_IDS_SHIFT		0
-#define TM_CFG_NUM_IDS_MASK		0xFFFFULL
-#define TM_CFG_PRE_SCAN_OFFSET_SHIFT	16
-#define TM_CFG_PRE_SCAN_OFFSET_MASK	0x1FFULL
-#define TM_CFG_PARENT_PF_SHIFT		25
-#define TM_CFG_PARENT_PF_MASK		0x7ULL
-
-#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT	30
-#define TM_CFG_CID_PRE_SCAN_ROWS_MASK	0x1FFULL
+/* Timers PF - configuration memory for the connections and the tasks */
+
+/* Common parts to connections and tasks */
+#define TM_CFG_NUM_IDS_SHIFT			0
+#define TM_CFG_NUM_IDS_MASK			0xFFFFULL
+/* BB */
+#define TM_CFG_PRE_SCAN_OFFSET_BB_SHIFT		16
+#define TM_CFG_PRE_SCAN_OFFSET_BB_MASK		0x1FFULL
+#define TM_CFG_PARENT_PF_BB_SHIFT		26
+#define TM_CFG_PARENT_PF_BB_MASK		0x7ULL
+/* AH */
+#define TM_CFG_PRE_SCAN_OFFSET_AH_SHIFT		16
+#define TM_CFG_PRE_SCAN_OFFSET_AH_MASK		0x3FFULL
+#define TM_CFG_PARENT_PF_AH_SHIFT		26
+#define TM_CFG_PARENT_PF_AH_MASK		0xFULL
+/* E5 */
+#define TM_CFG_PRE_SCAN_OFFSET_E5_SHIFT		16
+#define TM_CFG_PRE_SCAN_OFFSET_E5_MASK		0x3FFULL
+#define TM_CFG_PARENT_PF_E5_SHIFT		26
+#define TM_CFG_PARENT_PF_E5_MASK		0xFULL
+
+/* Connections specific */
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT		30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK		0x1FFULL
+
+/* Tasks specific */
+#define TM_CFG_TID_OFFSET_SHIFT			30
+#define TM_CFG_TID_OFFSET_MASK			0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT		49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK		0x1FFULL
+
+static void ecore_tm_cfg_set_parent_pf(struct ecore_dev *p_dev, u64 *cfg_word,
+				       u8 val)
+{
+	if (ECORE_IS_BB(p_dev))
+		SET_FIELD(*cfg_word, TM_CFG_PARENT_PF_BB, val);
+	else if (ECORE_IS_AH(p_dev))
+		SET_FIELD(*cfg_word, TM_CFG_PARENT_PF_AH, val);
+	else /* E5 */
+		SET_FIELD(*cfg_word, TM_CFG_PARENT_PF_E5, val);
+}
 
-#define TM_CFG_TID_OFFSET_SHIFT		30
-#define TM_CFG_TID_OFFSET_MASK		0x7FFFFULL
-#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT	49
-#define TM_CFG_TID_PRE_SCAN_ROWS_MASK	0x1FFULL
+static void ecore_tm_cfg_set_pre_scan_offset(struct ecore_dev *p_dev,
+					     u64 *cfg_word, u8 val)
+{
+	if (ECORE_IS_BB(p_dev))
+		SET_FIELD(*cfg_word, TM_CFG_PRE_SCAN_OFFSET_BB, val);
+	else if (ECORE_IS_AH(p_dev))
+		SET_FIELD(*cfg_word, TM_CFG_PRE_SCAN_OFFSET_AH, val);
+	else /* E5 */
+		SET_FIELD(*cfg_word, TM_CFG_PRE_SCAN_OFFSET_E5, val);
+}
 
 static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	u32 active_seg_mask = 0, tm_offset, rt_reg;
+	u32 *p_cfg_word_32, cfg_word_size;
 	struct ecore_tm_iids tm_iids;
 	u64 cfg_word;
 	u8 i;
 
 	OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
-	ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
+	ecore_cxt_tm_iids(p_hwfn, &tm_iids);
 
 	/* @@@TBD No pre-scan for now */
 
-		cfg_word = 0;
-		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
-		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
-	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+	cfg_word = 0;
+	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+	ecore_tm_cfg_set_parent_pf(p_hwfn->p_dev, &cfg_word, p_hwfn->rel_pf_id);
+	ecore_tm_cfg_set_pre_scan_offset(p_hwfn->p_dev, &cfg_word, 0);
+	if (ECORE_IS_E4(p_hwfn->p_dev))
 		SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
 
+	/* Each CONFIG_CONN_MEM row in E5 is 32 bits and not 64 bits as in E4 */
+	p_cfg_word_32 = (u32 *)&cfg_word;
+	cfg_word_size = ECORE_IS_E4(p_hwfn->p_dev) ? sizeof(cfg_word)
+						   : sizeof(*p_cfg_word_32);
+
 	/* Note: We assume consecutive VFs for a PF */
 	for (i = 0; i < p_mngr->vf_count; i++) {
 		rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
-		    (sizeof(cfg_word) / sizeof(u32)) *
-		    (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
-		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+			 (cfg_word_size / sizeof(u32)) *
+			 (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+		if (ECORE_IS_E4(p_hwfn->p_dev))
+			STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+		else
+			STORE_RT_REG_AGG(p_hwfn, rt_reg, *p_cfg_word_32);
 	}
 
 	cfg_word = 0;
 	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
-	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
-	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);	/* n/a for PF */
-	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all   */
+	ecore_tm_cfg_set_parent_pf(p_hwfn->p_dev, &cfg_word, 0); /* n/a for PF */
+	ecore_tm_cfg_set_pre_scan_offset(p_hwfn->p_dev, &cfg_word, 0);
+	if (ECORE_IS_E4(p_hwfn->p_dev))
+		SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
 
 	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
-	    (sizeof(cfg_word) / sizeof(u32)) *
-	    (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
-	STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+		 (cfg_word_size / sizeof(u32)) *
+		 (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
+	if (ECORE_IS_E4(p_hwfn->p_dev))
+		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+	else
+		STORE_RT_REG_AGG(p_hwfn, rt_reg, *p_cfg_word_32);
 
-	/* enable scan */
+	/* enable scan for PF */
 	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
-		     tm_iids.pf_cids ? 0x1 : 0x0);
-
-	/* @@@TBD how to enable the scan for the VFs */
+		     tm_iids.pf_cids  ? 0x1 : 0x0);
 
 	tm_offset = tm_iids.per_vf_cids;
 
@@ -1739,14 +2084,15 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
 	for (i = 0; i < p_mngr->vf_count; i++) {
 		cfg_word = 0;
 		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
-		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
-		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+		ecore_tm_cfg_set_pre_scan_offset(p_hwfn->p_dev, &cfg_word, 0);
+		ecore_tm_cfg_set_parent_pf(p_hwfn->p_dev, &cfg_word,
+					   p_hwfn->rel_pf_id);
 		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
 		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
 
 		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
-		    (sizeof(cfg_word) / sizeof(u32)) *
-		    (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+			 (sizeof(cfg_word) / sizeof(u32)) *
+			 (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
 
 		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
 	}
@@ -1755,15 +2101,15 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
 	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
 		cfg_word = 0;
 		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
-		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
-		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+		ecore_tm_cfg_set_pre_scan_offset(p_hwfn->p_dev, &cfg_word, 0);
+		ecore_tm_cfg_set_parent_pf(p_hwfn->p_dev, &cfg_word, 0);
 		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
 		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
 
 		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
-		    (sizeof(cfg_word) / sizeof(u32)) *
-		    (NUM_OF_VFS(p_hwfn->p_dev) +
-		     p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+			 (sizeof(cfg_word) / sizeof(u32)) *
+			 (NUM_OF_VFS(p_hwfn->p_dev) +
+			 p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
 
 		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
 		active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
@@ -1771,9 +2117,43 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
 		tm_offset += tm_iids.pf_tids[i];
 	}
 
+	if (ECORE_IS_RDMA_PERSONALITY(p_hwfn))
+		active_seg_mask = 0;
+
 	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+}
 
-	/* @@@TBD how to enable the scan for the VFs */
+void ecore_tm_clear_vf_ilt(struct ecore_hwfn *p_hwfn, u16 vf_idx)
+{
+	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct ecore_ilt_client_cfg *p_cli;
+	struct phys_mem_desc *shadow_line;
+	struct ecore_ilt_cli_blk *p_blk;
+	u32 shadow_start_line, line;
+	u32 i;
+
+	p_cli = &p_mngr->clients[ILT_CLI_TM];
+	p_blk = &p_cli->vf_blks[0];
+	line = p_blk->start_line + vf_idx * p_cli->vf_total_lines;
+	shadow_start_line = line - p_mngr->pf_start_line;
+
+	for (i = 0; i < p_cli->vf_total_lines; i++) {
+		shadow_line = &p_mngr->ilt_shadow[shadow_start_line + i];
+
+		DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+			   "zeroing ILT for VF %d line %d address %p size %d\n",
+			   vf_idx, i, shadow_line->virt_addr, shadow_line->size);
+
+		if (shadow_line->virt_addr != OSAL_NULL)
+			OSAL_MEM_ZERO(shadow_line->virt_addr, shadow_line->size);
+	}
+}
+
+static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
+{
+	if ((p_hwfn->hw_info.personality == ECORE_PCI_FCOE) &&
+	    p_hwfn->pf_params.fcoe_pf_params.is_target)
+		STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
 }
 
 static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
@@ -1781,6 +2161,7 @@ static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct ecore_conn_type_cfg *p_fcoe;
 	struct ecore_tid_seg *p_tid;
+	u32 max_tid;
 
 	p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
 
@@ -1789,15 +2170,23 @@ static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
 		return;
 
 	p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG];
-	STORE_RT_REG_AGG(p_hwfn,
-			PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
-			p_tid->count);
+	max_tid = p_tid->count - 1;
+	if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
+		STORE_RT_REG_AGG(p_hwfn,
+				 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
+				 max_tid);
+	} else {
+		STORE_RT_REG_AGG(p_hwfn,
+				PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
+				max_tid);
+	}
 }
 
 void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
 {
-	/* CDU configuration */
 	ecore_cdu_init_common(p_hwfn);
+	ecore_prs_init_common(p_hwfn);
+	ecore_dq_init_common(p_hwfn);
 }
 
 void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
@@ -1807,7 +2196,10 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 	ecore_dq_init_pf(p_hwfn);
 	ecore_cdu_init_pf(p_hwfn);
 	ecore_ilt_init_pf(p_hwfn);
-	ecore_src_init_pf(p_hwfn);
+
+	if (!ECORE_IS_E5(p_hwfn->p_dev))
+		ecore_src_init_pf(p_hwfn);
+
 	ecore_tm_init_pf(p_hwfn);
 	ecore_prs_init_pf(p_hwfn);
 }
@@ -1850,7 +2242,7 @@ enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
 		return ECORE_NORESOURCES;
 	}
 
-	OSAL_SET_BIT(rel_cid, p_map->cid_map);
+	OSAL_NON_ATOMIC_SET_BIT(rel_cid, p_map->cid_map);
 
 	*p_cid = rel_cid + p_map->start_cid;
 
@@ -1890,15 +2282,16 @@ static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
 			break;
 		}
 	}
+
 	if (*p_type == MAX_CONN_TYPES) {
-		DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
+		DP_NOTICE(p_hwfn, false, "Invalid CID %d vfid %02x\n", cid, vfid);
 		goto fail;
 	}
 
 	rel_cid = cid - (*pp_map)->start_cid;
-	if (!OSAL_GET_BIT(rel_cid, (*pp_map)->cid_map)) {
-		DP_NOTICE(p_hwfn, true,
-			  "CID %d [vifd %02x] not acquired", cid, vfid);
+	if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+		DP_NOTICE(p_hwfn, false,
+			  "CID %d [vifd %02x] not acquired\n", cid, vfid);
 		goto fail;
 	}
 
@@ -1975,29 +2368,38 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
 		return ECORE_INVAL;
 
 	p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].virt_addr +
-	    p_info->iid % cxts_per_p * conn_cxt_size;
+			      p_info->iid % cxts_per_p * conn_cxt_size;
 
 	DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
-		"Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
-		(p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
+		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+		   (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
 
 	return ECORE_SUCCESS;
 }
 
-enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn,
+					     u32 rdma_tasks, u32 eth_tasks)
 {
+	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+
 	/* Set the number of required CORE connections */
-	u32 core_cids = 1;	/* SPQ */
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	struct ecore_ptt *p_ptt;
+	u32 core_cids = 1; /* SPQ */
+	u32 vf_core_cids = 0;
 
-	ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+	ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids,
+				      vf_core_cids);
 
 	switch (p_hwfn->hw_info.personality) {
 	case ECORE_PCI_ETH:
-		{
+	{
 		u32 count = 0;
 
 		struct ecore_eth_pf_params *p_params =
-			    &p_hwfn->pf_params.eth_pf_params;
+					&p_hwfn->pf_params.eth_pf_params;
+
+		p_mngr->task_ctx_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
 
 		if (!p_params->num_vf_cons)
 			p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
@@ -2005,17 +2407,108 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
 					      p_params->num_cons,
 					      p_params->num_vf_cons);
 
+		ecore_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ETH,
+					      ECORE_CXT_ETH_TID_SEG,
+					      ETH_CDU_TASK_SEG_TYPE,
+					      eth_tasks, false);
+
+#ifdef CONFIG_ECORE_FS
+		if (ECORE_IS_E5(p_hwfn->p_dev))
+			p_hwfn->fs_info.e5->tid_count = eth_tasks;
+#endif
+
 		count = p_params->num_arfs_filters;
 
-		if (!OSAL_GET_BIT(ECORE_MF_DISABLE_ARFS,
+		if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
 				   &p_hwfn->p_dev->mf_bits))
 			p_hwfn->p_cxt_mngr->arfs_count = count;
 
 		break;
-		}
+	}
 	default:
+		rc = ECORE_INVAL;
+	}
+
+	return rc;
+}
+
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+						struct ecore_tid_mem *p_info)
+{
+	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 proto, seg, total_lines, i, shadow_line;
+	struct ecore_ilt_client_cfg *p_cli;
+	struct ecore_ilt_cli_blk *p_fl_seg;
+	struct ecore_tid_seg *p_seg_info;
+
+	/* Verify the personality */
+	switch (p_hwfn->hw_info.personality) {
+	case ECORE_PCI_FCOE:
+		proto = PROTOCOLID_FCOE;
+		seg = ECORE_CXT_FCOE_TID_SEG;
+		break;
+	case ECORE_PCI_ISCSI:
+		proto = PROTOCOLID_ISCSI;
+		seg = ECORE_CXT_ISCSI_TID_SEG;
+		break;
+	default:
+		return ECORE_INVAL;
+	}
+
+	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+	if (!p_cli->active)
+		return ECORE_INVAL;
+
+	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+	if (!p_seg_info->has_fl_mem)
 		return ECORE_INVAL;
+
+	p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+	total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+				   p_fl_seg->real_size_in_page);
+
+	for (i = 0; i < total_lines; i++) {
+		shadow_line = i + p_fl_seg->start_line -
+			      p_hwfn->p_cxt_mngr->pf_start_line;
+		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
 	}
+	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+			p_fl_seg->real_size_in_page;
+	p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+	p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+				     p_info->tid_size;
+
+	return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_cxt_get_iid_info(struct ecore_hwfn *p_hwfn,
+		       enum ecore_cxt_elem_type elem_type,
+		       struct ecore_ilt_client_cfg **pp_cli,
+		       struct ecore_ilt_cli_blk **pp_blk,
+		       u32 *elem_size, bool is_vf)
+{
+	struct ecore_ilt_client_cfg *p_cli;
+
+	switch (elem_type) {
+	case ECORE_ELEM_CXT:
+		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+		*elem_size = CONN_CXT_SIZE(p_hwfn);
+		*pp_blk = is_vf ? &p_cli->vf_blks[CDUC_BLK] :
+			  &p_cli->pf_blks[CDUC_BLK];
+		break;
+	case ECORE_ELEM_ETH_TASK:
+		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+		*elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+		*pp_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ETH_TID_SEG)];
+		break;
+	default:
+		DP_NOTICE(p_hwfn, false,
+			  "ECORE_INVALID elem type = %d", elem_type);
+		return ECORE_INVAL;
+	}
+
+	*pp_cli = p_cli;
 
 	return ECORE_SUCCESS;
 }
@@ -2026,8 +2519,13 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
 enum _ecore_status_t
 ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
 			    enum ecore_cxt_elem_type elem_type,
-			    u32 iid)
+			    u32 iid, u8 vf_id)
 {
+	/* TODO
+	 * Check to see if we need to do anything differently if this is
+	 * called on behalf of VF.
+	 */
+
 	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
 	struct ecore_ilt_client_cfg *p_cli;
 	struct ecore_ilt_cli_blk *p_blk;
@@ -2036,33 +2534,25 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
 	u64 ilt_hw_entry;
 	void *p_virt;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
+	bool is_vf = (vf_id != ECORE_CXT_PF_CID);
 
-	switch (elem_type) {
-	case ECORE_ELEM_CXT:
-		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
-		elem_size = CONN_CXT_SIZE(p_hwfn);
-		p_blk = &p_cli->pf_blks[CDUC_BLK];
-		break;
-	case ECORE_ELEM_SRQ:
-		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
-		elem_size = SRQ_CXT_SIZE;
-		p_blk = &p_cli->pf_blks[SRQ_BLK];
-		break;
-	case ECORE_ELEM_TASK:
-		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
-		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
-		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
-		break;
-	default:
-		DP_NOTICE(p_hwfn, false,
-			  "ECORE_INVALID elem type = %d", elem_type);
-		return ECORE_INVAL;
-	}
+	rc = ecore_cxt_get_iid_info(p_hwfn, elem_type, &p_cli, &p_blk,
+				    &elem_size, is_vf);
+	if (rc)
+		return rc;
 
 	/* Calculate line in ilt */
 	hw_p_size = p_cli->p_size.val;
 	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
-	line = p_blk->start_line + (iid / elems_per_p);
+	if (is_vf)
+		/* start_line - where the VF sections starts (p_blk is VF's one)
+		 * (vf_id * p_cli->vf_total_lines) - Where this VF starts
+		 */
+		line = p_blk->start_line +
+		       (vf_id * p_cli->vf_total_lines) + (iid / elems_per_p);
+	else
+		line = p_blk->start_line + (iid / elems_per_p);
+
 	shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
 
 	/* If line is already allocated, do nothing, otherwise allocate it and
@@ -2070,7 +2560,9 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
 	 * This section can be run in parallel from different contexts and thus
 	 * a mutex protection is needed.
 	 */
-
+#ifdef _NTDDK_
+#pragma warning(suppress : 28121)
+#endif
 	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
 
 	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
@@ -2106,14 +2598,13 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
 	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
 	SET_FIELD(ilt_hw_entry,
 		  ILT_ENTRY_PHY_ADDR,
-		 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12));
-
-/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >>
+		   12));
 
+	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
 	ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
 			    reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
 			    OSAL_NULL /* default parameters */);
-
 out1:
 	ecore_ptt_release(p_hwfn, p_ptt);
 out0:
@@ -2125,77 +2616,119 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
 /* This function is very RoCE oriented, if another protocol in the future
  * will want this feature we'll need to modify the function to be more generic
  */
-static enum _ecore_status_t
+enum _ecore_status_t
 ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
 			 enum ecore_cxt_elem_type elem_type,
-			 u32 start_iid, u32 count)
+			 u32 start_iid, u32 count, u8 vf_id)
 {
-	u32 start_line, end_line, shadow_start_line, shadow_end_line;
+	u32 end_iid, start_line, end_line, shadow_start_line, shadow_end_line;
+	u32 start_offset, end_offset, start_iid_offset, end_iid_offset;
 	u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+	bool b_skip_start = false, b_skip_end = false;
+	bool is_vf = (vf_id != ECORE_CXT_PF_CID);
 	struct ecore_ilt_client_cfg *p_cli;
 	struct ecore_ilt_cli_blk *p_blk;
-	u32 end_iid = start_iid + count;
+	struct phys_mem_desc *ilt_page;
 	struct ecore_ptt *p_ptt;
 	u64 ilt_hw_entry = 0;
-	u32 i;
+	u32 i, abs_line;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 
-	switch (elem_type) {
-	case ECORE_ELEM_CXT:
-		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
-		elem_size = CONN_CXT_SIZE(p_hwfn);
-		p_blk = &p_cli->pf_blks[CDUC_BLK];
-		break;
-	case ECORE_ELEM_SRQ:
-		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
-		elem_size = SRQ_CXT_SIZE;
-		p_blk = &p_cli->pf_blks[SRQ_BLK];
-		break;
-	case ECORE_ELEM_TASK:
-		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
-		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
-		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
-		break;
-	default:
-		DP_NOTICE(p_hwfn, false,
-			  "ECORE_INVALID elem type = %d", elem_type);
-		return ECORE_INVAL;
-	}
+	/* in case this client has no ILT lines, no need to free anything */
+	if (count == 0)
+		return ECORE_SUCCESS;
 
-	/* Calculate line in ilt */
+	rc = ecore_cxt_get_iid_info(p_hwfn, elem_type, &p_cli, &p_blk,
+				    &elem_size, is_vf);
+	if (rc)
+		return rc;
+
+	/* Calculate lines in ILT.
+	 * Skip the start line if 'start_iid' is not the first element in page.
+	 * Skip the end line if 'end_iid' is not the last element in page.
+	 */
 	hw_p_size = p_cli->p_size.val;
 	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+	end_iid = start_iid + count - 1;
+
 	start_line = p_blk->start_line + (start_iid / elems_per_p);
 	end_line = p_blk->start_line + (end_iid / elems_per_p);
-	if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
-		end_line--;
+
+	if (is_vf) {
+		start_line += (vf_id * p_cli->vf_total_lines);
+		end_line += (vf_id * p_cli->vf_total_lines);
+	}
+
+	if (start_iid % elems_per_p)
+		b_skip_start = true;
+
+	if ((end_iid % elems_per_p) != (elems_per_p - 1))
+		b_skip_end = true;
+
+	start_iid_offset = (start_iid % elems_per_p) * elem_size;
+	end_iid_offset = ((end_iid % elems_per_p) + 1) * elem_size;
 
 	shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
 	shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
 
 	p_ptt = ecore_ptt_acquire(p_hwfn);
 	if (!p_ptt) {
-		DP_NOTICE(p_hwfn, false,
-			  "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+		DP_NOTICE(p_hwfn, false, "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
 		return ECORE_TIMEOUT;
 	}
 
-	for (i = shadow_start_line; i < shadow_end_line; i++) {
-		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
+	/* This piece of code takes care of freeing the ILT specific range, as
+	 * well as setting it to zero.
+	 * In case the start or the end lines of the range share other iids,
+	 * they should not be freed, but only be set to zero.
+	 * The reason for zeroing the lines is to prevent future access to the
+	 * old iids.
+	 *
+	 * For example, let's assume the RDMA tids of VF0 occupy 3.5 lines.
+	 * Now we run a test which uses all the tids and then perform VF FLR.
+	 * During the FLR we will free the first 3 lines but not the 4th.
+	 * If we won't zero the first half of the 4th line, a new VF0 might try
+	 * to use the old tids which are stored there, and this will lead to an
+	 * error.
+	 */
+	for (i = shadow_start_line; i <= shadow_end_line; i++) {
+		ilt_page = &p_hwfn->p_cxt_mngr->ilt_shadow[i];
+
+		if (!ilt_page->virt_addr) {
+			DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+				   "Virtual address of ILT shadow line %u is NULL\n", i);
+			continue;
+		}
+
+		start_offset = (i == shadow_start_line && b_skip_start) ?
+			       start_iid_offset : 0;
+		end_offset = (i == shadow_end_line && b_skip_end) ?
+			     end_iid_offset : ilt_page->size;
+
+		OSAL_MEM_ZERO((u8 *)ilt_page->virt_addr + start_offset,
+			      end_offset - start_offset);
+
+		DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+			   "Zeroing shadow line %u start offset %x end offset %x\n",
+			   i, start_offset, end_offset);
+
+		if (end_offset - start_offset < ilt_page->size)
 			continue;
 
 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-				    p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
-				    p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr,
-				    p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+				       ilt_page->virt_addr,
+				       ilt_page->phys_addr,
+				       ilt_page->size);
 
-		p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = OSAL_NULL;
-		p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
-		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+		ilt_page->virt_addr = OSAL_NULL;
+		ilt_page->phys_addr = 0;
+		ilt_page->size = 0;
 
 		/* compute absolute offset */
+		abs_line = p_hwfn->p_cxt_mngr->pf_start_line + i;
 		reg_offset = PSWRQ2_REG_ILT_MEMORY +
-		    ((start_line++) * ILT_REG_SIZE_IN_BYTES *
-		     ILT_ENTRY_IN_REGS);
+			     (abs_line * ILT_REG_SIZE_IN_BYTES *
+			      ILT_ENTRY_IN_REGS);
 
 		/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
 		 * wide-bus.
@@ -2212,6 +2745,75 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+					    u32 tid,
+					    u8 ctx_type,
+					    void **pp_task_ctx)
+{
+	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct ecore_ilt_client_cfg *p_cli;
+	struct ecore_tid_seg *p_seg_info;
+	struct ecore_ilt_cli_blk *p_seg;
+	u32 num_tids_per_block;
+	u32 tid_size, ilt_idx;
+	u32 total_lines;
+	u32 proto, seg;
+
+	/* Verify the personality */
+	switch (p_hwfn->hw_info.personality) {
+	case ECORE_PCI_FCOE:
+		proto = PROTOCOLID_FCOE;
+		seg = ECORE_CXT_FCOE_TID_SEG;
+		break;
+	case ECORE_PCI_ISCSI:
+		proto = PROTOCOLID_ISCSI;
+		seg = ECORE_CXT_ISCSI_TID_SEG;
+		break;
+	case ECORE_PCI_ETH_RDMA:
+	case ECORE_PCI_ETH_IWARP:
+	case ECORE_PCI_ETH_ROCE:
+	case ECORE_PCI_ETH:
+		/* All ETH personalities refer to Ethernet TIDs since RDMA does
+		 * not use this API.
+		 */
+		proto = PROTOCOLID_ETH;
+		seg = ECORE_CXT_ETH_TID_SEG;
+		break;
+	default:
+		return ECORE_INVAL;
+	}
+
+	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+	if (!p_cli->active)
+		return ECORE_INVAL;
+
+	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+	if (ctx_type == ECORE_CTX_WORKING_MEM) {
+		p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+	} else if (ctx_type == ECORE_CTX_FL_MEM) {
+		if (!p_seg_info->has_fl_mem)
+			return ECORE_INVAL;
+		p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+	} else {
+		return ECORE_INVAL;
+	}
+	total_lines = DIV_ROUND_UP(p_seg->total_size,
+				   p_seg->real_size_in_page);
+	tid_size = p_mngr->task_type_size[p_seg_info->type];
+	num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+	if (total_lines < tid / num_tids_per_block)
+		return ECORE_INVAL;
+
+	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+		  p_mngr->pf_start_line;
+	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
+			     (tid % num_tids_per_block) * tid_size;
+
+	return ECORE_SUCCESS;
+}
+
 static u16 ecore_blk_calculate_pages(struct ecore_ilt_cli_blk *p_blk)
 {
 	if (p_blk->real_size_in_page == 0)
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index cd0e32e3b..fe61a8b8f 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -1,25 +1,35 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef _ECORE_CID_
 #define _ECORE_CID_
 
 #include "ecore_hsi_common.h"
+#include "ecore_hsi_eth.h"
 #include "ecore_proto_if.h"
 #include "ecore_cxt_api.h"
 
-/* Tasks segments definitions  */
-#define ECORE_CXT_ISCSI_TID_SEG			PROTOCOLID_ISCSI	/* 0 */
-#define ECORE_CXT_FCOE_TID_SEG			PROTOCOLID_FCOE		/* 1 */
-#define ECORE_CXT_ROCE_TID_SEG			PROTOCOLID_ROCE		/* 2 */
+/* Tasks segments definitions (keeping this numbering is necessary) */
+#define ECORE_CXT_ISCSI_TID_SEG			0	/* PROTOCOLID_ISCSI */
+#define ECORE_CXT_FCOE_TID_SEG			1	/* PROTOCOLID_FCOE */
+#define ECORE_CXT_ROCE_TID_SEG			2	/* PROTOCOLID_ROCE */
+#define ECORE_CXT_ETH_TID_SEG			3
 
 enum ecore_cxt_elem_type {
 	ECORE_ELEM_CXT,
 	ECORE_ELEM_SRQ,
-	ECORE_ELEM_TASK
+	ECORE_ELEM_RDMA_TASK,
+	ECORE_ELEM_ETH_TASK,
+	ECORE_ELEM_XRC_SRQ,
+};
+
+/* @DPDK */
+enum ecore_iov_is_vf_or_pf {
+	IOV_PF	= 0,	/* This is a PF instance. */
+	IOV_VF	= 1	/* This is a VF instance. */
 };
 
 u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
@@ -27,11 +37,12 @@ u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
 				  u32 *vf_cid);
 
 u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
-				  enum protocol_type type);
+				  enum protocol_type type,
+				  u8 vf_id);
 
 u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
-				  enum protocol_type type);
-u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
+				  enum protocol_type type,
+				  u8 vf_id);
 
 /**
  * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
@@ -40,16 +51,27 @@ u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn,
+					     u32 rdma_tasks, u32 eth_tasks);
 
 /**
  * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
  *
  * @param p_hwfn
+ * @param last_line
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
+					       u32 *last_line);
+
+/**
+ * @brief ecore_cxt_cfg_ilt_compute_excess - how many lines can be decreased
+ *
+ * @param p_hwfn
+ * @param used_lines
+ */
+u32 ecore_cxt_cfg_ilt_compute_excess(struct ecore_hwfn *p_hwfn, u32 used_lines);
 
 /**
  * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
@@ -68,8 +90,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
 void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
 
 /**
- * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
- *        map
+ * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
  *
  * @param p_hwfn
  *
@@ -85,8 +106,7 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
 void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
 
 /**
- * @brief ecore_cxt_hw_init_common - Initailze ILT and DQ, common phase, per
- *        path.
+ * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
  *
  * @param p_hwfn
  */
@@ -121,7 +141,16 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
 				     struct ecore_ptt *p_ptt);
 
-#define ECORE_CXT_PF_CID (0xff)
+/**
+ * @brief Reconfigures QM from a non-sleepable context.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_qm_reconf_intr(struct ecore_hwfn *p_hwfn,
+					  struct ecore_ptt *p_ptt);
 
 /**
  * @brief ecore_cxt_release - Release a cid
@@ -177,28 +206,39 @@ enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
  * @param p_hwfn
  * @param elem_type
  * @param iid
+ * @param vf_id
  *
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t
 ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
 			    enum ecore_cxt_elem_type elem_type,
-			    u32 iid);
+			    u32 iid, u8 vf_id);
 
 /**
- * @brief ecore_cxt_free_proto_ilt - function frees ilt pages
- *        associated with the protocol passed.
+ * @brief ecore_cxt_free_ilt_range - function frees ilt pages
+ *        associated with the protocol and element type passed.
  *
  * @param p_hwfn
  * @param proto
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
-					      enum protocol_type proto);
+enum _ecore_status_t
+ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
+			 enum ecore_cxt_elem_type elem_type,
+			 u32 start_iid, u32 count, u8 vf_id);
 
 #define ECORE_CTX_WORKING_MEM 0
 #define ECORE_CTX_FL_MEM 1
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+					    u32 tid,
+					    u8 ctx_type,
+					    void **task_ctx);
+
+u32 ecore_cxt_get_ilt_page_size(struct ecore_hwfn *p_hwfn);
+
+u32 ecore_cxt_get_total_srq_count(struct ecore_hwfn *p_hwfn);
 
 /* Max number of connection types in HW (DQ/CDU etc.) */
 #define MAX_CONN_TYPES		PROTOCOLID_COMMON
@@ -206,20 +246,20 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
 #define NUM_TASK_PF_SEGMENTS	4
 #define NUM_TASK_VF_SEGMENTS	1
 
-/* PF per protocol configuration object */
+/* PF per protocol configuration object */
 #define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
 #define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
 
 struct ecore_tid_seg {
-	u32 count;
-	u8 type;
-	bool has_fl_mem;
+	u32	count;
+	u8	type;
+	bool	has_fl_mem;
 };
 
 struct ecore_conn_type_cfg {
-	u32 cid_count;
-	u32 cids_per_vf;
-	struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
+	u32			cid_count;
+	u32			cids_per_vf;
+	struct ecore_tid_seg	tid_seg[TASK_SEGMENTS];
 };
 
 /* ILT Client configuration,
@@ -240,7 +280,7 @@ struct ilt_cfg_pair {
 };
 
 struct ecore_ilt_cli_blk {
-	u32 total_size;		/* 0 means not active */
+	u32 total_size; /* 0 means not active */
 	u32 real_size_in_page;
 	u32 start_line;
 	u32 dynamic_line_offset;
@@ -248,29 +288,29 @@ struct ecore_ilt_cli_blk {
 };
 
 struct ecore_ilt_client_cfg {
-	bool active;
+	bool				active;
 
 	/* ILT boundaries */
-	struct ilt_cfg_pair first;
-	struct ilt_cfg_pair last;
-	struct ilt_cfg_pair p_size;
+	struct ilt_cfg_pair		first;
+	struct ilt_cfg_pair		last;
+	struct ilt_cfg_pair		p_size;
 
 	/* ILT client blocks for PF */
-	struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
-	u32 pf_total_lines;
+	struct ecore_ilt_cli_blk	pf_blks[ILT_CLI_PF_BLOCKS];
+	u32				pf_total_lines;
 
 	/* ILT client blocks for VFs */
-	struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
-	u32 vf_total_lines;
+	struct ecore_ilt_cli_blk	vf_blks[ILT_CLI_VF_BLOCKS];
+	u32				vf_total_lines;
 };
 
 #define MAP_WORD_SIZE		sizeof(unsigned long)
 #define BITS_PER_MAP_WORD	(MAP_WORD_SIZE * 8)
 
 struct ecore_cid_acquired_map {
-	u32 start_cid;
-	u32 max_count;
-	u32 *cid_map;
+	u32		start_cid;
+	u32		max_count;
+	u32		*cid_map; /* @DPDK */
 };
 
 struct ecore_src_t2 {
@@ -281,7 +321,7 @@ struct ecore_src_t2 {
 };
 
 struct ecore_cxt_mngr {
-	/* Per protocol configuration */
+	/* Per protocol configuration */
 	struct ecore_conn_type_cfg	conn_cfg[MAX_CONN_TYPES];
 
 	/* computed ILT structure */
@@ -300,15 +340,14 @@ struct ecore_cxt_mngr {
 	struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
 	struct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES];
 
-	/* ILT  shadow table */
+	/* ILT shadow table */
 	struct phys_mem_desc		*ilt_shadow;
 	u32				ilt_shadow_size;
 	u32				pf_start_line;
 
 	/* Mutex for a dynamic ILT allocation */
-	osal_mutex_t mutex;
+	osal_mutex_t			mutex;
 
-	/* SRC T2 */
 	struct ecore_src_t2		src_t2;
 
 	/* The infrastructure originally was very generic and context/task
@@ -317,19 +356,24 @@ struct ecore_cxt_mngr {
 	 * needing for a given block we'd iterate over all the relevant
 	 * connection-types.
 	 * But since then we've had some additional resources, some of which
-	 * require memory which is independent of the general context/task
+	 * require memory which is independent of the general context/task
 	 * scheme. We add those here explicitly per-feature.
 	 */
 
 	/* total number of SRQ's for this hwfn */
 	u32				srq_count;
+	u32				xrc_srq_count;
+	u32				vfs_srq_count;
 
 	/* Maximal number of L2 steering filters */
 	u32				arfs_count;
 
 	/* TODO - VF arfs filters ? */
 
-	u8				task_type_id;
+	u16				iscsi_task_pages;
+	u16				fcoe_task_pages;
+	u16				roce_task_pages;
+	u16				eth_task_pages;
 	u16				task_ctx_size;
 	u16				conn_ctx_size;
 };
@@ -338,4 +382,6 @@ u16 ecore_get_cdut_num_pf_init_pages(struct ecore_hwfn *p_hwfn);
 u16 ecore_get_cdut_num_vf_init_pages(struct ecore_hwfn *p_hwfn);
 u16 ecore_get_cdut_num_pf_work_pages(struct ecore_hwfn *p_hwfn);
 u16 ecore_get_cdut_num_vf_work_pages(struct ecore_hwfn *p_hwfn);
+
+void ecore_tm_clear_vf_ilt(struct ecore_hwfn *p_hwfn, u16 vf_idx);
 #endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index 6c8b2831c..fa85a0dc8 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_CXT_API_H__
 #define __ECORE_CXT_API_H__
 
@@ -24,15 +24,26 @@ struct ecore_tid_mem {
 };
 
 /**
-* @brief ecoreo_cid_get_cxt_info - Returns the context info for a specific cid
-*
-*
-* @param p_hwfn
-* @param p_info in/out
-*
-* @return enum _ecore_status_t
-*/
+ * @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
+ *
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return enum _ecore_status_t
+ */
 enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
 					    struct ecore_cxt_info *p_info);
 
+/**
+ * @brief ecore_cxt_get_tid_mem_info
+ *
+ * @param p_hwfn
+ * @param p_info
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+						struct ecore_tid_mem *p_info);
+
 #endif
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index f94efce72..627975369 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #include "bcm_osal.h"
 #include "ecore.h"
 #include "ecore_sp_commands.h"
@@ -15,6 +15,10 @@
 
 #define ECORE_DCBX_MAX_MIB_READ_TRY	(100)
 #define ECORE_ETH_TYPE_DEFAULT		(0)
+#define ECORE_ETH_TYPE_ROCE		(0x8915)
+#define ECORE_UDP_PORT_TYPE_ROCE_V2	(0x12B7)
+#define ECORE_ETH_TYPE_FCOE		(0x8906)
+#define ECORE_TCP_PORT_ISCSI		(0xCBC)
 
 #define ECORE_DCBX_INVALID_PRIORITY	0xFF
 
@@ -22,7 +26,7 @@
  * the traffic class corresponding to the priority.
  */
 #define ECORE_DCBX_PRIO2TC(prio_tc_tbl, prio) \
-		((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
+		((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0xF)
 
 static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
 {
@@ -70,6 +74,56 @@ static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 	return !!(ethtype && (proto_id == ECORE_ETH_TYPE_DEFAULT));
 }
 
+static bool ecore_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+	bool port;
+
+	if (ieee)
+		port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+						DCBX_APP_SF_IEEE_TCP_PORT);
+	else
+		port = ecore_dcbx_app_port(app_info_bitmap);
+
+	return !!(port && (proto_id == ECORE_TCP_PORT_ISCSI));
+}
+
+static bool ecore_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+	bool ethtype;
+
+	if (ieee)
+		ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+	else
+		ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
+
+	return !!(ethtype && (proto_id == ECORE_ETH_TYPE_FCOE));
+}
+
+static bool ecore_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+	bool ethtype;
+
+	if (ieee)
+		ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+	else
+		ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
+
+	return !!(ethtype && (proto_id == ECORE_ETH_TYPE_ROCE));
+}
+
+static bool ecore_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+	bool port;
+
+	if (ieee)
+		port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+						DCBX_APP_SF_IEEE_UDP_PORT);
+	else
+		port = ecore_dcbx_app_port(app_info_bitmap);
+
+	return !!(port && (proto_id == ECORE_UDP_PORT_TYPE_ROCE_V2));
+}
+
 static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 				 u16 proto_id, bool ieee)
 {
@@ -92,7 +146,7 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
 		       struct ecore_dcbx_results *p_data)
 {
 	enum dcbx_protocol_type id;
-	int i;
+	u32 i;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n",
 		   p_data->dcbx_enabled);
@@ -101,10 +155,8 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
 		id = ecore_dcbx_app_update[i].id;
 
 		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
-			   "%s info: update %d, enable %d, prio %d, tc %d,"
-			   " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
-			   ecore_dcbx_app_update[i].name,
-			   p_data->arr[id].update,
+			   "%s info: update %d, enable %d, prio %d, tc %d, num_active_tc %d dscp_enable = %d dscp_val = %d\n",
+			   ecore_dcbx_app_update[i].name, p_data->arr[id].update,
 			   p_data->arr[id].enable, p_data->arr[id].priority,
 			   p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc,
 			   p_data->arr[id].dscp_enable,
@@ -130,7 +182,7 @@ u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
 static void
 ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
 		      struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
-		      bool enable, u8 prio, u8 tc,
+		      bool app_tlv, bool enable, u8 prio, u8 tc,
 		      enum dcbx_protocol_type type,
 		      enum ecore_pci_personality personality)
 {
@@ -143,21 +195,21 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
 		p_data->arr[type].dscp_enable = false;
 		p_data->arr[type].dscp_val = 0;
 	} else {
-		p_data->arr[type].dscp_enable = true;
+		p_data->arr[type].dscp_enable = enable;
 	}
+
 	p_data->arr[type].update = UPDATE_DCB_DSCP;
 
-	/* Do not add valn tag 0 when DCB is enabled and port is in UFP mode */
-	if (OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+	if (OSAL_TEST_BIT(ECORE_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->p_dev->mf_bits))
 		p_data->arr[type].dont_add_vlan0 = true;
 
 	/* QM reconf data */
-	if (p_hwfn->hw_info.personality == personality)
-		p_hwfn->hw_info.offload_tc = tc;
+	if (app_tlv && p_hwfn->hw_info.personality == personality)
+		ecore_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
 
 	/* Configure dcbx vlan priority in doorbell block for roce EDPM */
-	if (OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) &&
-	    type == DCBX_PROTOCOL_ROCE) {
+	if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) &&
+	    (type == DCBX_PROTOCOL_ROCE)) {
 		ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
 		ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
 	}
@@ -167,12 +219,12 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
 static void
 ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
 			   struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
-			   bool enable, u8 prio, u8 tc,
+			   bool app_tlv, bool enable, u8 prio, u8 tc,
 			   enum dcbx_protocol_type type)
 {
 	enum ecore_pci_personality personality;
 	enum dcbx_protocol_type id;
-	int i;
+	u32 i;
 
 	for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
 		id = ecore_dcbx_app_update[i].id;
@@ -182,7 +234,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
 
 		personality = ecore_dcbx_app_update[i].personality;
 
-		ecore_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
+		ecore_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
 				      prio, tc, type, personality);
 	}
 }
@@ -221,12 +273,22 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
 				 u32 app_prio_bitmap, u16 id,
 				 enum dcbx_protocol_type *type, bool ieee)
 {
-	if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
+	if (ecore_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
+		*type = DCBX_PROTOCOL_FCOE;
+	} else if (ecore_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
+		*type = DCBX_PROTOCOL_ROCE;
+	} else if (ecore_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
+		*type = DCBX_PROTOCOL_ISCSI;
+	} else if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
 		*type = DCBX_PROTOCOL_ETH;
+	} else if (ecore_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
+		*type = DCBX_PROTOCOL_ROCE_V2;
+	} else if (ecore_dcbx_iwarp_tlv(p_hwfn, app_prio_bitmap, id, ieee)) {
+		*type = DCBX_PROTOCOL_IWARP;
 	} else {
 		*type = DCBX_MAX_PROTOCOL_TYPE;
 		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
-			    "No action required, App TLV entry = 0x%x\n",
+			   "No action required, App TLV entry = 0x%x\n",
 			   app_prio_bitmap);
 		return false;
 	}
@@ -287,13 +349,13 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 				enable = true;
 			}
 
-			ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
+			ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
 						   enable, priority, tc, type);
 		}
 	}
 
 	/* If Eth TLV is not detected, use UFP TC as default TC */
-	if (OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC,
+	if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC,
 			  &p_hwfn->p_dev->mf_bits) && !eth_tlv)
 		p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
 
@@ -310,9 +372,9 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 			continue;
 
 		/* if no app tlv was present, don't override in FW */
-		ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
-					  p_data->arr[DCBX_PROTOCOL_ETH].enable,
-					  priority, tc, type);
+		ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false,
+					   p_data->arr[DCBX_PROTOCOL_ETH].enable,
+					   priority, tc, type);
 	}
 
 	return ECORE_SUCCESS;
@@ -399,16 +461,14 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
 		read_count++;
 
 		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
-			   "mib type = %d, try count = %d prefix seq num  ="
-			   " %d suffix seq num = %d\n",
+			   "mib type = %d, try count = %d prefix seq num  = %d suffix seq num = %d\n",
 			   type, read_count, prefix_seq_num, suffix_seq_num);
 	} while ((prefix_seq_num != suffix_seq_num) &&
 		 (read_count < ECORE_DCBX_MAX_MIB_READ_TRY));
 
 	if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
 		DP_ERR(p_hwfn,
-		       "MIB read err, mib type = %d, try count ="
-		       " %d prefix seq num = %d suffix seq num = %d\n",
+		       "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
 		       type, read_count, prefix_seq_num, suffix_seq_num);
 		rc = ECORE_IO;
 	}
@@ -423,12 +483,36 @@ ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
 {
 	u8 val;
 
+	p_prio->roce = ECORE_DCBX_INVALID_PRIORITY;
+	p_prio->roce_v2 = ECORE_DCBX_INVALID_PRIORITY;
+	p_prio->iscsi = ECORE_DCBX_INVALID_PRIORITY;
+	p_prio->fcoe = ECORE_DCBX_INVALID_PRIORITY;
+
+	if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+	    p_results->arr[DCBX_PROTOCOL_ROCE].enable)
+		p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+
+	if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+	    p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
+		val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
+		p_prio->roce_v2 = val;
+	}
+
+	if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+	    p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
+		p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+
+	if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
+	    p_results->arr[DCBX_PROTOCOL_FCOE].enable)
+		p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
+
 	if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
 	    p_results->arr[DCBX_PROTOCOL_ETH].enable)
 		p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
-		   "Priorities: eth %d\n",
+		   "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
+		   p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
 		   p_prio->eth);
 }
 
@@ -474,8 +558,7 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
 				entry->sf_ieee = ECORE_DCBX_SF_IEEE_UDP_PORT;
 				break;
 			case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
-				entry->sf_ieee =
-						ECORE_DCBX_SF_IEEE_TCP_UDP_PORT;
+				entry->sf_ieee = ECORE_DCBX_SF_IEEE_TCP_UDP_PORT;
 				break;
 			}
 		} else {
@@ -506,6 +589,7 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
 
 	p_params->pfc.willing = GET_MFW_FIELD(pfc, DCBX_PFC_WILLING);
 	p_params->pfc.max_tc = GET_MFW_FIELD(pfc, DCBX_PFC_CAPS);
+	p_params->pfc.mbc = GET_MFW_FIELD(pfc, DCBX_PFC_MBC);
 	p_params->pfc.enabled = GET_MFW_FIELD(pfc, DCBX_PFC_ENABLED);
 	pfc_map = GET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
 	p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
@@ -518,9 +602,9 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
 	p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
-		   "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+		   "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d mbc = %d\n",
 		   p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
-		   p_params->pfc.enabled);
+		   p_params->pfc.enabled, p_params->pfc.mbc);
 }
 
 static void
@@ -540,7 +624,12 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
 		   p_params->ets_willing, p_params->ets_enabled,
 		   p_params->ets_cbs, p_ets->pri_tc_tbl[0],
 		   p_params->max_ets_tc);
-
+	if (p_params->ets_enabled && !p_params->max_ets_tc) {
+		p_params->max_ets_tc = ECORE_MAX_PFC_PRIORITIES;
+		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+			   "ETS params: max_ets_tc is forced to %d\n",
+		   p_params->max_ets_tc);
+	}
 	/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
 	 * encoded in a type u32 array of size 2.
 	 */
@@ -600,8 +689,8 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
 	params->remote.valid = true;
 }
 
-static void  ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
-					struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+				       struct ecore_dcbx_get *params)
 {
 	struct ecore_dcbx_dscp_params *p_dscp;
 	struct dcb_dscp_map *p_dscp_map;
@@ -616,7 +705,7 @@ static void  ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
 	 * where each entry holds the 4bit priority map for 8 dscp entries.
 	 */
 	for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
-		pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
+		pri_map = p_dscp_map->dscp_pri_map[i];
 		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
 			   entry, pri_map);
 		for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
@@ -785,7 +874,7 @@ ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn,
 
 	OSAL_MEM_ZERO(&data, sizeof(data));
 	data.addr = p_hwfn->mcp_info->port_addr +
-	    offsetof(struct public_port, operational_dcbx_mib);
+		    offsetof(struct public_port, operational_dcbx_mib);
 	data.mib = &p_hwfn->p_dcbx_info->operational;
 	data.size = sizeof(struct dcbx_mib);
 	rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
@@ -803,7 +892,7 @@ ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn,
 
 	OSAL_MEM_ZERO(&data, sizeof(data));
 	data.addr = p_hwfn->mcp_info->port_addr +
-	    offsetof(struct public_port, remote_dcbx_mib);
+		    offsetof(struct public_port, remote_dcbx_mib);
 	data.mib = &p_hwfn->p_dcbx_info->remote;
 	data.size = sizeof(struct dcbx_mib);
 	rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
@@ -819,7 +908,7 @@ ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 
 	OSAL_MEM_ZERO(&data, sizeof(data));
 	data.addr = p_hwfn->mcp_info->port_addr +
-	    offsetof(struct public_port, local_admin_dcbx_mib);
+			offsetof(struct public_port, local_admin_dcbx_mib);
 	data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
 	data.size = sizeof(struct dcbx_local_params);
 	ecore_memcpy_from(p_hwfn, p_ptt, data.local_admin,
@@ -867,6 +956,61 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
 		DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
 	}
 
+	return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_dscp_map_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			   bool b_en)
+{
+	struct ecore_dev *p_dev = p_hwfn->p_dev;
+	u8 ppfid, abs_ppfid, pfid;
+	u32 addr, val;
+	u16 fid;
+	enum _ecore_status_t rc;
+
+	if (!OSAL_TEST_BIT(ECORE_MF_DSCP_TO_TC_MAP, &p_dev->mf_bits))
+		return ECORE_INVAL;
+
+	if (ECORE_IS_E4(p_hwfn->p_dev)) {
+		addr = NIG_REG_DSCP_TO_TC_MAP_ENABLE_BB_K2;
+		val = b_en ? 0x1 : 0x0;
+	} else { /* E5 */
+		addr = NIG_REG_LLH_TC_CLS_DSCP_MODE_E5;
+		val = b_en ? 0x2  /* L2-PRI if exists, else L3-DSCP */
+			   : 0x0; /* L2-PRI only */
+	}
+
+	if (!ECORE_IS_AH(p_dev))
+		return ecore_all_ppfids_wr(p_hwfn, p_ptt, addr, val);
+
+	/* Workaround for a HW bug in E4 (only AH is affected):
+	 * Instead of writing to "NIG_REG_DSCP_TO_TC_MAP_ENABLE[ppfid]", write
+	 * to "NIG_REG_DSCP_TO_TC_MAP_ENABLE[n]", where "n" is the "pfid" which
+	 * is read from "NIG_REG_LLH_PPFID2PFID_TBL[ppfid]".
+	 */
+	for (ppfid = 0; ppfid < ecore_llh_get_num_ppfid(p_dev); ppfid++) {
+		rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		/* Cannot just take "rel_pf_id" since a ppfid could have been
+		 * loaned to another pf (e.g. RDMA bonding).
+		 */
+		pfid = (u8)ecore_rd(p_hwfn, p_ptt,
+				    NIG_REG_LLH_PPFID2PFID_TBL_0 +
+				    abs_ppfid * 0x4);
+
+		fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, pfid);
+		ecore_fid_pretend(p_hwfn, p_ptt, fid);
+
+		ecore_wr(p_hwfn, p_ptt, addr, val);
+
+		fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+				  p_hwfn->rel_pf_id);
+		ecore_fid_pretend(p_hwfn, p_ptt, fid);
+	}
+
 	return ECORE_SUCCESS;
 }
 
@@ -893,7 +1037,7 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 			/* reconfigure tcs of QM queues according
 			 * to negotiation results
 			 */
-			ecore_qm_reconf(p_hwfn, p_ptt);
+			ecore_qm_reconf_intr(p_hwfn, p_ptt);
 
 			/* update storm FW with negotiation results */
 			ecore_sp_pf_update_dcbx(p_hwfn);
@@ -902,20 +1046,33 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 
 	ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
 
-	/* Update the DSCP to TC mapping enable bit if required */
-	if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
-	    p_hwfn->p_dcbx_info->dscp_nig_update) {
-		u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
-		u32 addr = NIG_REG_DSCP_TO_TC_MAP_ENABLE_BB_K2;
+	if (type == ECORE_DCBX_OPERATIONAL_MIB) {
+		struct ecore_dcbx_results *p_data;
+		u16 val;
+
+		/* Enable/disable the DSCP to TC mapping if required */
+		if (p_hwfn->p_dcbx_info->dscp_nig_update) {
+			bool b_en = p_hwfn->p_dcbx_info->get.dscp.enabled;
+
+			rc = ecore_dcbx_dscp_map_enable(p_hwfn, p_ptt, b_en);
+			if (rc != ECORE_SUCCESS) {
+				DP_NOTICE(p_hwfn, false,
+					  "Failed to update the DSCP to TC mapping enable bit\n");
+				return rc;
+			}
 
-		rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, addr, val);
-		if (rc != ECORE_SUCCESS) {
-			DP_NOTICE(p_hwfn, false,
-				  "Failed to update the DSCP to TC mapping enable bit\n");
-			return rc;
+			p_hwfn->p_dcbx_info->dscp_nig_update = false;
 		}
 
-		p_hwfn->p_dcbx_info->dscp_nig_update = false;
+		/* Configure in NIG which protocols support EDPM and should
+		 * honor PFC.
+		 */
+		p_data = &p_hwfn->p_dcbx_info->results;
+		val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) |
+			(0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc);
+		val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT;
+		val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
+		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
 	}
 
 	OSAL_DCBX_AEN(p_hwfn, type);
@@ -925,6 +1082,15 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 
 enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
 {
+#ifndef __EXTRACT__LINUX__
+	OSAL_BUILD_BUG_ON(ECORE_LLDP_CHASSIS_ID_STAT_LEN !=
+			  LLDP_CHASSIS_ID_STAT_LEN);
+	OSAL_BUILD_BUG_ON(ECORE_LLDP_PORT_ID_STAT_LEN !=
+			  LLDP_PORT_ID_STAT_LEN);
+	OSAL_BUILD_BUG_ON(ECORE_DCBX_MAX_APP_PROTOCOL !=
+			  DCBX_MAX_APP_PROTOCOL);
+#endif
+
 	p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 					  sizeof(*p_hwfn->p_dcbx_info));
 	if (!p_hwfn->p_dcbx_info) {
@@ -942,6 +1108,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
 void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn)
 {
 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
+	p_hwfn->p_dcbx_info = OSAL_NULL;
 }
 
 static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
@@ -963,17 +1130,62 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
 	struct protocol_dcb_data *p_dcb_data;
 	u8 update_flag;
 
+	update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+	p_dest->update_fcoe_dcb_data_mode = update_flag;
+
+	update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+	p_dest->update_roce_dcb_data_mode = update_flag;
+
+	update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+	p_dest->update_rroce_dcb_data_mode = update_flag;
+
+	update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+	p_dest->update_iscsi_dcb_data_mode = update_flag;
 	update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
 	p_dest->update_eth_dcb_data_mode = update_flag;
 	update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
 	p_dest->update_iwarp_dcb_data_mode = update_flag;
 
+	p_dcb_data = &p_dest->fcoe_dcb_data;
+	ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+	p_dcb_data = &p_dest->roce_dcb_data;
+	ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE);
+	p_dcb_data = &p_dest->rroce_dcb_data;
+	ecore_dcbx_update_protocol_data(p_dcb_data, p_src,
+					DCBX_PROTOCOL_ROCE_V2);
+	p_dcb_data = &p_dest->iscsi_dcb_data;
+	ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
 	p_dcb_data = &p_dest->eth_dcb_data;
 	ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
 	p_dcb_data = &p_dest->iwarp_dcb_data;
 	ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_IWARP);
 }
 
+bool ecore_dcbx_get_dscp_state(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_dcbx_get *p_dcbx_info = &p_hwfn->p_dcbx_info->get;
+
+	return p_dcbx_info->dscp.enabled;
+}
+
+u8 ecore_dcbx_get_priority_tc(struct ecore_hwfn *p_hwfn, u8 pri)
+{
+	struct ecore_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get;
+
+	if (pri >= ECORE_MAX_PFC_PRIORITIES) {
+		DP_ERR(p_hwfn, "Invalid priority %d\n", pri);
+		return ECORE_DCBX_DEFAULT_TC;
+	}
+
+	if (!dcbx_info->operational.valid) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+			   "Dcbx parameters not available\n");
+		return ECORE_DCBX_DEFAULT_TC;
+	}
+
+	return dcbx_info->operational.params.ets_pri_tc_tbl[pri];
+}
+
 enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 					     struct ecore_dcbx_get *p_get,
 					     enum ecore_mib_read_type type)
@@ -981,6 +1193,11 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 	struct ecore_ptt *p_ptt;
 	enum _ecore_status_t rc;
 
+#ifndef ASIC_ONLY
+	if (!ecore_mcp_is_init(p_hwfn))
+		return ECORE_INVAL;
+#endif
+
 	if (IS_VF(p_hwfn->p_dev))
 		return ECORE_INVAL;
 
@@ -1008,24 +1225,16 @@ ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
 	u8 pfc_map = 0;
 	int i;
 
-	if (p_params->pfc.willing)
-		*pfc |= DCBX_PFC_WILLING_MASK;
-	else
-		*pfc &= ~DCBX_PFC_WILLING_MASK;
-
-	if (p_params->pfc.enabled)
-		*pfc |= DCBX_PFC_ENABLED_MASK;
-	else
-		*pfc &= ~DCBX_PFC_ENABLED_MASK;
-
-	*pfc &= ~DCBX_PFC_CAPS_MASK;
-	*pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_OFFSET;
+	SET_MFW_FIELD(*pfc, DCBX_PFC_ERROR, 0);
+	SET_MFW_FIELD(*pfc, DCBX_PFC_WILLING, p_params->pfc.willing ? 1 : 0);
+	SET_MFW_FIELD(*pfc, DCBX_PFC_ENABLED, p_params->pfc.enabled ? 1 : 0);
+	SET_MFW_FIELD(*pfc, DCBX_PFC_CAPS, (u32)p_params->pfc.max_tc);
+	SET_MFW_FIELD(*pfc, DCBX_PFC_MBC, p_params->pfc.mbc ? 1 : 0);
 
 	for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
 		if (p_params->pfc.prio[i])
 			pfc_map |= (1 << i);
-	*pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
-	*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_OFFSET);
+	SET_MFW_FIELD(*pfc, DCBX_PFC_PRI_EN_BITMAP, pfc_map);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
 }
@@ -1039,23 +1248,14 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
 	u32 val;
 	int i;
 
-	if (p_params->ets_willing)
-		p_ets->flags |= DCBX_ETS_WILLING_MASK;
-	else
-		p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
-
-	if (p_params->ets_cbs)
-		p_ets->flags |= DCBX_ETS_CBS_MASK;
-	else
-		p_ets->flags &= ~DCBX_ETS_CBS_MASK;
-
-	if (p_params->ets_enabled)
-		p_ets->flags |= DCBX_ETS_ENABLED_MASK;
-	else
-		p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
-
-	p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
-	p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_OFFSET;
+	SET_MFW_FIELD(p_ets->flags, DCBX_ETS_WILLING,
+		      p_params->ets_willing ? 1 : 0);
+	SET_MFW_FIELD(p_ets->flags, DCBX_ETS_CBS,
+		      p_params->ets_cbs ? 1 : 0);
+	SET_MFW_FIELD(p_ets->flags, DCBX_ETS_ENABLED,
+		      p_params->ets_enabled ? 1 : 0);
+	SET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS,
+		      (u32)p_params->max_ets_tc);
 
 	bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
 	tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
@@ -1089,66 +1289,55 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
 	u32 *entry;
 	int i;
 
-	if (p_params->app_willing)
-		p_app->flags |= DCBX_APP_WILLING_MASK;
-	else
-		p_app->flags &= ~DCBX_APP_WILLING_MASK;
-
-	if (p_params->app_valid)
-		p_app->flags |= DCBX_APP_ENABLED_MASK;
-	else
-		p_app->flags &= ~DCBX_APP_ENABLED_MASK;
-
-	p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
-	p_app->flags |= (u32)p_params->num_app_entries <<
-			DCBX_APP_NUM_ENTRIES_OFFSET;
+	SET_MFW_FIELD(p_app->flags, DCBX_APP_WILLING,
+		      p_params->app_willing ? 1 : 0);
+	SET_MFW_FIELD(p_app->flags, DCBX_APP_ENABLED,
+		      p_params->app_valid ? 1 : 0);
+	SET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES,
+		      (u32)p_params->num_app_entries);
 
 	for (i = 0; i < p_params->num_app_entries; i++) {
 		entry = &p_app->app_pri_tbl[i].entry;
 		*entry = 0;
 		if (ieee) {
-			*entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+			SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE, 0);
+			SET_MFW_FIELD(*entry, DCBX_APP_SF, 0);
 			switch (p_params->app_entry[i].sf_ieee) {
 			case ECORE_DCBX_SF_IEEE_ETHTYPE:
-				*entry  |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
-					    DCBX_APP_SF_IEEE_OFFSET);
-				*entry  |= ((u32)DCBX_APP_SF_ETHTYPE <<
-					    DCBX_APP_SF_OFFSET);
+				SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE,
+					      (u32)DCBX_APP_SF_IEEE_ETHTYPE);
+				SET_MFW_FIELD(*entry, DCBX_APP_SF,
+					      (u32)DCBX_APP_SF_ETHTYPE);
 				break;
 			case ECORE_DCBX_SF_IEEE_TCP_PORT:
-				*entry  |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
-					    DCBX_APP_SF_IEEE_OFFSET);
-				*entry  |= ((u32)DCBX_APP_SF_PORT <<
-					    DCBX_APP_SF_OFFSET);
+				SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE,
+					      (u32)DCBX_APP_SF_IEEE_TCP_PORT);
+				SET_MFW_FIELD(*entry, DCBX_APP_SF,
+					      (u32)DCBX_APP_SF_PORT);
 				break;
 			case ECORE_DCBX_SF_IEEE_UDP_PORT:
-				*entry  |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
-					    DCBX_APP_SF_IEEE_OFFSET);
-				*entry  |= ((u32)DCBX_APP_SF_PORT <<
-					    DCBX_APP_SF_OFFSET);
+				SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE,
+					      (u32)DCBX_APP_SF_IEEE_UDP_PORT);
+				SET_MFW_FIELD(*entry, DCBX_APP_SF,
+					      (u32)DCBX_APP_SF_PORT);
 				break;
 			case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
-				*entry  |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
-					    DCBX_APP_SF_IEEE_OFFSET;
-				*entry  |= ((u32)DCBX_APP_SF_PORT <<
-					    DCBX_APP_SF_OFFSET);
+				SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE,
+					      (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT);
+				SET_MFW_FIELD(*entry, DCBX_APP_SF,
+					      (u32)DCBX_APP_SF_PORT);
 				break;
 			}
 		} else {
-			*entry &= ~DCBX_APP_SF_MASK;
-			if (p_params->app_entry[i].ethtype)
-				*entry  |= ((u32)DCBX_APP_SF_ETHTYPE <<
-					    DCBX_APP_SF_OFFSET);
-			else
-				*entry  |= ((u32)DCBX_APP_SF_PORT <<
-					    DCBX_APP_SF_OFFSET);
+			SET_MFW_FIELD(*entry, DCBX_APP_SF,
+				      p_params->app_entry[i].ethtype ?
+				      (u32)DCBX_APP_SF_ETHTYPE :
+				      (u32)DCBX_APP_SF_PORT);
 		}
-		*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
-		*entry |= ((u32)p_params->app_entry[i].proto_id <<
-			   DCBX_APP_PROTOCOL_ID_OFFSET);
-		*entry &= ~DCBX_APP_PRI_MAP_MASK;
-		*entry |= ((u32)(p_params->app_entry[i].prio) <<
-			   DCBX_APP_PRI_MAP_OFFSET);
+		SET_MFW_FIELD(*entry, DCBX_APP_PROTOCOL_ID,
+			      (u32)p_params->app_entry[i].proto_id);
+		SET_MFW_FIELD(*entry, DCBX_APP_PRI_MAP,
+			      (u32)(1 << p_params->app_entry[i].prio));
 	}
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
@@ -1200,9 +1389,8 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
 	OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
 		    sizeof(*p_dscp_map));
 
-	p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
-	if (p_params->dscp.enabled)
-		p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;
+	SET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE,
+		      p_params->dscp.enabled ? 1 : 0);
 
 	for (i = 0, entry = 0; i < 8; i++) {
 		val = 0;
@@ -1210,7 +1398,7 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
 			val |= (((u32)p_params->dscp.dscp_pri_map[entry]) <<
 				(j * 4));
 
-		p_dscp_map->dscp_pri_map[i] = OSAL_CPU_TO_BE32(val);
+		p_dscp_map->dscp_pri_map[i] = val;
 	}
 
 	p_hwfn->p_dcbx_info->dscp_nig_update = true;
@@ -1231,10 +1419,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
 					      struct ecore_dcbx_set *params,
 					      bool hw_commit)
 {
+	u32 resp = 0, param = 0, drv_mb_param = 0;
 	struct dcbx_local_params local_admin;
 	struct ecore_dcbx_mib_meta_data data;
 	struct dcb_dscp_map dscp_map;
-	u32 resp = 0, param = 0;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 
 	OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
@@ -1263,8 +1451,9 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
 				data.size);
 	}
 
+	SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_LLDP_SEND, 1);
 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
-			   1 << DRV_MB_PARAM_LLDP_SEND_OFFSET, &resp, &param);
+			   drv_mb_param, &resp, &param);
 	if (rc != ECORE_SUCCESS)
 		DP_NOTICE(p_hwfn, false,
 			  "Failed to send DCBX update request\n");
@@ -1276,7 +1465,7 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
 						  struct ecore_dcbx_set *params)
 {
 	struct ecore_dcbx_get *dcbx_info;
-	int rc;
+	enum _ecore_status_t rc;
 
 	if (p_hwfn->p_dcbx_info->set.config.valid) {
 		OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
@@ -1557,6 +1746,11 @@ ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn,
 	struct ecore_dcbx_get *p_dcbx_info;
 	enum _ecore_status_t rc;
 
+	if (IS_VF(p_hwfn->p_dev)) {
+		DP_ERR(p_hwfn->p_dev, "ecore rdma get dscp priority not supported for VF.\n");
+		return ECORE_INVAL;
+	}
+
 	if (dscp_index >= ECORE_DCBX_DSCP_SIZE) {
 		DP_ERR(p_hwfn, "Invalid dscp index %d\n", dscp_index);
 		return ECORE_INVAL;
@@ -1588,6 +1782,11 @@ ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 	struct ecore_dcbx_set dcbx_set;
 	enum _ecore_status_t rc;
 
+	if (IS_VF(p_hwfn->p_dev)) {
+		DP_ERR(p_hwfn->p_dev, "ecore rdma set dscp priority not supported for VF.\n");
+		return ECORE_INVAL;
+	}
+
 	if (dscp_index >= ECORE_DCBX_DSCP_SIZE ||
 	    pri_val >= ECORE_MAX_PFC_PRIORITIES) {
 		DP_ERR(p_hwfn, "Invalid dscp params: index = %d pri = %d\n",
@@ -1605,3 +1804,58 @@ ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 
 	return ecore_dcbx_config_params(p_hwfn, p_ptt, &dcbx_set, 1);
 }
+
+enum _ecore_status_t
+ecore_lldp_get_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+		     struct ecore_lldp_stats *p_params)
+{
+	u32 mcp_resp = 0, mcp_param = 0, drv_mb_param = 0, addr, val;
+	struct lldp_stats_stc lldp_stats;
+	enum _ecore_status_t rc;
+
+	switch (p_params->agent) {
+	case ECORE_LLDP_NEAREST_BRIDGE:
+		val = LLDP_NEAREST_BRIDGE;
+		break;
+	case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+		val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+		break;
+	case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+		val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+		break;
+	default:
+		DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
+		return ECORE_INVAL;
+	}
+
+	SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_LLDP_STATS_AGENT, val);
+	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_LLDP_STATS,
+			   drv_mb_param, &mcp_resp, &mcp_param);
+	if (rc != ECORE_SUCCESS) {
+		DP_ERR(p_hwfn, "GET_LLDP_STATS failed, error = %d\n", rc);
+		return rc;
+	}
+
+	addr = p_hwfn->mcp_info->drv_mb_addr +
+		OFFSETOF(struct public_drv_mb, union_data);
+
+	ecore_memcpy_from(p_hwfn, p_ptt, &lldp_stats, addr, sizeof(lldp_stats));
+
+	p_params->tx_frames = lldp_stats.tx_frames_total;
+	p_params->rx_frames = lldp_stats.rx_frames_total;
+	p_params->rx_discards = lldp_stats.rx_frames_discarded;
+	p_params->rx_age_outs = lldp_stats.rx_age_outs;
+
+	return ECORE_SUCCESS;
+}
+
+bool ecore_dcbx_is_enabled(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_dcbx_operational_params *op_params =
+		&p_hwfn->p_dcbx_info->get.operational;
+
+	if (op_params->valid && op_params->enabled)
+		return true;
+
+	return false;
+}
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index 519e6ceaa..57c07c59f 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_DCBX_H__
 #define __ECORE_DCBX_H__
 
@@ -45,18 +45,26 @@ struct ecore_dcbx_mib_meta_data {
 
 /* ECORE local interface routines */
 enum _ecore_status_t
-ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
-			    enum ecore_mib_read_type);
+ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			    enum ecore_mib_read_type type);
 
 enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
 void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn);
 void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
 				     struct pf_update_ramrod_data *p_dest);
 
+#define ECORE_DCBX_DEFAULT_TC	0
+
+u8 ecore_dcbx_get_priority_tc(struct ecore_hwfn *p_hwfn, u8 pri);
+
+bool ecore_dcbx_get_dscp_state(struct ecore_hwfn *p_hwfn);
+
 /* Returns TOS value for a given priority */
 u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri);
 
 enum _ecore_status_t
 ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
 
+bool ecore_dcbx_is_enabled(struct ecore_hwfn *p_hwfn);
+
 #endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 6fad2ecc2..26616f0b9 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_DCBX_API_H__
 #define __ECORE_DCBX_API_H__
 
@@ -30,7 +30,7 @@ struct ecore_dcbx_app_data {
 	bool dont_add_vlan0;	/* Do not insert a vlan tag with id 0 */
 };
 
-#ifndef __EXTRACT__LINUX__
+#ifndef __EXTRACT__LINUX__IF__
 enum dcbx_protocol_type {
 	DCBX_PROTOCOL_ISCSI,
 	DCBX_PROTOCOL_FCOE,
@@ -72,6 +72,7 @@ struct ecore_dcbx_app_prio {
 struct ecore_dbcx_pfc_params {
 	bool	willing;
 	bool	enabled;
+	bool	mbc;
 	u8	prio[ECORE_MAX_PFC_PRIORITIES];
 	u8	max_tc;
 };
@@ -86,7 +87,6 @@ enum ecore_dcbx_sf_ieee_type {
 struct ecore_app_entry {
 	bool ethtype;
 	enum ecore_dcbx_sf_ieee_type sf_ieee;
-	bool enabled;
 	u8 prio;
 	u16 proto_id;
 	enum dcbx_protocol_type proto_type;
@@ -199,17 +199,26 @@ struct ecore_lldp_sys_tlvs {
 	u16 buf_size;
 };
 
-enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
-					     struct ecore_dcbx_get *,
-					     enum ecore_mib_read_type);
+struct ecore_lldp_stats {
+	enum ecore_lldp_agent agent;
+	u32 tx_frames;
+	u32 rx_frames;
+	u32 rx_discards;
+	u32 rx_age_outs;
+};
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+					     struct ecore_dcbx_get *p_get,
+					     enum ecore_mib_read_type type);
 
-enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *,
-						  struct ecore_dcbx_set *);
+enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
+						  struct ecore_dcbx_set
+						  *params);
 
-enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
-					      struct ecore_ptt *,
-					      struct ecore_dcbx_set *,
-					      bool);
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
+					      struct ecore_ptt *p_ptt,
+					      struct ecore_dcbx_set *params,
+					      bool hw_commit);
 
 enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
 					     struct ecore_ptt *p_ptt,
@@ -238,6 +247,11 @@ enum _ecore_status_t
 ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 			     u8 dscp_index, u8 pri_val);
 
+enum _ecore_status_t
+ecore_lldp_get_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+		     struct ecore_lldp_stats *p_params);
+
+#ifndef __EXTRACT__LINUX__C__
 static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
 	{DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
 	{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
@@ -246,5 +260,6 @@ static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
 	{DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
 	{DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
 };
+#endif
 
 #endif /* __ECORE_DCBX_API_H__ */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 5db02d0c4..63e5d6860 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #include "bcm_osal.h"
 #include "reg_addr.h"
 #include "ecore_gtt_reg_addr.h"
@@ -27,8 +27,15 @@
 #include "ecore_iro.h"
 #include "nvm_cfg.h"
 #include "ecore_dcbx.h"
+#include <linux/pci_regs.h> /* @DPDK */
 #include "ecore_l2.h"
 
+#ifdef _NTDDK_
+#pragma warning(push)
+#pragma warning(disable : 28167)
+#pragma warning(disable : 28123)
+#endif
+
 /* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
  * registers involved are not split and thus configuration is a race where
  * some of the PFs configuration might be lost.
@@ -43,6 +50,11 @@ static u32 qm_lock_ref_cnt;
 static bool b_ptt_gtt_init;
 #endif
 
+void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_page_size)
+{
+	p_dev->ilt_page_size = ilt_page_size;
+}
+
 /******************** Doorbell Recovery *******************/
 /* The doorbell recovery mechanism consists of a list of entries which represent
  * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
@@ -60,10 +72,11 @@ struct ecore_db_recovery_entry {
 	u8			hwfn_idx;
 };
 
+/* @DPDK */
 /* display a single doorbell recovery entry */
-void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
-				struct ecore_db_recovery_entry *db_entry,
-				const char *action)
+static void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
+				       struct ecore_db_recovery_entry *db_entry,
+				       const char *action)
 {
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
 		   action, db_entry, db_entry->db_addr, db_entry->db_data,
@@ -72,17 +85,40 @@ void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
 		   db_entry->hwfn_idx);
 }
 
-/* doorbell address sanity (address within doorbell bar range) */
-bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
-			 void *db_data)
+/* find hwfn according to the doorbell address */
+static struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
+						 void OSAL_IOMEM *db_addr)
 {
-	/* make sure doorbell address  is within the doorbell bar */
-	if (db_addr < p_dev->doorbells || (u8 *)db_addr >
-			(u8 *)p_dev->doorbells + p_dev->db_size) {
+	struct ecore_hwfn *p_hwfn;
+
+	/* in CMT doorbell bar is split down the middle between engine 0 and engine 1 */
+	if (ECORE_IS_CMT(p_dev))
+		p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
+			&p_dev->hwfns[0] : &p_dev->hwfns[1];
+	else
+		p_hwfn = ECORE_LEADING_HWFN(p_dev);
+
+	return p_hwfn;
+}
+
+/* doorbell address sanity (address within doorbell bar range) */
+static bool ecore_db_rec_sanity(struct ecore_dev *p_dev,
+				void OSAL_IOMEM *db_addr,
+				enum ecore_db_rec_width db_width,
+				void *db_data)
+{
+	struct ecore_hwfn *p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+	u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
+	/* make sure doorbell address is within the doorbell bar */
+	if (db_addr < p_hwfn->doorbells ||
+	    (u8 OSAL_IOMEM *)db_addr + width >
+	    (u8 OSAL_IOMEM *)p_hwfn->doorbells + p_hwfn->db_size) {
 		OSAL_WARN(true,
 			  "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
-			  db_addr, p_dev->doorbells,
-			  (u8 *)p_dev->doorbells + p_dev->db_size);
+			  db_addr, p_hwfn->doorbells,
+			  (u8 OSAL_IOMEM *)p_hwfn->doorbells + p_hwfn->db_size);
+
 		return false;
 	}
 
@@ -95,24 +131,6 @@ bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
 	return true;
 }
 
-/* find hwfn according to the doorbell address */
-struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
-					  void OSAL_IOMEM *db_addr)
-{
-	struct ecore_hwfn *p_hwfn;
-
-	/* In CMT doorbell bar is split down the middle between engine 0 and
-	 * enigne 1
-	 */
-	if (ECORE_IS_CMT(p_dev))
-		p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
-			&p_dev->hwfns[0] : &p_dev->hwfns[1];
-	else
-		p_hwfn = ECORE_LEADING_HWFN(p_dev);
-
-	return p_hwfn;
-}
-
 /* add a new entry to the doorbell recovery mechanism */
 enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
 					   void OSAL_IOMEM *db_addr,
@@ -123,14 +141,8 @@ enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
 	struct ecore_db_recovery_entry *db_entry;
 	struct ecore_hwfn *p_hwfn;
 
-	/* shortcircuit VFs, for now */
-	if (IS_VF(p_dev)) {
-		DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
-		return ECORE_SUCCESS;
-	}
-
 	/* sanitize doorbell address */
-	if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+	if (!ecore_db_rec_sanity(p_dev, db_addr, db_width, db_data))
 		return ECORE_INVAL;
 
 	/* obtain hwfn from doorbell address */
@@ -171,16 +183,6 @@ enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
 	enum _ecore_status_t rc = ECORE_INVAL;
 	struct ecore_hwfn *p_hwfn;
 
-	/* shortcircuit VFs, for now */
-	if (IS_VF(p_dev)) {
-		DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
-		return ECORE_SUCCESS;
-	}
-
-	/* sanitize doorbell address */
-	if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
-		return ECORE_INVAL;
-
 	/* obtain hwfn from doorbell address */
 	p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
 
@@ -190,12 +192,9 @@ enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
 				 &p_hwfn->db_recovery_info.list,
 				 list_entry,
 				 struct ecore_db_recovery_entry) {
-		/* search according to db_data addr since db_addr is not unique
-		 * (roce)
-		 */
+		/* search according to db_data addr since db_addr is not unique (roce) */
 		if (db_entry->db_data == db_data) {
-			ecore_db_recovery_dp_entry(p_hwfn, db_entry,
-						   "Deleting");
+			ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
 			OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
 					       &p_hwfn->db_recovery_info.list);
 			rc = ECORE_SUCCESS;
@@ -217,40 +216,40 @@ enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
 }
 
 /* initialize the doorbell recovery mechanism */
-enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
+static enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
 {
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");
 
 	/* make sure db_size was set in p_dev */
-	if (!p_hwfn->p_dev->db_size) {
+	if (!p_hwfn->db_size) {
 		DP_ERR(p_hwfn->p_dev, "db_size not set\n");
 		return ECORE_INVAL;
 	}
 
 	OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
 #ifdef CONFIG_ECORE_LOCK_ALLOC
-	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
+	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock,
+				 "db_recov_lock"))
 		return ECORE_NOMEM;
 #endif
 	OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
-	p_hwfn->db_recovery_info.db_recovery_counter = 0;
+	p_hwfn->db_recovery_info.count = 0;
 
 	return ECORE_SUCCESS;
 }
 
 /* destroy the doorbell recovery mechanism */
-void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
+static void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
 	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
-		DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
 		while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
-			db_entry = OSAL_LIST_FIRST_ENTRY(
-						&p_hwfn->db_recovery_info.list,
-						struct ecore_db_recovery_entry,
-						list_entry);
+			db_entry = OSAL_LIST_FIRST_ENTRY(&p_hwfn->db_recovery_info.list,
+							 struct ecore_db_recovery_entry,
+							 list_entry);
 			ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
 			OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
 					       &p_hwfn->db_recovery_info.list);
@@ -260,17 +259,34 @@ void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
 #ifdef CONFIG_ECORE_LOCK_ALLOC
 	OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
 #endif
-	p_hwfn->db_recovery_info.db_recovery_counter = 0;
+	p_hwfn->db_recovery_info.count = 0;
 }
 
 /* print the content of the doorbell recovery mechanism */
 void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+	u32 dp_module;
+	u8 dp_level;
 
 	DP_NOTICE(p_hwfn, false,
-		  "Dispalying doorbell recovery database. Counter was %d\n",
-		  p_hwfn->db_recovery_info.db_recovery_counter);
+		  "Displaying doorbell recovery database. Counter is %d\n",
+		  p_hwfn->db_recovery_info.count);
+
+	if (IS_PF(p_hwfn->p_dev))
+		if (p_hwfn->pf_iov_info->max_db_rec_count > 0)
+			DP_NOTICE(p_hwfn, false,
+				  "Max VF counter is %u (VF %u)\n",
+				  p_hwfn->pf_iov_info->max_db_rec_count,
+				  p_hwfn->pf_iov_info->max_db_rec_vfid);
+
+	/* Save dp_module/dp_level values and enable ECORE_MSG_SPQ verbosity
+	 * to force print the db entries.
+	 */
+	dp_module = p_hwfn->dp_module;
+	p_hwfn->dp_module |= ECORE_MSG_SPQ;
+	dp_level = p_hwfn->dp_level;
+	p_hwfn->dp_level = ECORE_LEVEL_VERBOSE;
 
 	/* protect the list */
 	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
@@ -282,28 +298,27 @@ void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
 	}
 
 	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+	/* Get back to saved dp_module/dp_level values */
+	p_hwfn->dp_module = dp_module;
+	p_hwfn->dp_level = dp_level;
 }
 
 /* ring the doorbell of a single doorbell recovery entry */
-void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
-			    struct ecore_db_recovery_entry *db_entry,
-			    enum ecore_db_rec_exec db_exec)
+static void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
+				   struct ecore_db_recovery_entry *db_entry)
 {
 	/* Print according to width */
 	if (db_entry->db_width == DB_REC_WIDTH_32B)
-		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n",
-			   db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
-			   db_entry->db_addr, *(u32 *)db_entry->db_data);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+			   "ringing doorbell address %p data %x\n",
+			   db_entry->db_addr,
+			   *(u32 *)db_entry->db_data);
 	else
-		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n",
-			   db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+			   "ringing doorbell address %p data %" PRIx64 "\n",
 			   db_entry->db_addr,
-			   *(unsigned long *)(db_entry->db_data));
-
-	/* Sanity */
-	if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
-				 db_entry->db_data))
-		return;
+			   *(u64 *)(db_entry->db_data));
 
 	/* Flush the write combined buffer. Since there are multiple doorbelling
 	 * entities using the same address, if we don't flush, a transaction
@@ -312,14 +327,12 @@ void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
 	OSAL_WMB(p_hwfn->p_dev);
 
 	/* Ring the doorbell */
-	if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
-		if (db_entry->db_width == DB_REC_WIDTH_32B)
-			DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
-				      *(u32 *)(db_entry->db_data));
-		else
-			DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
-					*(u64 *)(db_entry->db_data));
-	}
+	if (db_entry->db_width == DB_REC_WIDTH_32B)
+		DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
+			      *(u32 *)(db_entry->db_data));
+	else
+		DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
+				*(u64 *)(db_entry->db_data));
 
 	/* Flush the write combined buffer. Next doorbell may come from a
 	 * different entity to the same address...
@@ -328,30 +341,21 @@ void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
 }
 
 /* traverse the doorbell recovery entry list and ring all the doorbells */
-void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
-			       enum ecore_db_rec_exec db_exec)
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
 
-	if (db_exec != DB_REC_ONCE) {
-		DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
-			  p_hwfn->db_recovery_info.db_recovery_counter);
-
-		/* track amount of times recovery was executed */
-		p_hwfn->db_recovery_info.db_recovery_counter++;
-	}
+	DP_NOTICE(p_hwfn, false,
+		  "Executing doorbell recovery. Counter is %d\n",
+		  ++p_hwfn->db_recovery_info.count);
 
 	/* protect the list */
 	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
 	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
 				 &p_hwfn->db_recovery_info.list,
 				 list_entry,
-				 struct ecore_db_recovery_entry) {
-		ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
-		if (db_exec == DB_REC_ONCE)
-			break;
-	}
-
+				 struct ecore_db_recovery_entry)
+		ecore_db_recovery_ring(p_hwfn, db_entry);
 	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
 }
 /******************** Doorbell Recovery end ****************/
@@ -364,7 +368,7 @@ enum ecore_llh_filter_type {
 };
 
 struct ecore_llh_mac_filter {
-	u8 addr[ETH_ALEN];
+	u8 addr[ECORE_ETH_ALEN];
 };
 
 struct ecore_llh_protocol_filter {
@@ -661,16 +665,15 @@ ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid)
 	return ECORE_SUCCESS;
 }
 
-static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
-					    u8 rel_ppfid, u8 *p_abs_ppfid)
+enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev, u8 rel_ppfid,
+				     u8 *p_abs_ppfid)
 {
 	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
-	u8 ppfids = p_llh_info->num_ppfid - 1;
 
 	if (rel_ppfid >= p_llh_info->num_ppfid) {
 		DP_NOTICE(p_dev, false,
-			  "rel_ppfid %d is not valid, available indices are 0..%hhd\n",
-			  rel_ppfid, ppfids);
+			  "rel_ppfid %d is not valid, available indices are 0..%hhu\n",
+			  rel_ppfid, p_llh_info->num_ppfid - 1);
 		return ECORE_INVAL;
 	}
 
@@ -679,6 +682,24 @@ static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
 	return ECORE_SUCCESS;
 }
 
+enum _ecore_status_t ecore_llh_map_ppfid_to_pfid(struct ecore_hwfn *p_hwfn,
+						 struct ecore_ptt *p_ptt,
+						 u8 ppfid, u8 pfid)
+{
+	u8 abs_ppfid;
+	u32 addr;
+	enum _ecore_status_t rc;
+
+	rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
+	ecore_wr(p_hwfn, p_ptt, addr, pfid);
+
+	return ECORE_SUCCESS;
+}
+
 static enum _ecore_status_t
 __ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 {
@@ -791,21 +812,21 @@ static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn,
 						 bool avoid_eng_affin)
 {
 	struct ecore_dev *p_dev = p_hwfn->p_dev;
-	u8 ppfid, abs_ppfid;
+	u8 ppfid;
 	enum _ecore_status_t rc;
 
 	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
-		u32 addr;
-
-		rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
-		if (rc != ECORE_SUCCESS)
+		rc = ecore_llh_map_ppfid_to_pfid(p_hwfn, p_ptt, ppfid,
+						 p_hwfn->rel_pf_id);
+		if (rc != ECORE_SUCCESS) {
+			DP_NOTICE(p_dev, false,
+				  "Failed to map ppfid %d to pfid %d\n",
+				  ppfid, p_hwfn->rel_pf_id);
 			return rc;
-
-		addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
-		ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
+		}
 	}
 
-	if (OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
+	if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
 	    !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) {
 		rc = ecore_llh_add_mac_filter(p_dev, 0,
 					      p_hwfn->hw_info.hw_mac_addr);
@@ -833,7 +854,10 @@ enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev)
 	return p_dev->l2_affin_hint ? ECORE_ENG1 : ECORE_ENG0;
 }
 
-/* TBD - should be removed when these definitions are available in reg_addr.h */
+/* TBD -
+ * When the relevant definitions are available in reg_addr.h, the SHIFT
+ * definitions should be removed, and the MASK definitions should be revised.
+ */
 #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK		0x3
 #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT		0
 #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK		0x3
@@ -939,7 +963,7 @@ enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
 	return rc;
 }
 
-struct ecore_llh_filter_details {
+struct ecore_llh_filter_e4_details {
 	u64 value;
 	u32 mode;
 	u32 protocol_type;
@@ -948,10 +972,10 @@ struct ecore_llh_filter_details {
 };
 
 static enum _ecore_status_t
-ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,
-			struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
-			struct ecore_llh_filter_details *p_details,
-			bool b_write_access)
+ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn,
+			   struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
+			   struct ecore_llh_filter_e4_details *p_details,
+			   bool b_write_access)
 {
 	u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
 	struct dmae_params params;
@@ -1035,16 +1059,16 @@ ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,
 }
 
 static enum _ecore_status_t
-ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 			u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
 			u32 high, u32 low)
 {
-	struct ecore_llh_filter_details filter_details;
+	struct ecore_llh_filter_e4_details filter_details;
 
 	filter_details.enable = 1;
 	filter_details.value = ((u64)high << 32) | low;
 	filter_details.hdr_sel =
-		OSAL_GET_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits) ?
+		OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits) ?
 		1 : /* inner/encapsulated header */
 		0;  /* outer/tunnel header */
 	filter_details.protocol_type = filter_prot_type;
@@ -1052,42 +1076,104 @@ ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 			      1 : /* protocol-based classification */
 			      0;  /* MAC-address based classification */
 
-	return ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
-				&filter_details,
-				true /* write access */);
+	return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+					  &filter_details,
+					  true /* write access */);
 }
 
 static enum _ecore_status_t
-ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn,
+ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
 			   struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
 {
-	struct ecore_llh_filter_details filter_details;
+	struct ecore_llh_filter_e4_details filter_details;
 
 	OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
 
-	return ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
-				       &filter_details,
-				       true /* write access */);
+	return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+					  &filter_details,
+					  true /* write access */);
+}
+
+/* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings.
+ * Should be removed when the function is implemented.
+ */
+static enum _ecore_status_t
+ecore_llh_add_filter_e5(struct ecore_hwfn OSAL_UNUSED * p_hwfn,
+			struct ecore_ptt OSAL_UNUSED * p_ptt,
+			u8 OSAL_UNUSED abs_ppfid, u8 OSAL_UNUSED filter_idx,
+			u8 OSAL_UNUSED filter_prot_type, u32 OSAL_UNUSED high,
+			u32 OSAL_UNUSED low)
+{
+	ECORE_E5_MISSING_CODE;
+
+	return ECORE_SUCCESS;
+}
+
+/* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings.
+ * Should be removed when the function is implemented.
+ */
+static enum _ecore_status_t
+ecore_llh_remove_filter_e5(struct ecore_hwfn OSAL_UNUSED * p_hwfn,
+			   struct ecore_ptt OSAL_UNUSED * p_ptt,
+			   u8 OSAL_UNUSED abs_ppfid,
+			   u8 OSAL_UNUSED filter_idx)
+{
+	ECORE_E5_MISSING_CODE;
+
+	return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+		     u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
+		     u32 low)
+{
+	if (ECORE_IS_E4(p_hwfn->p_dev))
+		return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+					       filter_idx, filter_prot_type,
+					       high, low);
+	else /* E5 */
+		return ecore_llh_add_filter_e5(p_hwfn, p_ptt, abs_ppfid,
+					       filter_idx, filter_prot_type,
+					       high, low);
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			u8 abs_ppfid, u8 filter_idx)
+{
+	if (ECORE_IS_E4(p_hwfn->p_dev))
+		return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+						  filter_idx);
+	else /* E5 */
+		return ecore_llh_remove_filter_e5(p_hwfn, p_ptt, abs_ppfid,
+						  filter_idx);
 }
 
 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
-					      u8 mac_addr[ETH_ALEN])
+					      u8 mac_addr[ECORE_ETH_ALEN])
 {
 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
-	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 	union ecore_llh_filter filter;
 	u8 filter_idx, abs_ppfid;
+	struct ecore_ptt *p_ptt;
 	u32 high, low, ref_cnt;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+		return rc;
+
+	if (IS_VF(p_hwfn->p_dev)) {
+		DP_NOTICE(p_dev, false, "Setting MAC to LLH is not supported to VF\n");
+		return ECORE_NOTIMPL;
+	}
+
+	p_ptt = ecore_ptt_acquire(p_hwfn);
 	if (p_ptt == OSAL_NULL)
 		return ECORE_AGAIN;
 
-	if (!OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
-		goto out;
-
 	OSAL_MEM_ZERO(&filter, sizeof(filter));
-	OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
+	OSAL_MEMCPY(filter.mac.addr, mac_addr, ECORE_ETH_ALEN);
 	rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
 					 ECORE_LLH_FILTER_TYPE_MAC,
 					 &filter, &filter_idx, &ref_cnt);
@@ -1204,6 +1290,22 @@ ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev,
 	return ECORE_SUCCESS;
 }
 
+enum _ecore_status_t
+ecore_llh_add_dst_tcp_port_filter(struct ecore_dev *p_dev, u16 dest_port)
+{
+	return ecore_llh_add_protocol_filter(p_dev, 0,
+					ECORE_LLH_FILTER_TCP_DEST_PORT,
+					ECORE_LLH_DONT_CARE, dest_port);
+}
+
+enum _ecore_status_t
+ecore_llh_add_src_tcp_port_filter(struct ecore_dev *p_dev, u16 src_port)
+{
+	return ecore_llh_add_protocol_filter(p_dev, 0,
+					ECORE_LLH_FILTER_TCP_SRC_PORT,
+					src_port, ECORE_LLH_DONT_CARE);
+}
+
 enum _ecore_status_t
 ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
 			      enum ecore_llh_prot_filter_type_t type,
@@ -1212,15 +1314,15 @@ ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 	u8 filter_idx, abs_ppfid, type_bitmap;
-	char str[32];
 	union ecore_llh_filter filter;
 	u32 high, low, ref_cnt;
+	char str[32];
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 
 	if (p_ptt == OSAL_NULL)
 		return ECORE_AGAIN;
 
-	if (!OSAL_GET_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
 		goto out;
 
 	rc = ecore_llh_protocol_filter_stringify(p_dev, type,
@@ -1275,23 +1377,33 @@ ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
 }
 
 void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
-				 u8 mac_addr[ETH_ALEN])
+				 u8 mac_addr[ECORE_ETH_ALEN])
 {
 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
-	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 	union ecore_llh_filter filter;
 	u8 filter_idx, abs_ppfid;
+	struct ecore_ptt *p_ptt;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 	u32 ref_cnt;
 
-	if (p_ptt == OSAL_NULL)
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
 		return;
 
-	if (!OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
-		goto out;
+	if (ECORE_IS_NVMETCP_PERSONALITY(p_hwfn))
+		return;
+
+	if (IS_VF(p_hwfn->p_dev)) {
+		DP_NOTICE(p_dev, false, "Removing MAC from LLH is not supported to VF\n");
+		return;
+	}
+
+	p_ptt = ecore_ptt_acquire(p_hwfn);
+
+	if (p_ptt == OSAL_NULL)
+		return;
 
 	OSAL_MEM_ZERO(&filter, sizeof(filter));
-	OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
+	OSAL_MEMCPY(filter.mac.addr, mac_addr, ECORE_ETH_ALEN);
 	rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
 					    &ref_cnt);
 	if (rc != ECORE_SUCCESS)
@@ -1326,6 +1438,22 @@ void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
 	ecore_ptt_release(p_hwfn, p_ptt);
 }
 
+void ecore_llh_remove_dst_tcp_port_filter(struct ecore_dev *p_dev,
+					  u16 dest_port)
+{
+	ecore_llh_remove_protocol_filter(p_dev, 0,
+					 ECORE_LLH_FILTER_TCP_DEST_PORT,
+					 ECORE_LLH_DONT_CARE, dest_port);
+}
+
+void ecore_llh_remove_src_tcp_port_filter(struct ecore_dev *p_dev,
+					  u16 src_port)
+{
+	ecore_llh_remove_protocol_filter(p_dev, 0,
+					 ECORE_LLH_FILTER_TCP_SRC_PORT,
+					 src_port, ECORE_LLH_DONT_CARE);
+}
+
 void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
 				      enum ecore_llh_prot_filter_type_t type,
 				      u16 source_port_or_eth_type,
@@ -1334,15 +1462,15 @@ void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 	u8 filter_idx, abs_ppfid;
-	char str[32];
 	union ecore_llh_filter filter;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
+	char str[32];
 	u32 ref_cnt;
 
 	if (p_ptt == OSAL_NULL)
 		return;
 
-	if (!OSAL_GET_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
 		goto out;
 
 	rc = ecore_llh_protocol_filter_stringify(p_dev, type,
@@ -1396,8 +1524,8 @@ void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
 	if (p_ptt == OSAL_NULL)
 		return;
 
-	if (!OSAL_GET_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
-	    !OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
+	    !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
 		goto out;
 
 	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
@@ -1411,7 +1539,7 @@ void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
 	for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
 	     filter_idx++) {
 		rc = ecore_llh_remove_filter(p_hwfn, p_ptt,
-						abs_ppfid, filter_idx);
+					     abs_ppfid, filter_idx);
 		if (rc != ECORE_SUCCESS)
 			goto out;
 	}
@@ -1423,8 +1551,8 @@ void ecore_llh_clear_all_filters(struct ecore_dev *p_dev)
 {
 	u8 ppfid;
 
-	if (!OSAL_GET_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
-	    !OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
+	    !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
 		return;
 
 	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++)
@@ -1450,22 +1578,18 @@ enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
-enum _ecore_status_t
-ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
+static enum _ecore_status_t
+ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			u8 ppfid)
 {
-	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
-	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
-	struct ecore_llh_filter_details filter_details;
+	struct ecore_llh_filter_e4_details filter_details;
 	u8 abs_ppfid, filter_idx;
 	u32 addr;
 	enum _ecore_status_t rc;
 
-	if (!p_ptt)
-		return ECORE_AGAIN;
-
 	rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
 	if (rc != ECORE_SUCCESS)
-		goto out;
+		return rc;
 
 	addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
 	DP_NOTICE(p_hwfn, false,
@@ -1476,22 +1600,46 @@ ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
 	for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
 	     filter_idx++) {
 		OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
-		rc =  ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid,
-					      filter_idx, &filter_details,
-					      false /* read access */);
+		rc =  ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+						 filter_idx, &filter_details,
+						 false /* read access */);
 		if (rc != ECORE_SUCCESS)
-			goto out;
+			return rc;
 
 		DP_NOTICE(p_hwfn, false,
-			  "filter %2hhd: enable %d, value 0x%016lx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
+			  "filter %2hhd: enable %d, value 0x%016" PRIx64 ", mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
 			  filter_idx, filter_details.enable,
-			  (unsigned long)filter_details.value,
-			  filter_details.mode,
+			  filter_details.value, filter_details.mode,
 			  filter_details.protocol_type, filter_details.hdr_sel);
 	}
 
+	return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_dump_ppfid_e5(struct ecore_hwfn OSAL_UNUSED * p_hwfn,
+			struct ecore_ptt OSAL_UNUSED * p_ptt,
+			u8 OSAL_UNUSED ppfid)
+{
+	ECORE_E5_MISSING_CODE;
+
+	return ECORE_NOTIMPL;
+}
+
+enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
+{
+	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+	enum _ecore_status_t rc;
+
+	if (p_ptt == OSAL_NULL)
+		return ECORE_AGAIN;
+
+	if (ECORE_IS_E4(p_dev))
+		rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
+	else /* E5 */
+		rc = ecore_llh_dump_ppfid_e5(p_hwfn, p_ptt, ppfid);
 
-out:
 	ecore_ptt_release(p_hwfn, p_ptt);
 
 	return rc;
@@ -1513,15 +1661,6 @@ enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev)
 
 /******************************* NIG LLH - End ********************************/
 
-/* Configurable */
-#define ECORE_MIN_DPIS		(4)	/* The minimal num of DPIs required to
-					 * load the driver. The number was
-					 * arbitrarily set.
-					 */
-
-/* Derived */
-#define ECORE_MIN_PWM_REGION	(ECORE_WID_SIZE * ECORE_MIN_DPIS)
-
 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
 			     struct ecore_ptt *p_ptt,
 			     enum BAR_ID bar_id)
@@ -1544,18 +1683,18 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
 	if (ECORE_IS_CMT(p_hwfn->p_dev)) {
 		DP_INFO(p_hwfn,
 			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
-		val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+		return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
 	} else {
 		DP_INFO(p_hwfn,
 			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
-		val = 512 * 1024;
+		return 512 * 1024;
 	}
-
-	return val;
 }
 
-void ecore_init_dp(struct ecore_dev *p_dev,
-		   u32 dp_module, u8 dp_level, void *dp_ctx)
+void ecore_init_dp(struct ecore_dev	*p_dev,
+		   u32			dp_module,
+		   u8			dp_level,
+		   void		 *dp_ctx)
 {
 	u32 i;
 
@@ -1571,6 +1710,70 @@ void ecore_init_dp(struct ecore_dev *p_dev,
 	}
 }
 
+void ecore_init_int_dp(struct ecore_dev *p_dev, u32 dp_module, u8 dp_level)
+{
+	u32 i;
+
+	p_dev->dp_int_level = dp_level;
+	p_dev->dp_int_module = dp_module;
+	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+		p_hwfn->dp_int_level = dp_level;
+		p_hwfn->dp_int_module = dp_module;
+	}
+}
+
+#define ECORE_DP_INT_LOG_MAX_STR_SIZE 256 /* @DPDK */
+
+void ecore_dp_internal_log(struct ecore_dev *p_dev, char *fmt, ...)
+{
+	char buff[ECORE_DP_INT_LOG_MAX_STR_SIZE];
+	struct ecore_internal_trace *p_int_log;
+	u32 len, partial_len;
+	unsigned long flags;
+	osal_va_list args;
+	char *buf = buff;
+	u32 prod;
+
+	if (!p_dev)
+		return;
+
+	p_int_log = &p_dev->internal_trace;
+
+	if (!p_int_log->buf)
+		return;
+
+	OSAL_VA_START(args, fmt);
+	len = OSAL_VSNPRINTF(buf, ECORE_DP_INT_LOG_MAX_STR_SIZE, fmt, args);
+	OSAL_VA_END(args);
+
+	if (len > ECORE_DP_INT_LOG_MAX_STR_SIZE) {
+		len = ECORE_DP_INT_LOG_MAX_STR_SIZE;
+		buf[len - 1] = '\n';
+	}
+
+	partial_len = len;
+
+	OSAL_SPIN_LOCK_IRQSAVE(&p_int_log->lock, flags);
+	prod = p_int_log->prod % p_int_log->size;
+
+	if (p_int_log->size - prod <= len) {
+		partial_len = p_int_log->size - prod;
+		OSAL_MEMCPY(p_int_log->buf + prod, buf, partial_len);
+		p_int_log->prod += partial_len;
+		prod = p_int_log->prod % p_int_log->size;
+		buf += partial_len;
+		partial_len = len - partial_len;
+	}
+
+	OSAL_MEMCPY(p_int_log->buf + prod, buf, partial_len);
+
+	p_int_log->prod += partial_len;
+
+	OSAL_SPIN_UNLOCK_IRQSAVE(&p_int_log->lock, flags);
+}
+
 enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
 {
 	u8 i;
@@ -1581,19 +1784,32 @@ enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
 		p_hwfn->p_dev = p_dev;
 		p_hwfn->my_id = i;
 		p_hwfn->b_active = false;
+		p_hwfn->p_dummy_cb = ecore_int_dummy_comp_cb;
 
 #ifdef CONFIG_ECORE_LOCK_ALLOC
-		if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
+		if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock,
+					 "dma_info_lock"))
 			goto handle_err;
 #endif
 		OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
 	}
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+	if (OSAL_SPIN_LOCK_ALLOC(&p_dev->hwfns[0], &p_dev->internal_trace.lock,
+				 "internal_trace_lock"))
+		goto handle_err;
+#endif
+	OSAL_SPIN_LOCK_INIT(&p_dev->internal_trace.lock);
+
+	p_dev->p_dev = p_dev;
 
 	/* hwfn 0 is always active */
 	p_dev->hwfns[0].b_active = true;
 
 	/* set the default cache alignment to 128 (may be overridden later) */
 	p_dev->cache_shift = 7;
+
+	p_dev->ilt_page_size = ECORE_DEFAULT_ILT_PAGE_SIZE;
+
 	return ECORE_SUCCESS;
 #ifdef CONFIG_ECORE_LOCK_ALLOC
 handle_err:
@@ -1612,9 +1828,16 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 
 	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
+	qm_info->qm_pq_params = OSAL_NULL;
 	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
+	qm_info->qm_vport_params = OSAL_NULL;
 	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
+	qm_info->qm_port_params = OSAL_NULL;
 	OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
+	qm_info->wfq_data = OSAL_NULL;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+	OSAL_SPIN_LOCK_DEALLOC(&qm_info->qm_info_lock);
+#endif
 }
 
 static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn)
@@ -1627,49 +1850,66 @@ void ecore_resc_free(struct ecore_dev *p_dev)
 {
 	int i;
 
-	if (IS_VF(p_dev)) {
-		for_each_hwfn(p_dev, i)
-			ecore_l2_free(&p_dev->hwfns[i]);
-		return;
-	}
-
-	OSAL_FREE(p_dev, p_dev->fw_data);
-
 	OSAL_FREE(p_dev, p_dev->reset_stats);
+	p_dev->reset_stats = OSAL_NULL;
 
-	ecore_llh_free(p_dev);
+	if (IS_PF(p_dev)) {
+		OSAL_FREE(p_dev, p_dev->fw_data);
+		p_dev->fw_data = OSAL_NULL;
+
+		ecore_llh_free(p_dev);
+	}
 
 	for_each_hwfn(p_dev, i) {
 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 
+		ecore_spq_free(p_hwfn);
+		ecore_l2_free(p_hwfn);
+
+		if (IS_VF(p_dev)) {
+			ecore_db_recovery_teardown(p_hwfn);
+			continue;
+		}
+
 		ecore_cxt_mngr_free(p_hwfn);
 		ecore_qm_info_free(p_hwfn);
-		ecore_spq_free(p_hwfn);
 		ecore_eq_free(p_hwfn);
 		ecore_consq_free(p_hwfn);
 		ecore_int_free(p_hwfn);
+
 		ecore_iov_free(p_hwfn);
-		ecore_l2_free(p_hwfn);
 		ecore_dmae_info_free(p_hwfn);
 		ecore_dcbx_info_free(p_hwfn);
 		ecore_dbg_user_data_free(p_hwfn);
-		ecore_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
-		/* @@@TBD Flush work-queue ? */
+		ecore_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
+		/* @@@TBD Flush work-queue ? */
 
 		/* destroy doorbell recovery mechanism */
 		ecore_db_recovery_teardown(p_hwfn);
 	}
+
+	if (IS_PF(p_dev)) {
+		OSAL_FREE(p_dev, p_dev->fw_data);
+		p_dev->fw_data = OSAL_NULL;
+	}
 }
 
 /******************** QM initialization *******************/
-
-/* bitmaps for indicating active traffic classes.
- * Special case for Arrowhead 4 port
- */
+/* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */
 /* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
-#define ACTIVE_TCS_BMAP 0x9f
-/* 0..3 actually used, OOO and high priority stuff all use 3 */
-#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+#define ACTIVE_TCS_BMAP_E4 0x9f
+#define ACTIVE_TCS_BMAP_E5 0x1f /* 0..3 actually used, 4 serves OOO */
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */
+
+#define ACTIVE_TCS_BMAP(_p_hwfn) \
+	(ECORE_IS_E4((_p_hwfn)->p_dev) ? \
+	 ACTIVE_TCS_BMAP_E4 : ACTIVE_TCS_BMAP_E5)
+
+static u16 ecore_init_qm_get_num_active_vfs(struct ecore_hwfn *p_hwfn)
+{
+	return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
+		p_hwfn->pf_iov_info->max_active_vfs : 0;
+}
 
 /* determines the physical queue flags for a given PF. */
 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
@@ -1680,8 +1920,10 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
 	flags = PQ_FLAGS_LB;
 
 	/* feature flags */
-	if (IS_ECORE_SRIOV(p_hwfn->p_dev))
+	if (ecore_init_qm_get_num_active_vfs(p_hwfn))
 		flags |= PQ_FLAGS_VFS;
+
+	/* @DPDK */
 	if (IS_ECORE_PACING(p_hwfn))
 		flags |= PQ_FLAGS_RLS;
 
@@ -1691,27 +1933,11 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
 		if (!IS_ECORE_PACING(p_hwfn))
 			flags |= PQ_FLAGS_MCOS;
 		break;
-	case ECORE_PCI_FCOE:
-		flags |= PQ_FLAGS_OFLD;
-		break;
-	case ECORE_PCI_ISCSI:
-		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
-		break;
-	case ECORE_PCI_ETH_ROCE:
-		flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
-		if (!IS_ECORE_PACING(p_hwfn))
-			flags |= PQ_FLAGS_MCOS;
-		break;
-	case ECORE_PCI_ETH_IWARP:
-		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
-		if (!IS_ECORE_PACING(p_hwfn))
-			flags |= PQ_FLAGS_MCOS;
-		break;
 	default:
-		DP_ERR(p_hwfn, "unknown personality %d\n",
-		       p_hwfn->hw_info.personality);
+		DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality);
 		return 0;
 	}
+
 	return flags;
 }
 
@@ -1723,8 +1949,53 @@ u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
 
 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
 {
-	return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
-			p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+	return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+}
+
+static u16 ecore_init_qm_get_num_vfs_pqs(struct ecore_hwfn *p_hwfn)
+{
+	u16 num_pqs, num_vfs = ecore_init_qm_get_num_active_vfs(p_hwfn);
+	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+	/* One L2 PQ per VF */
+	num_pqs = num_vfs;
+
+	/* Separate RDMA PQ per VF */
+	if ((PQ_FLAGS_VFR & pq_flags))
+		num_pqs += num_vfs;
+
+	/* Separate RDMA PQ for all VFs */
+	if ((PQ_FLAGS_VSR & pq_flags))
+		num_pqs += 1;
+
+	return num_pqs;
+}
+
+static bool ecore_lag_support(struct ecore_hwfn *p_hwfn)
+{
+	return (ECORE_IS_AH(p_hwfn->p_dev) &&
+		ECORE_IS_ROCE_PERSONALITY(p_hwfn) &&
+		OSAL_TEST_BIT(ECORE_MF_ROCE_LAG, &p_hwfn->p_dev->mf_bits));
+}
+
+static u8 ecore_init_qm_get_num_mtc_tcs(struct ecore_hwfn *p_hwfn)
+{
+	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+	if (!(PQ_FLAGS_MTC & pq_flags))
+		return 1;
+
+	return ecore_init_qm_get_num_tcs(p_hwfn);
+}
+
+static u8 ecore_init_qm_get_num_mtc_pqs(struct ecore_hwfn *p_hwfn)
+{
+	u32 num_ports, num_tcs;
+
+	num_ports = ecore_lag_support(p_hwfn) ? LAG_MAX_PORT_NUM : 1;
+	num_tcs = ecore_init_qm_get_num_mtc_tcs(p_hwfn);
+
+	return num_ports * num_tcs;
 }
 
 #define NUM_DEFAULT_RLS 1
@@ -1733,21 +2004,13 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
 {
 	u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
 
-	/* num RLs can't exceed resource amount of rls or vports or the
-	 * dcqcn qps
-	 */
-	num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
-				     RESC_NUM(p_hwfn, ECORE_VPORT));
+	/* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */
+	num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), RESC_NUM(p_hwfn,
+				     ECORE_VPORT));
 
-	/* make sure after we reserve the default and VF rls we'll have
-	 * something left
-	 */
-	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
-		DP_NOTICE(p_hwfn, false,
-			  "no rate limiters left for PF rate limiting"
-			  " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
+	/* make sure after we reserve the default and VF rls we'll have something left */
+	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
 		return 0;
-	}
 
 	/* subtract rls necessary for VFs and one default one for the PF */
 	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
@@ -1755,17 +2018,40 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
 	return num_pf_rls;
 }
 
+static u16 ecore_init_qm_get_num_rls(struct ecore_hwfn *p_hwfn)
+{
+	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+	u16 num_rls = 0;
+
+	num_rls += (!!(PQ_FLAGS_RLS & pq_flags)) *
+		   ecore_init_qm_get_num_pf_rls(p_hwfn);
+
+	/* RL for each VF L2 PQ */
+	num_rls += (!!(PQ_FLAGS_VFS & pq_flags)) *
+		   ecore_init_qm_get_num_active_vfs(p_hwfn);
+
+	/* RL for each VF RDMA PQ */
+	num_rls += (!!(PQ_FLAGS_VFR & pq_flags)) *
+		   ecore_init_qm_get_num_active_vfs(p_hwfn);
+
+	/* RL for VF RDMA single PQ */
+	num_rls += (!!(PQ_FLAGS_VSR & pq_flags));
+
+	return num_rls;
+}
+
 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
 {
 	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
 
-	/* all pqs share the same vport (hence the 1 below), except for vfs
-	 * and pf_rl pqs
-	 */
-	return (!!(PQ_FLAGS_RLS & pq_flags)) *
-		ecore_init_qm_get_num_pf_rls(p_hwfn) +
-	       (!!(PQ_FLAGS_VFS & pq_flags)) *
-		ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+	/* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */
+	return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
+	       (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+}
+
+static u8 ecore_init_qm_get_group_count(struct ecore_hwfn *p_hwfn)
+{
+	return p_hwfn->qm_info.offload_group_count;
 }
 
 /* calc amount of PQs according to the requested flags */
@@ -1773,16 +2059,15 @@ u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
 {
 	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
 
-	return (!!(PQ_FLAGS_RLS & pq_flags)) *
-		ecore_init_qm_get_num_pf_rls(p_hwfn) +
-	       (!!(PQ_FLAGS_MCOS & pq_flags)) *
-		ecore_init_qm_get_num_tcs(p_hwfn) +
+	return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
+	       (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) +
 	       (!!(PQ_FLAGS_LB & pq_flags)) +
 	       (!!(PQ_FLAGS_OOO & pq_flags)) +
 	       (!!(PQ_FLAGS_ACK & pq_flags)) +
-	       (!!(PQ_FLAGS_OFLD & pq_flags)) +
-	       (!!(PQ_FLAGS_VFS & pq_flags)) *
-		ecore_init_qm_get_num_vfs(p_hwfn);
+	       (!!(PQ_FLAGS_OFLD & pq_flags)) * ecore_init_qm_get_num_mtc_pqs(p_hwfn) +
+	       (!!(PQ_FLAGS_GRP & pq_flags)) * OFLD_GRP_SIZE +
+	       (!!(PQ_FLAGS_LLT & pq_flags)) * ecore_init_qm_get_num_mtc_pqs(p_hwfn) +
+	       (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs_pqs(p_hwfn);
 }
 
 /* initialize the top level QM params */
@@ -1793,7 +2078,8 @@ static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
 
 	/* pq and vport bases for this PF */
 	qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
-	qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+	qm_info->start_vport = (u16)RESC_START(p_hwfn, ECORE_VPORT);
+	qm_info->start_rl = (u16)RESC_START(p_hwfn, ECORE_RL);
 
 	/* rate limiting and weighted fair queueing are always enabled */
 	qm_info->vport_rl_en = 1;
@@ -1803,15 +2089,11 @@ static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
 	four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
 
 	/* in AH 4 port we have fewer TCs per port */
-	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
-						     NUM_OF_PHYS_TCS;
+	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS;
 
-	/* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and
-	 * 4 otherwise
-	 */
+	/* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */
 	if (!qm_info->ooo_tc)
-		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
-					      DCBX_TCP_OOO_TC;
+		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC;
 }
 
 /* initialize qm vport params */
@@ -1834,7 +2116,7 @@ static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
 
 	/* indicate how ooo and high pri traffic is dealt with */
 	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
-		ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
+		ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP(p_hwfn);
 
 	for (i = 0; i < num_ports; i++) {
 		struct init_qm_port_params *p_qm_port =
@@ -1862,11 +2144,14 @@ static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
 
 	qm_info->num_pqs = 0;
 	qm_info->num_vports = 0;
+	qm_info->num_rls = 0;
 	qm_info->num_pf_rls = 0;
 	qm_info->num_vf_pqs = 0;
 	qm_info->first_vf_pq = 0;
 	qm_info->first_mcos_pq = 0;
 	qm_info->first_rl_pq = 0;
+	qm_info->single_vf_rdma_pq = 0;
+	qm_info->pq_overflow = false;
 }
 
 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
@@ -1876,18 +2161,13 @@ static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
 	qm_info->num_vports++;
 
 	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
-		DP_ERR(p_hwfn,
-		       "vport overflow! qm_info->num_vports %d,"
-		       " qm_init_get_num_vports() %d\n",
-		       qm_info->num_vports,
-		       ecore_init_qm_get_num_vports(p_hwfn));
+		DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+				qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
 }
 
 /* initialize a single pq and manage qm_info resources accounting.
- * The pq_init_flags param determines whether the PQ is rate limited
- * (for VF or PF)
- * and whether a new vport is allocated to the pq or not (i.e. vport will be
- * shared)
+ * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF)
+ * and whether a new vport is allocated to the pq or not (i.e. vport will be shared)
  */
 
 /* flags for pq init */
@@ -1898,65 +2178,108 @@ static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
 /* defines for pq init */
 #define PQ_INIT_DEFAULT_WRR_GROUP	1
 #define PQ_INIT_DEFAULT_TC		0
-#define PQ_INIT_OFLD_TC			(p_hwfn->hw_info.offload_tc)
 
-static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
-			     struct ecore_qm_info *qm_info,
-			     u8 tc, u32 pq_init_flags)
+void ecore_hw_info_set_offload_tc(struct ecore_hw_info *p_info, u8 tc)
 {
-	u16 pq_idx = qm_info->num_pqs, max_pq =
-					ecore_init_qm_get_num_pqs(p_hwfn);
+	p_info->offload_tc = tc;
+	p_info->offload_tc_set = true;
+}
 
-	if (pq_idx > max_pq)
-		DP_ERR(p_hwfn,
-		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+static bool ecore_is_offload_tc_set(struct ecore_hwfn *p_hwfn)
+{
+	return p_hwfn->hw_info.offload_tc_set;
+}
+
+u8 ecore_get_offload_tc(struct ecore_hwfn *p_hwfn)
+{
+	if (ecore_is_offload_tc_set(p_hwfn))
+		return p_hwfn->hw_info.offload_tc;
+
+	return PQ_INIT_DEFAULT_TC;
+}
+
+static void ecore_init_qm_pq_port(struct ecore_hwfn *p_hwfn,
+				  struct ecore_qm_info *qm_info,
+				  u8 tc, u32 pq_init_flags, u8 port)
+{
+	u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn);
+	u16 num_pf_pqs;
+
+	if (pq_idx > max_pq) {
+		qm_info->pq_overflow = true;
+		DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+	}
 
 	/* init pq params */
-	qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
-	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
-						 qm_info->num_vports;
+	qm_info->qm_pq_params[pq_idx].port_id = port;
+	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports;
 	qm_info->qm_pq_params[pq_idx].tc_id = tc;
 	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
-	qm_info->qm_pq_params[pq_idx].rl_valid =
-		(pq_init_flags & PQ_INIT_PF_RL ||
-		 pq_init_flags & PQ_INIT_VF_RL);
 
-	/* The "rl_id" is set as the "vport_id" */
-	qm_info->qm_pq_params[pq_idx].rl_id =
-		qm_info->qm_pq_params[pq_idx].vport_id;
+	if (pq_init_flags & (PQ_INIT_PF_RL | PQ_INIT_VF_RL)) {
+		qm_info->qm_pq_params[pq_idx].rl_valid = 1;
+		qm_info->qm_pq_params[pq_idx].rl_id =
+			qm_info->start_rl + qm_info->num_rls++;
+	}
 
 	/* qm params accounting */
 	qm_info->num_pqs++;
+	if (pq_init_flags & PQ_INIT_VF_RL) {
+		qm_info->num_vf_pqs++;
+	} else {
+		num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+		if (qm_info->ilt_pf_pqs && num_pf_pqs > qm_info->ilt_pf_pqs) {
+			qm_info->pq_overflow = true;
+			DP_ERR(p_hwfn,
+			       "ilt overflow! num_pf_pqs %d, qm_info->ilt_pf_pqs %d\n",
+			       num_pf_pqs, qm_info->ilt_pf_pqs);
+		}
+	}
+
 	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
 		qm_info->num_vports++;
 
 	if (pq_init_flags & PQ_INIT_PF_RL)
 		qm_info->num_pf_rls++;
 
-	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
-		DP_ERR(p_hwfn,
-		       "vport overflow! qm_info->num_vports %d,"
-		       " qm_init_get_num_vports() %d\n",
-		       qm_info->num_vports,
-		       ecore_init_qm_get_num_vports(p_hwfn));
+	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) {
+		qm_info->pq_overflow = true;
+		DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+				qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
+	}
 
-	if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
-		DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d,"
-		       " qm_init_get_num_pf_rls() %d\n",
-		       qm_info->num_pf_rls,
-		       ecore_init_qm_get_num_pf_rls(p_hwfn));
+	if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) {
+		qm_info->pq_overflow = true;
+		DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
+		       qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn));
+	}
+}
+
+/* init one qm pq, assume port of the PF */
+static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
+			     struct ecore_qm_info *qm_info,
+			     u8 tc, u32 pq_init_flags)
+{
+	ecore_init_qm_pq_port(p_hwfn, qm_info, tc, pq_init_flags, p_hwfn->port_id);
 }
 
 /* get pq index according to PQ_FLAGS */
 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
-					     u32 pq_flags)
+					     unsigned long pq_flags)
 {
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 
 	/* Can't have multiple flags set here */
-	if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
-				sizeof(pq_flags)) > 1)
+	if (OSAL_BITMAP_WEIGHT(&pq_flags,
+			       sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
+		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
+		goto err;
+	}
+
+	if (!(ecore_get_pq_flags(p_hwfn) & pq_flags)) {
+		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
 		goto err;
+	}
 
 	switch (pq_flags) {
 	case PQ_FLAGS_RLS:
@@ -1970,16 +2293,20 @@ static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
 	case PQ_FLAGS_ACK:
 		return &qm_info->pure_ack_pq;
 	case PQ_FLAGS_OFLD:
-		return &qm_info->offload_pq;
+		return &qm_info->first_ofld_pq;
+	case PQ_FLAGS_LLT:
+		return &qm_info->first_llt_pq;
 	case PQ_FLAGS_VFS:
 		return &qm_info->first_vf_pq;
+	case PQ_FLAGS_GRP:
+		return &qm_info->first_ofld_grp_pq;
+	case PQ_FLAGS_VSR:
+		return &qm_info->single_vf_rdma_pq;
 	default:
 		goto err;
 	}
-
 err:
-	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
-	return OSAL_NULL;
+	return &qm_info->start_pq;
 }
 
 /* save pq index in qm info */
@@ -1991,59 +2318,285 @@ static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
 	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
 }
 
+static u16 ecore_qm_get_start_pq(struct ecore_hwfn *p_hwfn)
+{
+	u16 start_pq;
+
+	OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+	start_pq = p_hwfn->qm_info.start_pq;
+	OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+	return start_pq;
+}
+
 /* get tx pq index, with the PQ TX base already set (ready for context init) */
 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
 {
-	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+	u16 *base_pq_idx;
+	u16 pq_idx;
+
+	OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+	base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+	pq_idx = *base_pq_idx + CM_TX_PQ_BASE;
+	OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+	return pq_idx;
+}
+
+u16 ecore_get_cm_pq_idx_grp(struct ecore_hwfn *p_hwfn, u8 idx)
+{
+	u16 pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_GRP);
+	u8 max_idx = ecore_init_qm_get_group_count(p_hwfn);
 
-	return *base_pq_idx + CM_TX_PQ_BASE;
+	if (max_idx == 0) {
+		DP_ERR(p_hwfn, "pq with flag 0x%x do not exist\n",
+		       PQ_FLAGS_GRP);
+		return ecore_qm_get_start_pq(p_hwfn);
+	}
+
+	if (idx > max_idx)
+		DP_ERR(p_hwfn, "idx %d must be smaller than %d\n", idx, max_idx);
+
+	return pq_idx + (idx % max_idx);
 }
 
 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
 {
+	u16 pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS);
 	u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
 
+	if (max_tc == 0) {
+		DP_ERR(p_hwfn, "pq with flag 0x%x do not exist\n",
+		       PQ_FLAGS_MCOS);
+		return ecore_qm_get_start_pq(p_hwfn);
+	}
+
 	if (tc > max_tc)
 		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
 
-	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
+	return pq_idx + (tc % max_tc);
+}
+
+static u8 ecore_qm_get_pqs_per_vf(struct ecore_hwfn *p_hwfn)
+{
+	u8 pqs_per_vf;
+	u32 pq_flags;
+
+	/* When VFR is set, there is pair of PQs per VF. If VSR is set,
+	 * no additional action required in computing the per VF PQ.
+	 */
+	OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+	pq_flags = ecore_get_pq_flags(p_hwfn);
+	pqs_per_vf = (PQ_FLAGS_VFR & pq_flags) ? 2 : 1;
+	OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+	return pqs_per_vf;
 }
 
 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
 {
-	u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
+	u16 pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS);
+	u16 max_vf = ecore_init_qm_get_num_active_vfs(p_hwfn);
+	u8 pqs_per_vf;
+
+	if (max_vf == 0) {
+		DP_ERR(p_hwfn, "pq with flag 0x%x do not exist\n",
+		       PQ_FLAGS_VFS);
+		return ecore_qm_get_start_pq(p_hwfn);
+	}
 
 	if (vf > max_vf)
 		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
 
-	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
+	pqs_per_vf = ecore_qm_get_pqs_per_vf(p_hwfn);
+
+	return pq_idx + ((vf % max_vf) * pqs_per_vf);
+}
+
+u16 ecore_get_cm_pq_idx_vf_rdma(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+	u32 pq_flags;
+	u16 pq_idx;
+
+	OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+	pq_flags = ecore_get_pq_flags(p_hwfn);
+	OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+	/* If VSR is set, dedicated single PQ for VFs RDMA */
+	if (PQ_FLAGS_VSR & pq_flags)
+		pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VSR);
+	else
+		pq_idx = ecore_get_cm_pq_idx_vf(p_hwfn, vf);
+
+	/* If VFR is set, VF's 2nd PQ is for RDMA */
+	if ((PQ_FLAGS_VFR & pq_flags))
+		pq_idx++;
+
+	return pq_idx;
 }
 
 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
 {
+	u16 pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS);
 	u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
 
-	/* for rate limiters, it is okay to use the modulo behavior - no
-	 * DP_ERR
+	if (max_rl == 0) {
+		DP_ERR(p_hwfn, "pq with flag 0x%x do not exist\n",
+		       PQ_FLAGS_RLS);
+		return ecore_qm_get_start_pq(p_hwfn);
+	}
+
+	/* When an invalid RL index is requested, return the highest
+	 * available RL PQ. "max_rl - 1" is the relative index of the
+	 * last PQ reserved for RLs.
 	 */
-	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + (rl % max_rl);
+	if (rl >= max_rl) {
+		DP_ERR(p_hwfn,
+		       "rl %hu is not a valid rate limiter, returning rl %hu\n",
+		       rl, max_rl - 1);
+		return pq_idx + max_rl - 1;
+	}
+
+	return pq_idx + rl;
 }
 
-u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+static u16 ecore_get_qm_pq_from_cm_pq(struct ecore_hwfn *p_hwfn, u16 cm_pq_id)
 {
-	u16 start_pq, pq, qm_pq_idx;
+	u16 start_pq = ecore_qm_get_start_pq(p_hwfn);
 
-	pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
-	start_pq = p_hwfn->qm_info.start_pq;
-	qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE;
+	return cm_pq_id - CM_TX_PQ_BASE - start_pq;
+}
+
+static u16 ecore_get_vport_id_from_pq(struct ecore_hwfn *p_hwfn, u16 pq_id)
+{
+	u16 vport_id;
+
+	OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+	vport_id = p_hwfn->qm_info.qm_pq_params[pq_id].vport_id;
+	OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+	return vport_id;
+}
+
+static u16 ecore_get_rl_id_from_pq(struct ecore_hwfn *p_hwfn, u16 pq_id)
+{
+	u16 rl_id;
+
+	OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+	rl_id = p_hwfn->qm_info.qm_pq_params[pq_id].rl_id;
+	OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
 
-	if (qm_pq_idx > p_hwfn->qm_info.num_pqs) {
+	return rl_id;
+}
+
+u16 ecore_get_pq_vport_id_from_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+	u16 cm_pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+	u16 qm_pq_id = ecore_get_qm_pq_from_cm_pq(p_hwfn, cm_pq_id);
+
+	return ecore_get_vport_id_from_pq(p_hwfn, qm_pq_id);
+}
+
+u16 ecore_get_pq_vport_id_from_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+	u16 cm_pq_id = ecore_get_cm_pq_idx_vf(p_hwfn, vf);
+	u16 qm_pq_id = ecore_get_qm_pq_from_cm_pq(p_hwfn, cm_pq_id);
+
+	return ecore_get_vport_id_from_pq(p_hwfn, qm_pq_id);
+}
+
+u16 ecore_get_pq_rl_id_from_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+	u16 cm_pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+	u16 qm_pq_id = ecore_get_qm_pq_from_cm_pq(p_hwfn, cm_pq_id);
+
+	return ecore_get_rl_id_from_pq(p_hwfn, qm_pq_id);
+}
+
+u16 ecore_get_pq_rl_id_from_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+	u16 cm_pq_id = ecore_get_cm_pq_idx_vf(p_hwfn, vf);
+	u16 qm_pq_id = ecore_get_qm_pq_from_cm_pq(p_hwfn, cm_pq_id);
+
+	return ecore_get_rl_id_from_pq(p_hwfn, qm_pq_id);
+}
+
+static u16 ecore_get_cm_pq_offset_mtc(struct ecore_hwfn *p_hwfn,
+				      u16 idx, u8 tc)
+{
+	u16 pq_offset = 0, max_pqs;
+	u8 num_ports, num_tcs;
+
+	num_ports = ecore_lag_support(p_hwfn) ? LAG_MAX_PORT_NUM : 1;
+	num_tcs = ecore_init_qm_get_num_mtc_tcs(p_hwfn);
+
+	/* add the port offset */
+	pq_offset += (idx % num_ports) * num_tcs;
+	/* add the tc offset */
+	pq_offset += tc % num_tcs;
+
+	/* Verify that the pq returned is within pqs range */
+	max_pqs = ecore_init_qm_get_num_mtc_pqs(p_hwfn);
+	if (pq_offset >= max_pqs) {
 		DP_ERR(p_hwfn,
-		       "qm_pq_idx %d must be smaller than %d\n",
-			qm_pq_idx, p_hwfn->qm_info.num_pqs);
+		       "pq_offset %d must be smaller than %d (idx %d tc %d)\n",
+		       pq_offset, max_pqs, idx, tc);
+		return 0;
 	}
 
-	return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;
+	return pq_offset;
+}
+
+u16 ecore_get_cm_pq_idx_ofld_mtc(struct ecore_hwfn *p_hwfn,
+				  u16 idx, u8 tc)
+{
+	u16 first_ofld_pq, pq_offset;
+
+#ifdef CONFIG_DCQCN
+	if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
+		return ecore_get_cm_pq_idx_rl(p_hwfn, idx);
+#endif
+
+	first_ofld_pq = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+	pq_offset = ecore_get_cm_pq_offset_mtc(p_hwfn, idx, tc);
+
+	return first_ofld_pq + pq_offset;
+}
+
+u16 ecore_get_cm_pq_idx_llt_mtc(struct ecore_hwfn *p_hwfn,
+				 u16 idx, u8 tc)
+{
+	u16 first_llt_pq, pq_offset;
+
+#ifdef CONFIG_DCQCN
+	if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
+		return ecore_get_cm_pq_idx_rl(p_hwfn, idx);
+#endif
+
+	first_llt_pq = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
+	pq_offset = ecore_get_cm_pq_offset_mtc(p_hwfn, idx, tc);
+
+	return first_llt_pq + pq_offset;
+}
+
+u16 ecore_get_cm_pq_idx_ll2(struct ecore_hwfn *p_hwfn, u8 tc)
+{
+	switch (tc) {
+	case PURE_LB_TC:
+		return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+	case PKT_LB_TC:
+		return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
+	default:
+#ifdef CONFIG_DCQCN
+		/* In RoCE, when DCQCN is enabled, there are no OFLD pqs,
+		 * get the first RL pq.
+		 */
+		if (ECORE_IS_ROCE_PERSONALITY(p_hwfn) &&
+		    p_hwfn->p_rdma_info->roce.dcqcn_enabled)
+			return ecore_get_cm_pq_idx_rl(p_hwfn, 0);
+#endif
+		return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+	}
 }
 
 /* Functions for creating specific types of pqs */
@@ -2077,7 +2630,39 @@ static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
 		return;
 
 	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
-	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+	ecore_init_qm_pq(p_hwfn, qm_info, ecore_get_offload_tc(p_hwfn),
+			 PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_mtc_pqs(struct ecore_hwfn *p_hwfn)
+{
+	u8 num_tcs = ecore_init_qm_get_num_mtc_tcs(p_hwfn);
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u8 second_port = p_hwfn->port_id;
+	u8 first_port = p_hwfn->port_id;
+	u8 tc;
+
+	/* if lag is not active, init all pqs with p_hwfn's default port */
+	if (ecore_lag_is_active(p_hwfn)) {
+		first_port = p_hwfn->lag_info.first_port;
+		second_port = p_hwfn->lag_info.second_port;
+	}
+
+	/* override pq's TC if offload TC is set */
+	for (tc = 0; tc < num_tcs; tc++)
+		ecore_init_qm_pq_port(p_hwfn, qm_info,
+				      ecore_is_offload_tc_set(p_hwfn) ?
+				      p_hwfn->hw_info.offload_tc : tc,
+				      PQ_INIT_SHARE_VPORT,
+				      first_port);
+	if (ecore_lag_support(p_hwfn))
+		/* initialize second port's pqs even if lag is not active */
+		for (tc = 0; tc < num_tcs; tc++)
+			ecore_init_qm_pq_port(p_hwfn, qm_info,
+					      ecore_is_offload_tc_set(p_hwfn) ?
+					      p_hwfn->hw_info.offload_tc : tc,
+					      PQ_INIT_SHARE_VPORT,
+					      second_port);
 }
 
 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
@@ -2088,7 +2673,36 @@ static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
 		return;
 
 	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
-	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+	ecore_init_qm_mtc_pqs(p_hwfn);
+}
+
+static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
+	ecore_init_qm_mtc_pqs(p_hwfn);
+}
+
+static void ecore_init_qm_offload_pq_group(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u8 idx;
+
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_GRP))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_GRP, qm_info->num_pqs);
+
+	/* iterate over offload pqs */
+	for (idx = 0; idx < ecore_init_qm_get_group_count(p_hwfn); idx++) {
+		ecore_init_qm_pq_port(p_hwfn, qm_info, qm_info->offload_group[idx].tc,
+				      PQ_INIT_SHARE_VPORT,
+				      qm_info->offload_group[idx].port);
+	}
 }
 
 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
@@ -2104,34 +2718,76 @@ static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
 		ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
 }
 
+static void ecore_init_qm_vf_single_rdma_pq(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+	if (!(pq_flags & PQ_FLAGS_VSR))
+		return;
+
+	/* ecore_init_qm_pq_params() is going to increment vport ID anyway,
+	 * so keep it shared here so we don't waste a vport.
+	 */
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VSR, qm_info->num_pqs);
+	ecore_init_qm_pq(p_hwfn, qm_info, ecore_get_offload_tc(p_hwfn),
+			 PQ_INIT_VF_RL | PQ_INIT_SHARE_VPORT);
+}
+
 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
 {
+	u16 vf_idx, num_vfs = ecore_init_qm_get_num_active_vfs(p_hwfn);
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
-	u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+	u32 l2_pq_init_flags = PQ_INIT_VF_RL;
 
-	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+	if (!(pq_flags & PQ_FLAGS_VFS))
 		return;
 
+	/* Mark PQ starting VF range */
 	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
 
-	qm_info->num_vf_pqs = num_vfs;
-	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+	/* If VFR is set, the L2 PQ will share the rate limiter with the rdma PQ */
+	if (pq_flags & PQ_FLAGS_VFR)
+		l2_pq_init_flags |= PQ_INIT_SHARE_VPORT;
+
+	/* Init the per PF PQs */
+	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
+		/* Per VF L2 PQ */
 		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
-				 PQ_INIT_VF_RL);
+				 l2_pq_init_flags);
+
+		/* Per VF Rdma PQ */
+		if (pq_flags & PQ_FLAGS_VFR)
+			ecore_init_qm_pq(p_hwfn, qm_info,
+					 ecore_get_offload_tc(p_hwfn),
+					 PQ_INIT_VF_RL);
+	}
 }
 
 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
 {
 	u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
+	struct ecore_lag_info *lag_info = &p_hwfn->lag_info;
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u8 port = p_hwfn->port_id, tc;
 
 	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
 		return;
 
 	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
-	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
-		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
-				 PQ_INIT_PF_RL);
+	tc = ecore_get_offload_tc(p_hwfn);
+	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) {
+		/* if lag is present, set these pqs per port according to parity */
+		if (lag_info->is_master &&
+		    lag_info->lag_type != ECORE_LAG_TYPE_NONE &&
+		    lag_info->port_num > 0)
+			port = (pf_rls_idx % lag_info->port_num == 0) ?
+			       lag_info->first_port : lag_info->second_port;
+
+		ecore_init_qm_pq_port(p_hwfn, qm_info, tc, PQ_INIT_PF_RL,
+				      port);
+	}
 }
 
 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
@@ -2154,24 +2810,68 @@ static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
 	/* pq for offloaded protocol */
 	ecore_init_qm_offload_pq(p_hwfn);
 
-	/* done sharing vports */
+	/* low latency pq */
+	ecore_init_qm_low_latency_pq(p_hwfn);
+
+	/* per offload group pqs */
+	ecore_init_qm_offload_pq_group(p_hwfn);
+
+	/* Single VF-RDMA PQ, in case there weren't enough for each VF */
+	ecore_init_qm_vf_single_rdma_pq(p_hwfn);
+
+	/* PF done sharing vports, advance vport for first VF.
+	 * Vport ID is incremented in a separate function because we can't
+	 * rely on the last PF PQ to not use PQ_INIT_SHARE_VPORT, which can
+	 * be different in every QM reconfiguration.
+	 */
 	ecore_init_qm_advance_vport(p_hwfn);
 
 	/* pqs for vfs */
 	ecore_init_qm_vf_pqs(p_hwfn);
 }
 
-/* compare values of getters against resources amounts */
-static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
+/* Finds the optimal features configuration to maximize PQs utilization */
+static enum _ecore_status_t ecore_init_qm_features(struct ecore_hwfn *p_hwfn)
 {
-	if (ecore_init_qm_get_num_vports(p_hwfn) >
-	    RESC_NUM(p_hwfn, ECORE_VPORT)) {
+	if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) {
 		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
 		return ECORE_INVAL;
 	}
 
-	if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
-		DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+	if (ecore_init_qm_get_num_pf_rls(p_hwfn) == 0) {
+		if (IS_ECORE_PACING(p_hwfn)) {
+			DP_ERR(p_hwfn, "No rate limiters available for PF\n");
+			return ECORE_INVAL;
+		}
+	}
+
+	/* For VF RDMA try to provide 2 PQs (separate PQ for RDMA) per VF */
+	if (ECORE_IS_RDMA_PERSONALITY(p_hwfn) && ECORE_IS_VF_RDMA(p_hwfn) &&
+	    ecore_init_qm_get_num_active_vfs(p_hwfn))
+		p_hwfn->qm_info.vf_rdma_en = true;
+
+	while (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ) ||
+	       ecore_init_qm_get_num_rls(p_hwfn) > RESC_NUM(p_hwfn, ECORE_RL)) {
+		if (IS_ECORE_QM_VF_RDMA(p_hwfn)) {
+			p_hwfn->qm_info.vf_rdma_en = false;
+			DP_NOTICE(p_hwfn, false,
+				  "PQ per rdma vf was disabled to reduce requested amount of pqs/rls. A single PQ for all rdma VFs will be used\n");
+			continue;
+		}
+
+		if (IS_ECORE_MULTI_TC_ROCE(p_hwfn)) {
+			p_hwfn->hw_info.multi_tc_roce_en = false;
+			DP_NOTICE(p_hwfn, false,
+				  "multi-tc roce was disabled to reduce requested amount of pqs/rls\n");
+			continue;
+		}
+
+		DP_ERR(p_hwfn,
+		       "Requested amount: %d pqs %d rls, Actual amount: %d pqs %d rls\n",
+		       ecore_init_qm_get_num_pqs(p_hwfn),
+		       ecore_init_qm_get_num_rls(p_hwfn),
+		       RESC_NUM(p_hwfn, ECORE_PQ),
+		       RESC_NUM(p_hwfn, ECORE_RL));
 		return ECORE_INVAL;
 	}
 
@@ -2189,35 +2889,38 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
 	struct init_qm_pq_params *pq;
 	int i, tc;
 
+	if (qm_info->pq_overflow)
+		return;
+
 	/* top level params */
-	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
-		   "qm init top level params: start_pq %d, start_vport %d,"
-		   " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
-		   qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq,
-		   qm_info->offload_pq, qm_info->pure_ack_pq);
-	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
-		   "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d,"
-		   " num_vports %d, max_phys_tcs_per_port %d\n",
-		   qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
-		   qm_info->num_vf_pqs, qm_info->num_vports,
-		   qm_info->max_phys_tcs_per_port);
-	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
-		   "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d,"
-		   " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
-		   qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
-		   qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
-		   qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "qm init params: pq_flags 0x%x, num_pqs %d, num_vf_pqs %d, start_pq %d\n",
+		   ecore_get_pq_flags(p_hwfn), qm_info->num_pqs,
+		   qm_info->num_vf_pqs, qm_info->start_pq);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "qm init params: pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d\n",
+		   qm_info->pf_rl_en, qm_info->pf_wfq_en,
+		   qm_info->vport_rl_en, qm_info->vport_wfq_en);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "qm init params: num_vports %d, start_vport %d, num_rls %d, num_pf_rls %d, start_rl %d, pf_rl %d\n",
+		   qm_info->num_vports, qm_info->start_vport,
+		   qm_info->num_rls, qm_info->num_pf_rls,
+		   qm_info->start_rl, qm_info->pf_rl);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "qm init params: pure_lb_pq %d, ooo_pq %d, pure_ack_pq %d, first_ofld_pq %d, first_llt_pq %d\n",
+		   qm_info->pure_lb_pq, qm_info->ooo_pq, qm_info->pure_ack_pq,
+		   qm_info->first_ofld_pq, qm_info->first_llt_pq);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "qm init params: single_vf_rdma_pq %d, first_vf_pq %d, max_phys_tcs_per_port %d, pf_wfq %d\n",
+		   qm_info->single_vf_rdma_pq, qm_info->first_vf_pq,
+		   qm_info->max_phys_tcs_per_port, qm_info->pf_wfq);
 
 	/* port table */
 	for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
 		port = &qm_info->qm_port_params[i];
-		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
-			   "port idx %d, active %d, active_phys_tcs %d,"
-			   " num_pbf_cmd_lines %d, num_btb_blocks %d,"
-			   " reserved %d\n",
-			   i, port->active, port->active_phys_tcs,
-			   port->num_pbf_cmd_lines, port->num_btb_blocks,
-			   port->reserved);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
+			   i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines,
+			   port->num_btb_blocks, port->reserved);
 	}
 
 	/* vport table */
@@ -2226,8 +2929,7 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
 		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, wfq %d, first_tx_pq_id [ ",
 			   qm_info->start_vport + i, vport->wfq);
 		for (tc = 0; tc < NUM_OF_TCS; tc++)
-			DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
-				   vport->first_tx_pq_id[tc]);
+			DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]);
 		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
 	}
 
@@ -2270,19 +2972,66 @@ static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
  * 4. activate init tool in QM_PF stage
  * 5. send an sdm_qm_cmd through rbc interface to release the QM
  */
-enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
-				     struct ecore_ptt *p_ptt)
+static enum _ecore_status_t __ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+					      struct ecore_ptt *p_ptt,
+					      bool b_can_sleep)
 {
-	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
-	bool b_rc;
+	struct ecore_resc_unlock_params resc_unlock_params;
+	struct ecore_resc_lock_params resc_lock_params;
+	bool b_rc, b_mfw_unlock = true;
+	struct ecore_qm_info *qm_info;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 
-	/* multiple flows can issue qm reconf. Need to lock */
+	qm_info = &p_hwfn->qm_info;
+
+	/* Obtain MFW resource lock to sync with PFs with driver instances not
+	 * covered by the static global qm_lock (monolithic, dpdk, PDA).
+	 */
+	ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+					 ECORE_RESC_LOCK_QM_RECONF, false);
+	resc_lock_params.sleep_b4_retry = b_can_sleep;
+	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+
+	/* If lock is taken we must abort. If MFW does not support the feature
+	 * or took too long to acquire the lock we soldier on.
+	 */
+	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL && rc != ECORE_TIMEOUT) {
+		DP_ERR(p_hwfn,
+		       "QM reconf MFW lock is stuck. Failing reconf flow\n");
+		return ECORE_INVAL;
+	}
+
+	/* if MFW doesn't support, no need to unlock. There is no harm in
+	 * trying, but we would need to tweak the rc value in case of
+	 * ECORE_NOTIMPL, so seems nicer to avoid.
+	 */
+	if (rc == ECORE_NOTIMPL)
+		b_mfw_unlock = false;
+
+	/* Multiple hwfn flows can issue qm reconf. Need to lock between hwfn
+	 * flows.
+	 */
 	OSAL_SPIN_LOCK(&qm_lock);
 
+	/* qm_info is invalid while this lock is taken */
+	OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+
+	rc = ecore_init_qm_features(p_hwfn);
+	if (rc != ECORE_SUCCESS) {
+		OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+		goto unlock;
+	}
+
 	/* initialize ecore's qm data structure */
 	ecore_init_qm_info(p_hwfn);
 
+	OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+	if (qm_info->pq_overflow) {
+		rc = ECORE_INVAL;
+		goto unlock;
+	}
+
 	/* stop PF's qm queues */
 	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
 				      qm_info->start_pq, qm_info->num_pqs);
@@ -2291,9 +3040,6 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
 		goto unlock;
 	}
 
-	/* clear the QM_PF runtime phase leftovers from previous init */
-	ecore_init_clear_rt_data(p_hwfn);
-
 	/* prepare QM portion of runtime array */
 	ecore_qm_init_pf(p_hwfn, p_ptt, false);
 
@@ -2310,39 +3056,66 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
 unlock:
 	OSAL_SPIN_UNLOCK(&qm_lock);
 
+	if (b_mfw_unlock)
+		rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+
 	return rc;
 }
 
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+				     struct ecore_ptt *p_ptt)
+{
+	return __ecore_qm_reconf(p_hwfn, p_ptt, true);
+}
+
+enum _ecore_status_t ecore_qm_reconf_intr(struct ecore_hwfn *p_hwfn,
+					  struct ecore_ptt *p_ptt)
+{
+	return __ecore_qm_reconf(p_hwfn, p_ptt, false);
+}
+
 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u16 max_pqs_num, max_vports_num;
 	enum _ecore_status_t rc;
 
-	rc = ecore_init_qm_sanity(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+	rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_info->qm_info_lock,
+				  "qm_info_lock");
+	if (rc)
+		goto alloc_err;
+#endif
+	OSAL_SPIN_LOCK_INIT(&qm_info->qm_info_lock);
+
+	rc = ecore_init_qm_features(p_hwfn);
 	if (rc != ECORE_SUCCESS)
 		goto alloc_err;
 
+	max_pqs_num = (u16)RESC_NUM(p_hwfn, ECORE_PQ);
+	max_vports_num = (u16)RESC_NUM(p_hwfn, ECORE_VPORT);
+
 	qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 					    sizeof(struct init_qm_pq_params) *
-					    ecore_init_qm_get_num_pqs(p_hwfn));
+					    max_pqs_num);
 	if (!qm_info->qm_pq_params)
 		goto alloc_err;
 
 	qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
-				       sizeof(struct init_qm_vport_params) *
-				       ecore_init_qm_get_num_vports(p_hwfn));
+					       sizeof(struct init_qm_vport_params) *
+					       max_vports_num);
 	if (!qm_info->qm_vport_params)
 		goto alloc_err;
 
 	qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
-				      sizeof(struct init_qm_port_params) *
-				      p_hwfn->p_dev->num_ports_in_engine);
+					      sizeof(struct init_qm_port_params) *
+					      p_hwfn->p_dev->num_ports_in_engine);
 	if (!qm_info->qm_port_params)
 		goto alloc_err;
 
 	qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 					sizeof(struct ecore_wfq_data) *
-					ecore_init_qm_get_num_vports(p_hwfn));
+					max_vports_num);
 	if (!qm_info->wfq_data)
 		goto alloc_err;
 
@@ -2355,25 +3128,256 @@ static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
 }
 /******************** End QM initialization ***************/
 
-enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+static enum _ecore_status_t ecore_lag_create_slave(struct ecore_hwfn *p_hwfn,
+						   u8 master_pfid)
+{
+	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+	u8 slave_ppfid = 1; /* TODO: Need some sort of resource management function
+			     * to return a free entry
+			     */
+	enum _ecore_status_t rc;
+
+	if (!p_ptt)
+		return ECORE_AGAIN;
+
+	rc = ecore_llh_map_ppfid_to_pfid(p_hwfn, p_ptt, slave_ppfid,
+					 master_pfid);
+	ecore_ptt_release(p_hwfn, p_ptt);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Protocol filter for RoCE v1 */
+	rc = ecore_llh_add_protocol_filter(p_hwfn->p_dev, slave_ppfid,
+					   ECORE_LLH_FILTER_ETHERTYPE, 0x8915,
+					   ECORE_LLH_DONT_CARE);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Protocol filter for RoCE v2 */
+	return ecore_llh_add_protocol_filter(p_hwfn->p_dev, slave_ppfid,
+					     ECORE_LLH_FILTER_UDP_DEST_PORT,
+					     ECORE_LLH_DONT_CARE, 4791);
+}
+
+static void ecore_lag_destroy_slave(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+	u8 slave_ppfid = 1; /* Need some sort of resource management function
+			     * to return a free entry
+			     */
+
+	/* Protocol filter for RoCE v1 */
+	ecore_llh_remove_protocol_filter(p_hwfn->p_dev, slave_ppfid,
+					 ECORE_LLH_FILTER_ETHERTYPE,
+					 0x8915, ECORE_LLH_DONT_CARE);
+
+	/* Protocol filter for RoCE v2 */
+	ecore_llh_remove_protocol_filter(p_hwfn->p_dev, slave_ppfid,
+					 ECORE_LLH_FILTER_UDP_DEST_PORT,
+					 ECORE_LLH_DONT_CARE, 4791);
+
+	if (p_ptt) {
+		ecore_llh_map_ppfid_to_pfid(p_hwfn, p_ptt, slave_ppfid,
+					    p_hwfn->rel_pf_id);
+		ecore_ptt_release(p_hwfn, p_ptt);
+	}
+}
+
+/* Map ports:
+ *      port 0/2 - 0/2
+ *      port 1/3 - 1/3
+ * If port 0/2 is down, map both to port 1/3, if port 1/3 is down, map both to
+ * port 0/2, and if both are down, it doesn't really matter.
+ */
+static void ecore_lag_map_ports(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_lag_info *lag_info = &p_hwfn->lag_info;
+
+	/* for now support only 2 ports in the bond */
+	if (lag_info->master_pf == 0) {
+		lag_info->first_port = (lag_info->active_ports & (1 << 0)) ? 0 : 1;
+		lag_info->second_port = (lag_info->active_ports & (1 << 1)) ? 1 : 0;
+	} else if (lag_info->master_pf == 2) {
+		lag_info->first_port = (lag_info->active_ports & (1 << 2)) ? 2 : 3;
+		lag_info->second_port = (lag_info->active_ports & (1 << 3)) ? 3 : 2;
+	}
+	lag_info->port_num = LAG_MAX_PORT_NUM;
+}
+
+/* The following function strongly assumes two ports only */
+static enum _ecore_status_t ecore_lag_create_master(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+	enum _ecore_status_t rc;
+
+	if (!p_ptt)
+		return ECORE_AGAIN;
+
+	ecore_lag_map_ports(p_hwfn);
+	rc = ecore_qm_reconf_intr(p_hwfn, p_ptt);
+	ecore_ptt_release(p_hwfn, p_ptt);
+
+	return rc;
+}
+
+/* The following function strongly assumes two ports only */
+static enum _ecore_status_t ecore_lag_destroy_master(struct ecore_hwfn *p_hwfn)
 {
+	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+	enum _ecore_status_t rc;
+
+	if (!p_ptt)
+		return ECORE_AGAIN;
+
+	p_hwfn->qm_info.offload_group_count = 0;
+
+	rc = ecore_qm_reconf_intr(p_hwfn, p_ptt);
+	ecore_ptt_release(p_hwfn, p_ptt);
+
+	return rc;
+}
+
+enum _ecore_status_t ecore_lag_create(struct ecore_dev *dev,
+				      enum ecore_lag_type lag_type,
+				      void (*link_change_cb)(void *cxt),
+				      void *cxt,
+				      u8 active_ports)
+{
+	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(dev);
+	u8 master_pfid = p_hwfn->abs_pf_id < 2 ? 0 : 2;
+
+	if (!ecore_lag_support(p_hwfn)) {
+		DP_NOTICE(p_hwfn, false, "RDMA bonding will not be configured - only supported on AH devices on default mode\n");
+		return ECORE_INVAL;
+	}
+
+	/* TODO: Check Supported MFW */
+	p_hwfn->lag_info.lag_type = lag_type;
+	p_hwfn->lag_info.link_change_cb = link_change_cb;
+	p_hwfn->lag_info.cxt = cxt;
+	p_hwfn->lag_info.active_ports = active_ports;
+	p_hwfn->lag_info.is_master = p_hwfn->abs_pf_id == master_pfid;
+	p_hwfn->lag_info.master_pf = master_pfid;
+
+	/* Configure RX for LAG */
+	if (p_hwfn->lag_info.is_master)
+		return ecore_lag_create_master(p_hwfn);
+
+	return ecore_lag_create_slave(p_hwfn, master_pfid);
+}
+
+/* Modify the link state of a given port */
+enum _ecore_status_t ecore_lag_modify(struct ecore_dev *dev,
+				      u8 port_id,
+				      u8 link_active)
+{
+	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(dev);
+	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+	struct ecore_lag_info *lag_info = &p_hwfn->lag_info;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
-	enum dbg_status debug_status = DBG_STATUS_OK;
-	int i;
+	unsigned long active_ports;
+	u8 curr_active;
 
-	if (IS_VF(p_dev)) {
-		for_each_hwfn(p_dev, i) {
-			rc = ecore_l2_alloc(&p_dev->hwfns[i]);
-			if (rc != ECORE_SUCCESS)
-				return rc;
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Active ports changed before %x link active %x por