DPDK patches and discussions
 help / color / mirror / Atom feed
From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: Peter Spreadborough <peter.spreadborough@broadcom.com>,
	Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>,
	Randy Schacher <stuart.schacher@broadcom.com>
Subject: [dpdk-dev] [PATCH v3 22/51] net/bnxt: support EM and TCAM lookup with table scope
Date: Wed,  1 Jul 2020 21:11:05 -0700	[thread overview]
Message-ID: <20200702041134.43198-23-ajit.khaparde@broadcom.com> (raw)
In-Reply-To: <20200702041134.43198-1-ajit.khaparde@broadcom.com>

From: Peter Spreadborough <peter.spreadborough@broadcom.com>

- Support for table scope within the EM module
- Support for host and system memory
- Replace TF device type by HCAPI RM type
- Update TCAM set and free for HCAPI RM type

Signed-off-by: Peter Spreadborough <peter.spreadborough@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/meson.build                  |    5 +-
 drivers/net/bnxt/tf_core/Makefile             |    5 +-
 drivers/net/bnxt/tf_core/cfa_resource_types.h |    8 +-
 drivers/net/bnxt/tf_core/hwrm_tf.h            |  864 +-----------
 drivers/net/bnxt/tf_core/tf_core.c            |  100 +-
 drivers/net/bnxt/tf_core/tf_device.c          |   50 +-
 drivers/net/bnxt/tf_core/tf_device.h          |   86 +-
 drivers/net/bnxt/tf_core/tf_device_p4.c       |   14 +-
 drivers/net/bnxt/tf_core/tf_device_p4.h       |   20 +-
 drivers/net/bnxt/tf_core/tf_em.c              |  360 -----
 drivers/net/bnxt/tf_core/tf_em.h              |  310 +++-
 drivers/net/bnxt/tf_core/tf_em_common.c       |  281 ++++
 drivers/net/bnxt/tf_core/tf_em_common.h       |  107 ++
 drivers/net/bnxt/tf_core/tf_em_host.c         | 1146 +++++++++++++++
 drivers/net/bnxt/tf_core/tf_em_internal.c     |  312 +++++
 drivers/net/bnxt/tf_core/tf_em_system.c       |  118 ++
 drivers/net/bnxt/tf_core/tf_msg.c             | 1248 ++++-------------
 drivers/net/bnxt/tf_core/tf_msg.h             |  233 +--
 drivers/net/bnxt/tf_core/tf_rm.c              |   89 +-
 drivers/net/bnxt/tf_core/tf_rm_new.c          |   40 +-
 drivers/net/bnxt/tf_core/tf_tbl.c             | 1134 ---------------
 drivers/net/bnxt/tf_core/tf_tbl_type.c        |   39 +-
 drivers/net/bnxt/tf_core/tf_tcam.c            |   25 +-
 drivers/net/bnxt/tf_core/tf_tcam.h            |    4 +
 drivers/net/bnxt/tf_core/tf_util.c            |    4 +-
 25 files changed, 3030 insertions(+), 3572 deletions(-)
 delete mode 100644 drivers/net/bnxt/tf_core/tf_em.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_em_common.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_em_common.h
 create mode 100644 drivers/net/bnxt/tf_core/tf_em_host.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_em_internal.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_em_system.c

diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
index 33e6ebd66..35038dc8b 100644
--- a/drivers/net/bnxt/meson.build
+++ b/drivers/net/bnxt/meson.build
@@ -28,7 +28,10 @@ sources = files('bnxt_cpr.c',
 	'tf_core/tf_msg.c',
 	'tf_core/rand.c',
 	'tf_core/stack.c',
-	'tf_core/tf_em.c',
+	'tf_core/tf_em_common.c',
+	'tf_core/tf_em_host.c',
+	'tf_core/tf_em_internal.c',
+	'tf_core/tf_em_system.c',
 	'tf_core/tf_rm.c',
 	'tf_core/tf_tbl.c',
 	'tf_core/tfp.c',
diff --git a/drivers/net/bnxt/tf_core/Makefile b/drivers/net/bnxt/tf_core/Makefile
index 5ed32f12a..f186741e4 100644
--- a/drivers/net/bnxt/tf_core/Makefile
+++ b/drivers/net/bnxt/tf_core/Makefile
@@ -12,8 +12,11 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_core.c
 SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_rm.c
 SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tfp.c
 SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_msg.c
-SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_em.c
 SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_tbl.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_em_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_em_internal.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_em_host.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_em_system.c
 SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_session.c
 SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_device.c
 SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_device_p4.c
diff --git a/drivers/net/bnxt/tf_core/cfa_resource_types.h b/drivers/net/bnxt/tf_core/cfa_resource_types.h
index 058d8cc88..6e79facec 100644
--- a/drivers/net/bnxt/tf_core/cfa_resource_types.h
+++ b/drivers/net/bnxt/tf_core/cfa_resource_types.h
@@ -202,7 +202,9 @@
 #define CFA_RESOURCE_TYPE_P45_SP_TCAM         0x1fUL
 /* VEB TCAM */
 #define CFA_RESOURCE_TYPE_P45_VEB_TCAM        0x20UL
-#define CFA_RESOURCE_TYPE_P45_LAST           CFA_RESOURCE_TYPE_P45_VEB_TCAM
+/* Table Scope */
+#define CFA_RESOURCE_TYPE_P45_TBL_SCOPE       0x21UL
+#define CFA_RESOURCE_TYPE_P45_LAST           CFA_RESOURCE_TYPE_P45_TBL_SCOPE
 
 
 /* Multicast Group */
@@ -269,7 +271,9 @@
 #define CFA_RESOURCE_TYPE_P4_MIRROR          0x1eUL
 /* Source Property TCAM */
 #define CFA_RESOURCE_TYPE_P4_SP_TCAM         0x1fUL
-#define CFA_RESOURCE_TYPE_P4_LAST           CFA_RESOURCE_TYPE_P4_SP_TCAM
+/* Table Scope */
+#define CFA_RESOURCE_TYPE_P4_TBL_SCOPE       0x20UL
+#define CFA_RESOURCE_TYPE_P4_LAST           CFA_RESOURCE_TYPE_P4_TBL_SCOPE
 
 
 #endif /* _CFA_RESOURCE_TYPES_H_ */
diff --git a/drivers/net/bnxt/tf_core/hwrm_tf.h b/drivers/net/bnxt/tf_core/hwrm_tf.h
index 1e78296c6..26836e488 100644
--- a/drivers/net/bnxt/tf_core/hwrm_tf.h
+++ b/drivers/net/bnxt/tf_core/hwrm_tf.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2020 Broadcom
+ * Copyright(c) 2019-2020 Broadcom
  * All rights reserved.
  */
 #ifndef _HWRM_TF_H_
@@ -13,20 +13,8 @@ typedef enum tf_type {
 } tf_type_t;
 
 typedef enum tf_subtype {
-	HWRM_TFT_SESSION_ATTACH = 712,
-	HWRM_TFT_SESSION_HW_RESC_QCAPS = 721,
-	HWRM_TFT_SESSION_HW_RESC_ALLOC = 722,
-	HWRM_TFT_SESSION_HW_RESC_FREE = 723,
-	HWRM_TFT_SESSION_HW_RESC_FLUSH = 724,
-	HWRM_TFT_SESSION_SRAM_RESC_QCAPS = 725,
-	HWRM_TFT_SESSION_SRAM_RESC_ALLOC = 726,
-	HWRM_TFT_SESSION_SRAM_RESC_FREE = 727,
-	HWRM_TFT_SESSION_SRAM_RESC_FLUSH = 728,
-	HWRM_TFT_TBL_SCOPE_CFG = 731,
 	HWRM_TFT_REG_GET = 821,
 	HWRM_TFT_REG_SET = 822,
-	HWRM_TFT_TBL_TYPE_SET = 823,
-	HWRM_TFT_TBL_TYPE_GET = 824,
 	HWRM_TFT_TBL_TYPE_BULK_GET = 825,
 	TF_SUBTYPE_LAST = HWRM_TFT_TBL_TYPE_BULK_GET,
 } tf_subtype_t;
@@ -66,858 +54,8 @@ typedef enum tf_subtype {
 #define TF_BITS2BYTES(x) (((x) + 7) >> 3)
 #define TF_BITS2BYTES_WORD_ALIGN(x) ((((x) + 31) >> 5) * 4)
 
-struct tf_session_attach_input;
-struct tf_session_hw_resc_qcaps_input;
-struct tf_session_hw_resc_qcaps_output;
-struct tf_session_hw_resc_alloc_input;
-struct tf_session_hw_resc_alloc_output;
-struct tf_session_hw_resc_free_input;
-struct tf_session_hw_resc_flush_input;
-struct tf_session_sram_resc_qcaps_input;
-struct tf_session_sram_resc_qcaps_output;
-struct tf_session_sram_resc_alloc_input;
-struct tf_session_sram_resc_alloc_output;
-struct tf_session_sram_resc_free_input;
-struct tf_session_sram_resc_flush_input;
-struct tf_tbl_type_set_input;
-struct tf_tbl_type_get_input;
-struct tf_tbl_type_get_output;
 struct tf_tbl_type_bulk_get_input;
 struct tf_tbl_type_bulk_get_output;
-/* Input params for session attach */
-typedef struct tf_session_attach_input {
-	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
-	uint32_t			 fw_session_id;
-	/* Session Name */
-	char				 session_name[TF_SESSION_NAME_MAX];
-} tf_session_attach_input_t, *ptf_session_attach_input_t;
-
-/* Input params for session resource HW qcaps */
-typedef struct tf_session_hw_resc_qcaps_input {
-	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the query apply to RX */
-#define TF_SESSION_HW_RESC_QCAPS_INPUT_FLAGS_DIR_RX	  (0x0)
-	/* When set to 1, indicates the query apply to TX */
-#define TF_SESSION_HW_RESC_QCAPS_INPUT_FLAGS_DIR_TX	  (0x1)
-} tf_session_hw_resc_qcaps_input_t, *ptf_session_hw_resc_qcaps_input_t;
-
-/* Output params for session resource HW qcaps */
-typedef struct tf_session_hw_resc_qcaps_output {
-	/* Control Flags */
-	uint32_t			 flags;
-	/* When set to 0, indicates Static partitioning */
-#define TF_SESSION_HW_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_STATIC	  (0x0)
-	/* When set to 1, indicates Strategy 1 */
-#define TF_SESSION_HW_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_1	  (0x1)
-	/* When set to 1, indicates Strategy 2 */
-#define TF_SESSION_HW_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_2	  (0x2)
-	/* When set to 1, indicates Strategy 3 */
-#define TF_SESSION_HW_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_3	  (0x3)
-	/* Unused */
-	uint8_t			  unused[4];
-	/* Minimum guaranteed number of L2 Ctx */
-	uint16_t			 l2_ctx_tcam_entries_min;
-	/* Maximum non-guaranteed number of L2 Ctx */
-	uint16_t			 l2_ctx_tcam_entries_max;
-	/* Minimum guaranteed number of profile functions */
-	uint16_t			 prof_func_min;
-	/* Maximum non-guaranteed number of profile functions */
-	uint16_t			 prof_func_max;
-	/* Minimum guaranteed number of profile TCAM entries */
-	uint16_t			 prof_tcam_entries_min;
-	/* Maximum non-guaranteed number of profile TCAM entries */
-	uint16_t			 prof_tcam_entries_max;
-	/* Minimum guaranteed number of EM profile ID */
-	uint16_t			 em_prof_id_min;
-	/* Maximum non-guaranteed number of EM profile ID */
-	uint16_t			 em_prof_id_max;
-	/* Minimum guaranteed number of EM records entries */
-	uint16_t			 em_record_entries_min;
-	/* Maximum non-guaranteed number of EM record entries */
-	uint16_t			 em_record_entries_max;
-	/* Minimum guaranteed number of WC TCAM profile ID */
-	uint16_t			 wc_tcam_prof_id_min;
-	/* Maximum non-guaranteed number of WC TCAM profile ID */
-	uint16_t			 wc_tcam_prof_id_max;
-	/* Minimum guaranteed number of WC TCAM entries */
-	uint16_t			 wc_tcam_entries_min;
-	/* Maximum non-guaranteed number of WC TCAM entries */
-	uint16_t			 wc_tcam_entries_max;
-	/* Minimum guaranteed number of meter profiles */
-	uint16_t			 meter_profiles_min;
-	/* Maximum non-guaranteed number of meter profiles */
-	uint16_t			 meter_profiles_max;
-	/* Minimum guaranteed number of meter instances */
-	uint16_t			 meter_inst_min;
-	/* Maximum non-guaranteed number of meter instances */
-	uint16_t			 meter_inst_max;
-	/* Minimum guaranteed number of mirrors */
-	uint16_t			 mirrors_min;
-	/* Maximum non-guaranteed number of mirrors */
-	uint16_t			 mirrors_max;
-	/* Minimum guaranteed number of UPAR */
-	uint16_t			 upar_min;
-	/* Maximum non-guaranteed number of UPAR */
-	uint16_t			 upar_max;
-	/* Minimum guaranteed number of SP TCAM entries */
-	uint16_t			 sp_tcam_entries_min;
-	/* Maximum non-guaranteed number of SP TCAM entries */
-	uint16_t			 sp_tcam_entries_max;
-	/* Minimum guaranteed number of L2 Functions */
-	uint16_t			 l2_func_min;
-	/* Maximum non-guaranteed number of L2 Functions */
-	uint16_t			 l2_func_max;
-	/* Minimum guaranteed number of flexible key templates */
-	uint16_t			 flex_key_templ_min;
-	/* Maximum non-guaranteed number of flexible key templates */
-	uint16_t			 flex_key_templ_max;
-	/* Minimum guaranteed number of table Scopes */
-	uint16_t			 tbl_scope_min;
-	/* Maximum non-guaranteed number of table Scopes */
-	uint16_t			 tbl_scope_max;
-	/* Minimum guaranteed number of epoch0 entries */
-	uint16_t			 epoch0_entries_min;
-	/* Maximum non-guaranteed number of epoch0 entries */
-	uint16_t			 epoch0_entries_max;
-	/* Minimum guaranteed number of epoch1 entries */
-	uint16_t			 epoch1_entries_min;
-	/* Maximum non-guaranteed number of epoch1 entries */
-	uint16_t			 epoch1_entries_max;
-	/* Minimum guaranteed number of metadata */
-	uint16_t			 metadata_min;
-	/* Maximum non-guaranteed number of metadata */
-	uint16_t			 metadata_max;
-	/* Minimum guaranteed number of CT states */
-	uint16_t			 ct_state_min;
-	/* Maximum non-guaranteed number of CT states */
-	uint16_t			 ct_state_max;
-	/* Minimum guaranteed number of range profiles */
-	uint16_t			 range_prof_min;
-	/* Maximum non-guaranteed number range profiles */
-	uint16_t			 range_prof_max;
-	/* Minimum guaranteed number of range entries */
-	uint16_t			 range_entries_min;
-	/* Maximum non-guaranteed number of range entries */
-	uint16_t			 range_entries_max;
-	/* Minimum guaranteed number of LAG table entries */
-	uint16_t			 lag_tbl_entries_min;
-	/* Maximum non-guaranteed number of LAG table entries */
-	uint16_t			 lag_tbl_entries_max;
-} tf_session_hw_resc_qcaps_output_t, *ptf_session_hw_resc_qcaps_output_t;
-
-/* Input params for session resource HW alloc */
-typedef struct tf_session_hw_resc_alloc_input {
-	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the query apply to RX */
-#define TF_SESSION_HW_RESC_ALLOC_INPUT_FLAGS_DIR_RX	  (0x0)
-	/* When set to 1, indicates the query apply to TX */
-#define TF_SESSION_HW_RESC_ALLOC_INPUT_FLAGS_DIR_TX	  (0x1)
-	/* Unused */
-	uint8_t			  unused[2];
-	/* Number of L2 CTX TCAM entries to be allocated */
-	uint16_t			 num_l2_ctx_tcam_entries;
-	/* Number of profile functions to be allocated */
-	uint16_t			 num_prof_func_entries;
-	/* Number of profile TCAM entries to be allocated */
-	uint16_t			 num_prof_tcam_entries;
-	/* Number of EM profile ids to be allocated */
-	uint16_t			 num_em_prof_id;
-	/* Number of EM records entries to be allocated */
-	uint16_t			 num_em_record_entries;
-	/* Number of WC profiles ids to be allocated */
-	uint16_t			 num_wc_tcam_prof_id;
-	/* Number of WC TCAM entries to be allocated */
-	uint16_t			 num_wc_tcam_entries;
-	/* Number of meter profiles to be allocated */
-	uint16_t			 num_meter_profiles;
-	/* Number of meter instances to be allocated */
-	uint16_t			 num_meter_inst;
-	/* Number of mirrors to be allocated */
-	uint16_t			 num_mirrors;
-	/* Number of UPAR to be allocated */
-	uint16_t			 num_upar;
-	/* Number of SP TCAM entries to be allocated */
-	uint16_t			 num_sp_tcam_entries;
-	/* Number of L2 functions to be allocated */
-	uint16_t			 num_l2_func;
-	/* Number of flexible key templates to be allocated */
-	uint16_t			 num_flex_key_templ;
-	/* Number of table scopes to be allocated */
-	uint16_t			 num_tbl_scope;
-	/* Number of epoch0 entries to be allocated */
-	uint16_t			 num_epoch0_entries;
-	/* Number of epoch1 entries to be allocated */
-	uint16_t			 num_epoch1_entries;
-	/* Number of metadata to be allocated */
-	uint16_t			 num_metadata;
-	/* Number of CT states to be allocated */
-	uint16_t			 num_ct_state;
-	/* Number of range profiles to be allocated */
-	uint16_t			 num_range_prof;
-	/* Number of range Entries to be allocated */
-	uint16_t			 num_range_entries;
-	/* Number of LAG table entries to be allocated */
-	uint16_t			 num_lag_tbl_entries;
-} tf_session_hw_resc_alloc_input_t, *ptf_session_hw_resc_alloc_input_t;
-
-/* Output params for session resource HW alloc */
-typedef struct tf_session_hw_resc_alloc_output {
-	/* Starting index of L2 CTX TCAM entries allocated to the session */
-	uint16_t			 l2_ctx_tcam_entries_start;
-	/* Number of L2 CTX TCAM entries allocated */
-	uint16_t			 l2_ctx_tcam_entries_stride;
-	/* Starting index of profile functions allocated to the session */
-	uint16_t			 prof_func_start;
-	/* Number of profile functions allocated */
-	uint16_t			 prof_func_stride;
-	/* Starting index of profile TCAM entries allocated to the session */
-	uint16_t			 prof_tcam_entries_start;
-	/* Number of profile TCAM entries allocated */
-	uint16_t			 prof_tcam_entries_stride;
-	/* Starting index of EM profile ids allocated to the session */
-	uint16_t			 em_prof_id_start;
-	/* Number of EM profile ids allocated */
-	uint16_t			 em_prof_id_stride;
-	/* Starting index of EM record entries allocated to the session */
-	uint16_t			 em_record_entries_start;
-	/* Number of EM record entries allocated */
-	uint16_t			 em_record_entries_stride;
-	/* Starting index of WC TCAM profiles ids allocated to the session */
-	uint16_t			 wc_tcam_prof_id_start;
-	/* Number of WC TCAM profile ids allocated */
-	uint16_t			 wc_tcam_prof_id_stride;
-	/* Starting index of WC TCAM entries allocated to the session */
-	uint16_t			 wc_tcam_entries_start;
-	/* Number of WC TCAM allocated */
-	uint16_t			 wc_tcam_entries_stride;
-	/* Starting index of meter profiles allocated to the session */
-	uint16_t			 meter_profiles_start;
-	/* Number of meter profiles allocated */
-	uint16_t			 meter_profiles_stride;
-	/* Starting index of meter instance allocated to the session */
-	uint16_t			 meter_inst_start;
-	/* Number of meter instance allocated */
-	uint16_t			 meter_inst_stride;
-	/* Starting index of mirrors allocated to the session */
-	uint16_t			 mirrors_start;
-	/* Number of mirrors allocated */
-	uint16_t			 mirrors_stride;
-	/* Starting index of UPAR allocated to the session */
-	uint16_t			 upar_start;
-	/* Number of UPAR allocated */
-	uint16_t			 upar_stride;
-	/* Starting index of SP TCAM entries allocated to the session */
-	uint16_t			 sp_tcam_entries_start;
-	/* Number of SP TCAM entries allocated */
-	uint16_t			 sp_tcam_entries_stride;
-	/* Starting index of L2 functions allocated to the session */
-	uint16_t			 l2_func_start;
-	/* Number of L2 functions allocated */
-	uint16_t			 l2_func_stride;
-	/* Starting index of flexible key templates allocated to the session */
-	uint16_t			 flex_key_templ_start;
-	/* Number of flexible key templates allocated */
-	uint16_t			 flex_key_templ_stride;
-	/* Starting index of table scopes allocated to the session */
-	uint16_t			 tbl_scope_start;
-	/* Number of table scopes allocated */
-	uint16_t			 tbl_scope_stride;
-	/* Starting index of epoch0 entries allocated to the session */
-	uint16_t			 epoch0_entries_start;
-	/* Number of epoch0 entries allocated */
-	uint16_t			 epoch0_entries_stride;
-	/* Starting index of epoch1 entries allocated to the session */
-	uint16_t			 epoch1_entries_start;
-	/* Number of epoch1 entries allocated */
-	uint16_t			 epoch1_entries_stride;
-	/* Starting index of metadata allocated to the session */
-	uint16_t			 metadata_start;
-	/* Number of metadata allocated */
-	uint16_t			 metadata_stride;
-	/* Starting index of CT states allocated to the session */
-	uint16_t			 ct_state_start;
-	/* Number of CT states allocated */
-	uint16_t			 ct_state_stride;
-	/* Starting index of range profiles allocated to the session */
-	uint16_t			 range_prof_start;
-	/* Number range profiles allocated */
-	uint16_t			 range_prof_stride;
-	/* Starting index of range enntries allocated to the session */
-	uint16_t			 range_entries_start;
-	/* Number of range entries allocated */
-	uint16_t			 range_entries_stride;
-	/* Starting index of LAG table entries allocated to the session */
-	uint16_t			 lag_tbl_entries_start;
-	/* Number of LAG table entries allocated */
-	uint16_t			 lag_tbl_entries_stride;
-} tf_session_hw_resc_alloc_output_t, *ptf_session_hw_resc_alloc_output_t;
-
-/* Input params for session resource HW free */
-typedef struct tf_session_hw_resc_free_input {
-	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the query apply to RX */
-#define TF_SESSION_HW_RESC_FREE_INPUT_FLAGS_DIR_RX	  (0x0)
-	/* When set to 1, indicates the query apply to TX */
-#define TF_SESSION_HW_RESC_FREE_INPUT_FLAGS_DIR_TX	  (0x1)
-	/* Unused */
-	uint8_t			  unused[2];
-	/* Starting index of L2 CTX TCAM entries allocated to the session */
-	uint16_t			 l2_ctx_tcam_entries_start;
-	/* Number of L2 CTX TCAM entries allocated */
-	uint16_t			 l2_ctx_tcam_entries_stride;
-	/* Starting index of profile functions allocated to the session */
-	uint16_t			 prof_func_start;
-	/* Number of profile functions allocated */
-	uint16_t			 prof_func_stride;
-	/* Starting index of profile TCAM entries allocated to the session */
-	uint16_t			 prof_tcam_entries_start;
-	/* Number of profile TCAM entries allocated */
-	uint16_t			 prof_tcam_entries_stride;
-	/* Starting index of EM profile ids allocated to the session */
-	uint16_t			 em_prof_id_start;
-	/* Number of EM profile ids allocated */
-	uint16_t			 em_prof_id_stride;
-	/* Starting index of EM record entries allocated to the session */
-	uint16_t			 em_record_entries_start;
-	/* Number of EM record entries allocated */
-	uint16_t			 em_record_entries_stride;
-	/* Starting index of WC TCAM profiles ids allocated to the session */
-	uint16_t			 wc_tcam_prof_id_start;
-	/* Number of WC TCAM profile ids allocated */
-	uint16_t			 wc_tcam_prof_id_stride;
-	/* Starting index of WC TCAM entries allocated to the session */
-	uint16_t			 wc_tcam_entries_start;
-	/* Number of WC TCAM allocated */
-	uint16_t			 wc_tcam_entries_stride;
-	/* Starting index of meter profiles allocated to the session */
-	uint16_t			 meter_profiles_start;
-	/* Number of meter profiles allocated */
-	uint16_t			 meter_profiles_stride;
-	/* Starting index of meter instance allocated to the session */
-	uint16_t			 meter_inst_start;
-	/* Number of meter instance allocated */
-	uint16_t			 meter_inst_stride;
-	/* Starting index of mirrors allocated to the session */
-	uint16_t			 mirrors_start;
-	/* Number of mirrors allocated */
-	uint16_t			 mirrors_stride;
-	/* Starting index of UPAR allocated to the session */
-	uint16_t			 upar_start;
-	/* Number of UPAR allocated */
-	uint16_t			 upar_stride;
-	/* Starting index of SP TCAM entries allocated to the session */
-	uint16_t			 sp_tcam_entries_start;
-	/* Number of SP TCAM entries allocated */
-	uint16_t			 sp_tcam_entries_stride;
-	/* Starting index of L2 functions allocated to the session */
-	uint16_t			 l2_func_start;
-	/* Number of L2 functions allocated */
-	uint16_t			 l2_func_stride;
-	/* Starting index of flexible key templates allocated to the session */
-	uint16_t			 flex_key_templ_start;
-	/* Number of flexible key templates allocated */
-	uint16_t			 flex_key_templ_stride;
-	/* Starting index of table scopes allocated to the session */
-	uint16_t			 tbl_scope_start;
-	/* Number of table scopes allocated */
-	uint16_t			 tbl_scope_stride;
-	/* Starting index of epoch0 entries allocated to the session */
-	uint16_t			 epoch0_entries_start;
-	/* Number of epoch0 entries allocated */
-	uint16_t			 epoch0_entries_stride;
-	/* Starting index of epoch1 entries allocated to the session */
-	uint16_t			 epoch1_entries_start;
-	/* Number of epoch1 entries allocated */
-	uint16_t			 epoch1_entries_stride;
-	/* Starting index of metadata allocated to the session */
-	uint16_t			 metadata_start;
-	/* Number of metadata allocated */
-	uint16_t			 metadata_stride;
-	/* Starting index of CT states allocated to the session */
-	uint16_t			 ct_state_start;
-	/* Number of CT states allocated */
-	uint16_t			 ct_state_stride;
-	/* Starting index of range profiles allocated to the session */
-	uint16_t			 range_prof_start;
-	/* Number range profiles allocated */
-	uint16_t			 range_prof_stride;
-	/* Starting index of range enntries allocated to the session */
-	uint16_t			 range_entries_start;
-	/* Number of range entries allocated */
-	uint16_t			 range_entries_stride;
-	/* Starting index of LAG table entries allocated to the session */
-	uint16_t			 lag_tbl_entries_start;
-	/* Number of LAG table entries allocated */
-	uint16_t			 lag_tbl_entries_stride;
-} tf_session_hw_resc_free_input_t, *ptf_session_hw_resc_free_input_t;
-
-/* Input params for session resource HW flush */
-typedef struct tf_session_hw_resc_flush_input {
-	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the flush apply to RX */
-#define TF_SESSION_HW_RESC_FLUSH_INPUT_FLAGS_DIR_RX	  (0x0)
-	/* When set to 1, indicates the flush apply to TX */
-#define TF_SESSION_HW_RESC_FLUSH_INPUT_FLAGS_DIR_TX	  (0x1)
-	/* Unused */
-	uint8_t			  unused[2];
-	/* Starting index of L2 CTX TCAM entries allocated to the session */
-	uint16_t			 l2_ctx_tcam_entries_start;
-	/* Number of L2 CTX TCAM entries allocated */
-	uint16_t			 l2_ctx_tcam_entries_stride;
-	/* Starting index of profile functions allocated to the session */
-	uint16_t			 prof_func_start;
-	/* Number of profile functions allocated */
-	uint16_t			 prof_func_stride;
-	/* Starting index of profile TCAM entries allocated to the session */
-	uint16_t			 prof_tcam_entries_start;
-	/* Number of profile TCAM entries allocated */
-	uint16_t			 prof_tcam_entries_stride;
-	/* Starting index of EM profile ids allocated to the session */
-	uint16_t			 em_prof_id_start;
-	/* Number of EM profile ids allocated */
-	uint16_t			 em_prof_id_stride;
-	/* Starting index of EM record entries allocated to the session */
-	uint16_t			 em_record_entries_start;
-	/* Number of EM record entries allocated */
-	uint16_t			 em_record_entries_stride;
-	/* Starting index of WC TCAM profiles ids allocated to the session */
-	uint16_t			 wc_tcam_prof_id_start;
-	/* Number of WC TCAM profile ids allocated */
-	uint16_t			 wc_tcam_prof_id_stride;
-	/* Starting index of WC TCAM entries allocated to the session */
-	uint16_t			 wc_tcam_entries_start;
-	/* Number of WC TCAM allocated */
-	uint16_t			 wc_tcam_entries_stride;
-	/* Starting index of meter profiles allocated to the session */
-	uint16_t			 meter_profiles_start;
-	/* Number of meter profiles allocated */
-	uint16_t			 meter_profiles_stride;
-	/* Starting index of meter instance allocated to the session */
-	uint16_t			 meter_inst_start;
-	/* Number of meter instance allocated */
-	uint16_t			 meter_inst_stride;
-	/* Starting index of mirrors allocated to the session */
-	uint16_t			 mirrors_start;
-	/* Number of mirrors allocated */
-	uint16_t			 mirrors_stride;
-	/* Starting index of UPAR allocated to the session */
-	uint16_t			 upar_start;
-	/* Number of UPAR allocated */
-	uint16_t			 upar_stride;
-	/* Starting index of SP TCAM entries allocated to the session */
-	uint16_t			 sp_tcam_entries_start;
-	/* Number of SP TCAM entries allocated */
-	uint16_t			 sp_tcam_entries_stride;
-	/* Starting index of L2 functions allocated to the session */
-	uint16_t			 l2_func_start;
-	/* Number of L2 functions allocated */
-	uint16_t			 l2_func_stride;
-	/* Starting index of flexible key templates allocated to the session */
-	uint16_t			 flex_key_templ_start;
-	/* Number of flexible key templates allocated */
-	uint16_t			 flex_key_templ_stride;
-	/* Starting index of table scopes allocated to the session */
-	uint16_t			 tbl_scope_start;
-	/* Number of table scopes allocated */
-	uint16_t			 tbl_scope_stride;
-	/* Starting index of epoch0 entries allocated to the session */
-	uint16_t			 epoch0_entries_start;
-	/* Number of epoch0 entries allocated */
-	uint16_t			 epoch0_entries_stride;
-	/* Starting index of epoch1 entries allocated to the session */
-	uint16_t			 epoch1_entries_start;
-	/* Number of epoch1 entries allocated */
-	uint16_t			 epoch1_entries_stride;
-	/* Starting index of metadata allocated to the session */
-	uint16_t			 metadata_start;
-	/* Number of metadata allocated */
-	uint16_t			 metadata_stride;
-	/* Starting index of CT states allocated to the session */
-	uint16_t			 ct_state_start;
-	/* Number of CT states allocated */
-	uint16_t			 ct_state_stride;
-	/* Starting index of range profiles allocated to the session */
-	uint16_t			 range_prof_start;
-	/* Number range profiles allocated */
-	uint16_t			 range_prof_stride;
-	/* Starting index of range enntries allocated to the session */
-	uint16_t			 range_entries_start;
-	/* Number of range entries allocated */
-	uint16_t			 range_entries_stride;
-	/* Starting index of LAG table entries allocated to the session */
-	uint16_t			 lag_tbl_entries_start;
-	/* Number of LAG table entries allocated */
-	uint16_t			 lag_tbl_entries_stride;
-} tf_session_hw_resc_flush_input_t, *ptf_session_hw_resc_flush_input_t;
-
-/* Input params for session resource SRAM qcaps */
-typedef struct tf_session_sram_resc_qcaps_input {
-	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the query apply to RX */
-#define TF_SESSION_SRAM_RESC_QCAPS_INPUT_FLAGS_DIR_RX	  (0x0)
-	/* When set to 1, indicates the query apply to TX */
-#define TF_SESSION_SRAM_RESC_QCAPS_INPUT_FLAGS_DIR_TX	  (0x1)
-} tf_session_sram_resc_qcaps_input_t, *ptf_session_sram_resc_qcaps_input_t;
-
-/* Output params for session resource SRAM qcaps */
-typedef struct tf_session_sram_resc_qcaps_output {
-	/* Flags */
-	uint32_t			 flags;
-	/* When set to 0, indicates Static partitioning */
-#define TF_SESSION_SRAM_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_STATIC	  (0x0)
-	/* When set to 1, indicates Strategy 1 */
-#define TF_SESSION_SRAM_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_1	  (0x1)
-	/* When set to 1, indicates Strategy 2 */
-#define TF_SESSION_SRAM_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_2	  (0x2)
-	/* When set to 1, indicates Strategy 3 */
-#define TF_SESSION_SRAM_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_3	  (0x3)
-	/* Minimum guaranteed number of Full Action */
-	uint16_t			 full_action_min;
-	/* Maximum non-guaranteed number of Full Action */
-	uint16_t			 full_action_max;
-	/* Minimum guaranteed number of MCG */
-	uint16_t			 mcg_min;
-	/* Maximum non-guaranteed number of MCG */
-	uint16_t			 mcg_max;
-	/* Minimum guaranteed number of Encap 8B */
-	uint16_t			 encap_8b_min;
-	/* Maximum non-guaranteed number of Encap 8B */
-	uint16_t			 encap_8b_max;
-	/* Minimum guaranteed number of Encap 16B */
-	uint16_t			 encap_16b_min;
-	/* Maximum non-guaranteed number of Encap 16B */
-	uint16_t			 encap_16b_max;
-	/* Minimum guaranteed number of Encap 64B */
-	uint16_t			 encap_64b_min;
-	/* Maximum non-guaranteed number of Encap 64B */
-	uint16_t			 encap_64b_max;
-	/* Minimum guaranteed number of SP SMAC */
-	uint16_t			 sp_smac_min;
-	/* Maximum non-guaranteed number of SP SMAC */
-	uint16_t			 sp_smac_max;
-	/* Minimum guaranteed number of SP SMAC IPv4 */
-	uint16_t			 sp_smac_ipv4_min;
-	/* Maximum non-guaranteed number of SP SMAC IPv4 */
-	uint16_t			 sp_smac_ipv4_max;
-	/* Minimum guaranteed number of SP SMAC IPv6 */
-	uint16_t			 sp_smac_ipv6_min;
-	/* Maximum non-guaranteed number of SP SMAC IPv6 */
-	uint16_t			 sp_smac_ipv6_max;
-	/* Minimum guaranteed number of Counter 64B */
-	uint16_t			 counter_64b_min;
-	/* Maximum non-guaranteed number of Counter 64B */
-	uint16_t			 counter_64b_max;
-	/* Minimum guaranteed number of NAT SPORT */
-	uint16_t			 nat_sport_min;
-	/* Maximum non-guaranteed number of NAT SPORT */
-	uint16_t			 nat_sport_max;
-	/* Minimum guaranteed number of NAT DPORT */
-	uint16_t			 nat_dport_min;
-	/* Maximum non-guaranteed number of NAT DPORT */
-	uint16_t			 nat_dport_max;
-	/* Minimum guaranteed number of NAT S_IPV4 */
-	uint16_t			 nat_s_ipv4_min;
-	/* Maximum non-guaranteed number of NAT S_IPV4 */
-	uint16_t			 nat_s_ipv4_max;
-	/* Minimum guaranteed number of NAT D_IPV4 */
-	uint16_t			 nat_d_ipv4_min;
-	/* Maximum non-guaranteed number of NAT D_IPV4 */
-	uint16_t			 nat_d_ipv4_max;
-} tf_session_sram_resc_qcaps_output_t, *ptf_session_sram_resc_qcaps_output_t;
-
-/* Input params for session resource SRAM alloc */
-typedef struct tf_session_sram_resc_alloc_input {
-	/* FW Session Id */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the query apply to RX */
-#define TF_SESSION_SRAM_RESC_ALLOC_INPUT_FLAGS_DIR_RX	  (0x0)
-	/* When set to 1, indicates the query apply to TX */
-#define TF_SESSION_SRAM_RESC_ALLOC_INPUT_FLAGS_DIR_TX	  (0x1)
-	/* Unused */
-	uint8_t			  unused[2];
-	/* Number of full action SRAM entries to be allocated */
-	uint16_t			 num_full_action;
-	/* Number of multicast groups to be allocated */
-	uint16_t			 num_mcg;
-	/* Number of Encap 8B entries to be allocated */
-	uint16_t			 num_encap_8b;
-	/* Number of Encap 16B entries to be allocated */
-	uint16_t			 num_encap_16b;
-	/* Number of Encap 64B entries to be allocated */
-	uint16_t			 num_encap_64b;
-	/* Number of SP SMAC entries to be allocated */
-	uint16_t			 num_sp_smac;
-	/* Number of SP SMAC IPv4 entries to be allocated */
-	uint16_t			 num_sp_smac_ipv4;
-	/* Number of SP SMAC IPv6 entries to be allocated */
-	uint16_t			 num_sp_smac_ipv6;
-	/* Number of Counter 64B entries to be allocated */
-	uint16_t			 num_counter_64b;
-	/* Number of NAT source ports to be allocated */
-	uint16_t			 num_nat_sport;
-	/* Number of NAT destination ports to be allocated */
-	uint16_t			 num_nat_dport;
-	/* Number of NAT source iPV4 addresses to be allocated */
-	uint16_t			 num_nat_s_ipv4;
-	/* Number of NAT destination IPV4 addresses to be allocated */
-	uint16_t			 num_nat_d_ipv4;
-} tf_session_sram_resc_alloc_input_t, *ptf_session_sram_resc_alloc_input_t;
-
-/* Output params for session resource SRAM alloc */
-typedef struct tf_session_sram_resc_alloc_output {
-	/* Unused */
-	uint8_t			  unused[2];
-	/* Starting index of full action SRAM entries allocated to the session */
-	uint16_t			 full_action_start;
-	/* Number of full action SRAM entries allocated */
-	uint16_t			 full_action_stride;
-	/* Starting index of multicast groups allocated to this session */
-	uint16_t			 mcg_start;
-	/* Number of multicast groups allocated */
-	uint16_t			 mcg_stride;
-	/* Starting index of encap 8B entries allocated to the session */
-	uint16_t			 encap_8b_start;
-	/* Number of encap 8B entries allocated */
-	uint16_t			 encap_8b_stride;
-	/* Starting index of encap 16B entries allocated to the session */
-	uint16_t			 encap_16b_start;
-	/* Number of encap 16B entries allocated */
-	uint16_t			 encap_16b_stride;
-	/* Starting index of encap 64B entries allocated to the session */
-	uint16_t			 encap_64b_start;
-	/* Number of encap 64B entries allocated */
-	uint16_t			 encap_64b_stride;
-	/* Starting index of SP SMAC entries allocated to the session */
-	uint16_t			 sp_smac_start;
-	/* Number of SP SMAC entries allocated */
-	uint16_t			 sp_smac_stride;
-	/* Starting index of SP SMAC IPv4 entries allocated to the session */
-	uint16_t			 sp_smac_ipv4_start;
-	/* Number of SP SMAC IPv4 entries allocated */
-	uint16_t			 sp_smac_ipv4_stride;
-	/* Starting index of SP SMAC IPv6 entries allocated to the session */
-	uint16_t			 sp_smac_ipv6_start;
-	/* Number of SP SMAC IPv6 entries allocated */
-	uint16_t			 sp_smac_ipv6_stride;
-	/* Starting index of Counter 64B entries allocated to the session */
-	uint16_t			 counter_64b_start;
-	/* Number of Counter 64B entries allocated */
-	uint16_t			 counter_64b_stride;
-	/* Starting index of NAT source ports allocated to the session */
-	uint16_t			 nat_sport_start;
-	/* Number of NAT source ports allocated */
-	uint16_t			 nat_sport_stride;
-	/* Starting index of NAT destination ports allocated to the session */
-	uint16_t			 nat_dport_start;
-	/* Number of NAT destination ports allocated */
-	uint16_t			 nat_dport_stride;
-	/* Starting index of NAT source IPV4 addresses allocated to the session */
-	uint16_t			 nat_s_ipv4_start;
-	/* Number of NAT source IPV4 addresses allocated */
-	uint16_t			 nat_s_ipv4_stride;
-	/*
-	 * Starting index of NAT destination IPV4 addresses allocated to the
-	 * session
-	 */
-	uint16_t			 nat_d_ipv4_start;
-	/* Number of NAT destination IPV4 addresses allocated */
-	uint16_t			 nat_d_ipv4_stride;
-} tf_session_sram_resc_alloc_output_t, *ptf_session_sram_resc_alloc_output_t;
-
-/* Input params for session resource SRAM free */
-typedef struct tf_session_sram_resc_free_input {
-	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the query apply to RX */
-#define TF_SESSION_SRAM_RESC_FREE_INPUT_FLAGS_DIR_RX	  (0x0)
-	/* When set to 1, indicates the query apply to TX */
-#define TF_SESSION_SRAM_RESC_FREE_INPUT_FLAGS_DIR_TX	  (0x1)
-	/* Starting index of full action SRAM entries allocated to the session */
-	uint16_t			 full_action_start;
-	/* Number of full action SRAM entries allocated */
-	uint16_t			 full_action_stride;
-	/* Starting index of multicast groups allocated to this session */
-	uint16_t			 mcg_start;
-	/* Number of multicast groups allocated */
-	uint16_t			 mcg_stride;
-	/* Starting index of encap 8B entries allocated to the session */
-	uint16_t			 encap_8b_start;
-	/* Number of encap 8B entries allocated */
-	uint16_t			 encap_8b_stride;
-	/* Starting index of encap 16B entries allocated to the session */
-	uint16_t			 encap_16b_start;
-	/* Number of encap 16B entries allocated */
-	uint16_t			 encap_16b_stride;
-	/* Starting index of encap 64B entries allocated to the session */
-	uint16_t			 encap_64b_start;
-	/* Number of encap 64B entries allocated */
-	uint16_t			 encap_64b_stride;
-	/* Starting index of SP SMAC entries allocated to the session */
-	uint16_t			 sp_smac_start;
-	/* Number of SP SMAC entries allocated */
-	uint16_t			 sp_smac_stride;
-	/* Starting index of SP SMAC IPv4 entries allocated to the session */
-	uint16_t			 sp_smac_ipv4_start;
-	/* Number of SP SMAC IPv4 entries allocated */
-	uint16_t			 sp_smac_ipv4_stride;
-	/* Starting index of SP SMAC IPv6 entries allocated to the session */
-	uint16_t			 sp_smac_ipv6_start;
-	/* Number of SP SMAC IPv6 entries allocated */
-	uint16_t			 sp_smac_ipv6_stride;
-	/* Starting index of Counter 64B entries allocated to the session */
-	uint16_t			 counter_64b_start;
-	/* Number of Counter 64B entries allocated */
-	uint16_t			 counter_64b_stride;
-	/* Starting index of NAT source ports allocated to the session */
-	uint16_t			 nat_sport_start;
-	/* Number of NAT source ports allocated */
-	uint16_t			 nat_sport_stride;
-	/* Starting index of NAT destination ports allocated to the session */
-	uint16_t			 nat_dport_start;
-	/* Number of NAT destination ports allocated */
-	uint16_t			 nat_dport_stride;
-	/* Starting index of NAT source IPV4 addresses allocated to the session */
-	uint16_t			 nat_s_ipv4_start;
-	/* Number of NAT source IPV4 addresses allocated */
-	uint16_t			 nat_s_ipv4_stride;
-	/*
-	 * Starting index of NAT destination IPV4 addresses allocated to the
-	 * session
-	 */
-	uint16_t			 nat_d_ipv4_start;
-	/* Number of NAT destination IPV4 addresses allocated */
-	uint16_t			 nat_d_ipv4_stride;
-} tf_session_sram_resc_free_input_t, *ptf_session_sram_resc_free_input_t;
-
-/* Input params for session resource SRAM flush */
-typedef struct tf_session_sram_resc_flush_input {
-	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the flush apply to RX */
-#define TF_SESSION_SRAM_RESC_FLUSH_INPUT_FLAGS_DIR_RX	  (0x0)
-	/* When set to 1, indicates the flush apply to TX */
-#define TF_SESSION_SRAM_RESC_FLUSH_INPUT_FLAGS_DIR_TX	  (0x1)
-	/* Starting index of full action SRAM entries allocated to the session */
-	uint16_t			 full_action_start;
-	/* Number of full action SRAM entries allocated */
-	uint16_t			 full_action_stride;
-	/* Starting index of multicast groups allocated to this session */
-	uint16_t			 mcg_start;
-	/* Number of multicast groups allocated */
-	uint16_t			 mcg_stride;
-	/* Starting index of encap 8B entries allocated to the session */
-	uint16_t			 encap_8b_start;
-	/* Number of encap 8B entries allocated */
-	uint16_t			 encap_8b_stride;
-	/* Starting index of encap 16B entries allocated to the session */
-	uint16_t			 encap_16b_start;
-	/* Number of encap 16B entries allocated */
-	uint16_t			 encap_16b_stride;
-	/* Starting index of encap 64B entries allocated to the session */
-	uint16_t			 encap_64b_start;
-	/* Number of encap 64B entries allocated */
-	uint16_t			 encap_64b_stride;
-	/* Starting index of SP SMAC entries allocated to the session */
-	uint16_t			 sp_smac_start;
-	/* Number of SP SMAC entries allocated */
-	uint16_t			 sp_smac_stride;
-	/* Starting index of SP SMAC IPv4 entries allocated to the session */
-	uint16_t			 sp_smac_ipv4_start;
-	/* Number of SP SMAC IPv4 entries allocated */
-	uint16_t			 sp_smac_ipv4_stride;
-	/* Starting index of SP SMAC IPv6 entries allocated to the session */
-	uint16_t			 sp_smac_ipv6_start;
-	/* Number of SP SMAC IPv6 entries allocated */
-	uint16_t			 sp_smac_ipv6_stride;
-	/* Starting index of Counter 64B entries allocated to the session */
-	uint16_t			 counter_64b_start;
-	/* Number of Counter 64B entries allocated */
-	uint16_t			 counter_64b_stride;
-	/* Starting index of NAT source ports allocated to the session */
-	uint16_t			 nat_sport_start;
-	/* Number of NAT source ports allocated */
-	uint16_t			 nat_sport_stride;
-	/* Starting index of NAT destination ports allocated to the session */
-	uint16_t			 nat_dport_start;
-	/* Number of NAT destination ports allocated */
-	uint16_t			 nat_dport_stride;
-	/* Starting index of NAT source IPV4 addresses allocated to the session */
-	uint16_t			 nat_s_ipv4_start;
-	/* Number of NAT source IPV4 addresses allocated */
-	uint16_t			 nat_s_ipv4_stride;
-	/*
-	 * Starting index of NAT destination IPV4 addresses allocated to the
-	 * session
-	 */
-	uint16_t			 nat_d_ipv4_start;
-	/* Number of NAT destination IPV4 addresses allocated */
-	uint16_t			 nat_d_ipv4_stride;
-} tf_session_sram_resc_flush_input_t, *ptf_session_sram_resc_flush_input_t;
-
-/* Input params for table type set */
-typedef struct tf_tbl_type_set_input {
-	/* Session Id */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the get apply to RX */
-#define TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX			(0x0)
-	/* When set to 1, indicates the get apply to TX */
-#define TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX			(0x1)
-	/* Type of the object to set */
-	uint32_t			 type;
-	/* Size of the data to set in bytes */
-	uint16_t			 size;
-	/* Data to set */
-	uint8_t			  data[TF_BULK_SEND];
-	/* Index to set */
-	uint32_t			 index;
-} tf_tbl_type_set_input_t, *ptf_tbl_type_set_input_t;
-
-/* Input params for table type get */
-typedef struct tf_tbl_type_get_input {
-	/* Session Id */
-	uint32_t			 fw_session_id;
-	/* flags */
-	uint16_t			 flags;
-	/* When set to 0, indicates the get apply to RX */
-#define TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_RX			(0x0)
-	/* When set to 1, indicates the get apply to TX */
-#define TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX			(0x1)
-	/* Type of the object to set */
-	uint32_t			 type;
-	/* Index to get */
-	uint32_t			 index;
-} tf_tbl_type_get_input_t, *ptf_tbl_type_get_input_t;
-
-/* Output params for table type get */
-typedef struct tf_tbl_type_get_output {
-	/* Size of the data read in bytes */
-	uint16_t			 size;
-	/* Data read */
-	uint8_t			  data[TF_BULK_RECV];
-} tf_tbl_type_get_output_t, *ptf_tbl_type_get_output_t;
 
 /* Input params for table type get */
 typedef struct tf_tbl_type_bulk_get_input {
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 3e23d0513..8b3e15c8a 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -208,7 +208,15 @@ int tf_insert_em_entry(struct tf *tfp,
 		return rc;
 	}
 
-	rc = dev->ops->tf_dev_insert_em_entry(tfp, parms);
+	if (parms->mem == TF_MEM_EXTERNAL &&
+		dev->ops->tf_dev_insert_ext_em_entry != NULL)
+		rc = dev->ops->tf_dev_insert_ext_em_entry(tfp, parms);
+	else if (parms->mem == TF_MEM_INTERNAL &&
+		dev->ops->tf_dev_insert_int_em_entry != NULL)
+		rc = dev->ops->tf_dev_insert_int_em_entry(tfp, parms);
+	else
+		return -EINVAL;
+
 	if (rc) {
 		TFP_DRV_LOG(ERR,
 			    "%s: EM insert failed, rc:%s\n",
@@ -217,7 +225,7 @@ int tf_insert_em_entry(struct tf *tfp,
 		return rc;
 	}
 
-	return -EINVAL;
+	return 0;
 }
 
 /** Delete EM hash entry API
@@ -255,7 +263,13 @@ int tf_delete_em_entry(struct tf *tfp,
 		return rc;
 	}
 
-	rc = dev->ops->tf_dev_delete_em_entry(tfp, parms);
+	if (parms->mem == TF_MEM_EXTERNAL)
+		rc = dev->ops->tf_dev_delete_ext_em_entry(tfp, parms);
+	else if (parms->mem == TF_MEM_INTERNAL)
+		rc = dev->ops->tf_dev_delete_int_em_entry(tfp, parms);
+	else
+		return -EINVAL;
+
 	if (rc) {
 		TFP_DRV_LOG(ERR,
 			    "%s: EM delete failed, rc:%s\n",
@@ -806,3 +820,83 @@ tf_get_tbl_entry(struct tf *tfp,
 
 	return rc;
 }
+
+/* API defined in tf_core.h */
+int
+tf_alloc_tbl_scope(struct tf *tfp,
+		   struct tf_alloc_tbl_scope_parms *parms)
+{
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+	int rc;
+
+	TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to lookup session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to lookup device, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	if (dev->ops->tf_dev_alloc_tbl_scope != NULL) {
+		rc = dev->ops->tf_dev_alloc_tbl_scope(tfp, parms);
+	} else {
+		TFP_DRV_LOG(ERR,
+			    "Alloc table scope not supported by device\n");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/* API defined in tf_core.h */
+int
+tf_free_tbl_scope(struct tf *tfp,
+		  struct tf_free_tbl_scope_parms *parms)
+{
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+	int rc;
+
+	TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to lookup session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to lookup device, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	if (dev->ops->tf_dev_free_tbl_scope) {
+		rc = dev->ops->tf_dev_free_tbl_scope(tfp, parms);
+	} else {
+		TFP_DRV_LOG(ERR,
+			    "Free table scope not supported by device\n");
+		return -EINVAL;
+	}
+
+	return rc;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_device.c b/drivers/net/bnxt/tf_core/tf_device.c
index 441d0c678..20b0c5948 100644
--- a/drivers/net/bnxt/tf_core/tf_device.c
+++ b/drivers/net/bnxt/tf_core/tf_device.c
@@ -6,6 +6,7 @@
 #include "tf_device.h"
 #include "tf_device_p4.h"
 #include "tfp.h"
+#include "tf_em.h"
 
 struct tf;
 
@@ -42,10 +43,7 @@ tf_dev_bind_p4(struct tf *tfp,
 	struct tf_ident_cfg_parms ident_cfg;
 	struct tf_tbl_cfg_parms tbl_cfg;
 	struct tf_tcam_cfg_parms tcam_cfg;
-
-	dev_handle->type = TF_DEVICE_TYPE_WH;
-	/* Initial function initialization */
-	dev_handle->ops = &tf_dev_ops_p4_init;
+	struct tf_em_cfg_parms em_cfg;
 
 	dev_handle->type = TF_DEVICE_TYPE_WH;
 	/* Initial function initialization */
@@ -86,6 +84,36 @@ tf_dev_bind_p4(struct tf *tfp,
 		goto fail;
 	}
 
+	/*
+	 * EEM
+	 */
+	em_cfg.num_elements = TF_EM_TBL_TYPE_MAX;
+	em_cfg.cfg = tf_em_ext_p4;
+	em_cfg.resources = resources;
+	em_cfg.mem_type = TF_EEM_MEM_TYPE_HOST;
+
+	rc = tf_em_ext_common_bind(tfp, &em_cfg);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "EEM initialization failure\n");
+		goto fail;
+	}
+
+	/*
+	 * EM
+	 */
+	em_cfg.num_elements = TF_EM_TBL_TYPE_MAX;
+	em_cfg.cfg = tf_em_int_p4;
+	em_cfg.resources = resources;
+	em_cfg.mem_type = 0; /* Not used by EM */
+
+	rc = tf_em_int_bind(tfp, &em_cfg);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "EM initialization failure\n");
+		goto fail;
+	}
+
 	/* Final function initialization */
 	dev_handle->ops = &tf_dev_ops_p4;
 
@@ -144,6 +172,20 @@ tf_dev_unbind_p4(struct tf *tfp)
 		fail = true;
 	}
 
+	rc = tf_em_ext_common_unbind(tfp);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Device unbind failed, EEM\n");
+		fail = true;
+	}
+
+	rc = tf_em_int_unbind(tfp);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Device unbind failed, EM\n");
+		fail = true;
+	}
+
 	if (fail)
 		return -1;
 
diff --git a/drivers/net/bnxt/tf_core/tf_device.h b/drivers/net/bnxt/tf_core/tf_device.h
index c8feac55d..2712d1039 100644
--- a/drivers/net/bnxt/tf_core/tf_device.h
+++ b/drivers/net/bnxt/tf_core/tf_device.h
@@ -15,12 +15,24 @@ struct tf;
 struct tf_session;
 
 /**
- *
+ * Device module types
  */
 enum tf_device_module_type {
+	/**
+	 * Identifier module
+	 */
 	TF_DEVICE_MODULE_TYPE_IDENTIFIER,
+	/**
+	 * Table type module
+	 */
 	TF_DEVICE_MODULE_TYPE_TABLE,
+	/**
+	 * TCAM module
+	 */
 	TF_DEVICE_MODULE_TYPE_TCAM,
+	/**
+	 * EM module
+	 */
 	TF_DEVICE_MODULE_TYPE_EM,
 	TF_DEVICE_MODULE_TYPE_MAX
 };
@@ -395,8 +407,8 @@ struct tf_dev_ops {
 	 *    0       - Success
 	 *    -EINVAL - Error
 	 */
-	int (*tf_dev_insert_em_entry)(struct tf *tfp,
-				      struct tf_insert_em_entry_parms *parms);
+	int (*tf_dev_insert_int_em_entry)(struct tf *tfp,
+					  struct tf_insert_em_entry_parms *parms);
 
 	/**
 	 * Delete EM hash entry API
@@ -411,8 +423,72 @@ struct tf_dev_ops {
 	 *    0       - Success
 	 *    -EINVAL - Error
 	 */
-	int (*tf_dev_delete_em_entry)(struct tf *tfp,
-				      struct tf_delete_em_entry_parms *parms);
+	int (*tf_dev_delete_int_em_entry)(struct tf *tfp,
+					  struct tf_delete_em_entry_parms *parms);
+
+	/**
+	 * Insert EEM hash entry API
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to E/EM insert parameters
+	 *
+	 *  Returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_insert_ext_em_entry)(struct tf *tfp,
+					  struct tf_insert_em_entry_parms *parms);
+
+	/**
+	 * Delete EEM hash entry API
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to E/EM delete parameters
+	 *
+	 *    returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_delete_ext_em_entry)(struct tf *tfp,
+					  struct tf_delete_em_entry_parms *parms);
+
+	/**
+	 * Allocate EEM table scope
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to table scope alloc parameters
+	 *
+	 *    returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_alloc_tbl_scope)(struct tf *tfp,
+				      struct tf_alloc_tbl_scope_parms *parms);
+
+	/**
+	 * Free EEM table scope
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to table scope free parameters
+	 *
+	 *    returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_free_tbl_scope)(struct tf *tfp,
+				     struct tf_free_tbl_scope_parms *parms);
 };
 
 /**
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index 9e332c594..127c655a6 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -93,6 +93,12 @@ const struct tf_dev_ops tf_dev_ops_p4_init = {
 	.tf_dev_alloc_search_tcam = NULL,
 	.tf_dev_set_tcam = NULL,
 	.tf_dev_get_tcam = NULL,
+	.tf_dev_insert_int_em_entry = NULL,
+	.tf_dev_delete_int_em_entry = NULL,
+	.tf_dev_insert_ext_em_entry = NULL,
+	.tf_dev_delete_ext_em_entry = NULL,
+	.tf_dev_alloc_tbl_scope = NULL,
+	.tf_dev_free_tbl_scope = NULL,
 };
 
 /**
@@ -113,6 +119,10 @@ const struct tf_dev_ops tf_dev_ops_p4 = {
 	.tf_dev_alloc_search_tcam = NULL,
 	.tf_dev_set_tcam = tf_tcam_set,
 	.tf_dev_get_tcam = NULL,
-	.tf_dev_insert_em_entry = tf_em_insert_entry,
-	.tf_dev_delete_em_entry = tf_em_delete_entry,
+	.tf_dev_insert_int_em_entry = tf_em_insert_int_entry,
+	.tf_dev_delete_int_em_entry = tf_em_delete_int_entry,
+	.tf_dev_insert_ext_em_entry = tf_em_insert_ext_entry,
+	.tf_dev_delete_ext_em_entry = tf_em_delete_ext_entry,
+	.tf_dev_alloc_tbl_scope = tf_em_ext_common_alloc,
+	.tf_dev_free_tbl_scope = tf_em_ext_common_free,
 };
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.h b/drivers/net/bnxt/tf_core/tf_device_p4.h
index 411e21637..da6dd65a3 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.h
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.h
@@ -36,13 +36,12 @@ struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
 	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_MCG },
 	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_8B },
 	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_16B },
-	/* CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_32B */
+	/* CFA_RESOURCE_TYPE_P4_ENCAP_32B */
 	{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
 	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_64B },
 	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC },
-	/* CFA_RESOURCE_TYPE_P4_SRAM_SP_SMAC_IPV4 */
-	{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
-	/* CFA_RESOURCE_TYPE_P4_SRAM_SP_SMAC_IPV6 */
+	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4 },
+	/* CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6 */
 	{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
 	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_COUNTER_64B },
 	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_SPORT },
@@ -77,4 +76,17 @@ struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
 	/* CFA_RESOURCE_TYPE_P4_EXT */
 	{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID }
 };
+
+struct tf_rm_element_cfg tf_em_ext_p4[TF_EM_TBL_TYPE_MAX] = {
+	/* CFA_RESOURCE_TYPE_P4_EM_REC */
+	{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_TBL_SCOPE },
+};
+
+struct tf_rm_element_cfg tf_em_int_p4[TF_EM_TBL_TYPE_MAX] = {
+	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC },
+	/* CFA_RESOURCE_TYPE_P4_TBL_SCOPE */
+	{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+};
+
 #endif /* _TF_DEVICE_P4_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_em.c b/drivers/net/bnxt/tf_core/tf_em.c
deleted file mode 100644
index fcbbd7eca..000000000
--- a/drivers/net/bnxt/tf_core/tf_em.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2020 Broadcom
- * All rights reserved.
- */
-
-#include <string.h>
-#include <rte_common.h>
-#include <rte_errno.h>
-#include <rte_log.h>
-
-#include "tf_core.h"
-#include "tf_em.h"
-#include "tf_msg.h"
-#include "tfp.h"
-#include "lookup3.h"
-#include "tf_ext_flow_handle.h"
-
-#include "bnxt.h"
-
-
-static uint32_t tf_em_get_key_mask(int num_entries)
-{
-	uint32_t mask = num_entries - 1;
-
-	if (num_entries & 0x7FFF)
-		return 0;
-
-	if (num_entries > (128 * 1024 * 1024))
-		return 0;
-
-	return mask;
-}
-
-static void tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
-				   uint8_t	       *in_key,
-				   struct cfa_p4_eem_64b_entry *key_entry)
-{
-	key_entry->hdr.word1 = result->word1;
-
-	if (result->word1 & CFA_P4_EEM_ENTRY_ACT_REC_INT_MASK)
-		key_entry->hdr.pointer = result->pointer;
-	else
-		key_entry->hdr.pointer = result->pointer;
-
-	memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);
-
-#ifdef TF_EEM_DEBUG
-	dump_raw((uint8_t *)key_entry, TF_EM_KEY_RECORD_SIZE, "Create raw:");
-#endif
-}
-
-/** insert EEM entry API
- *
- * returns:
- *  0
- *  TF_ERR	    - unable to get lock
- *
- * insert callback returns:
- *   0
- *   TF_ERR_EM_DUP  - key is already in table
- */
-static int tf_insert_eem_entry(struct tf_tbl_scope_cb	   *tbl_scope_cb,
-			       struct tf_insert_em_entry_parms *parms)
-{
-	uint32_t	   mask;
-	uint32_t	   key0_hash;
-	uint32_t	   key1_hash;
-	uint32_t	   key0_index;
-	uint32_t	   key1_index;
-	struct cfa_p4_eem_64b_entry key_entry;
-	uint32_t	   index;
-	enum hcapi_cfa_em_table_type table_type;
-	uint32_t	   gfid;
-	struct hcapi_cfa_hwop op;
-	struct hcapi_cfa_key_tbl key_tbl;
-	struct hcapi_cfa_key_data key_obj;
-	struct hcapi_cfa_key_loc key_loc;
-	uint64_t big_hash;
-	int rc;
-
-	/* Get mask to use on hash */
-	mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
-
-	if (!mask)
-		return -EINVAL;
-
-#ifdef TF_EEM_DEBUG
-	dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
-#endif
-
-	big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
-				      (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
-	key0_hash = (uint32_t)(big_hash >> 32);
-	key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
-
-	key0_index = key0_hash & mask;
-	key1_index = key1_hash & mask;
-
-#ifdef TF_EEM_DEBUG
-	TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
-	TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
-#endif
-	/*
-	 * Use the "result" arg to populate all of the key entry then
-	 * store the byte swapped "raw" entry in a local copy ready
-	 * for insertion in to the table.
-	 */
-	tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
-				((uint8_t *)parms->key),
-				&key_entry);
-
-	/*
-	 * Try to add to Key0 table, if that does not work then
-	 * try the key1 table.
-	 */
-	index = key0_index;
-	op.opcode = HCAPI_CFA_HWOPS_ADD;
-	key_tbl.base0 = (uint8_t *)
-	&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
-	key_obj.offset = (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
-	key_obj.data = (uint8_t *)&key_entry;
-	key_obj.size = TF_EM_KEY_RECORD_SIZE;
-
-	rc = hcapi_cfa_key_hw_op(&op,
-				 &key_tbl,
-				 &key_obj,
-				 &key_loc);
-
-	if (rc == 0) {
-		table_type = TF_KEY0_TABLE;
-	} else {
-		index = key1_index;
-
-		key_tbl.base0 = (uint8_t *)
-		&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
-		key_obj.offset =
-			(index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
-
-		rc = hcapi_cfa_key_hw_op(&op,
-					 &key_tbl,
-					 &key_obj,
-					 &key_loc);
-		if (rc != 0)
-			return rc;
-
-		table_type = TF_KEY1_TABLE;
-	}
-
-	TF_SET_GFID(gfid,
-		    index,
-		    table_type);
-	TF_SET_FLOW_ID(parms->flow_id,
-		       gfid,
-		       TF_GFID_TABLE_EXTERNAL,
-		       parms->dir);
-	TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
-				     0,
-				     0,
-				     0,
-				     index,
-				     0,
-				     table_type);
-
-	return 0;
-}
-
-/**
- * Insert EM internal entry API
- *
- *  returns:
- *     0 - Success
- */
-static int tf_insert_em_internal_entry(struct tf                       *tfp,
-				       struct tf_insert_em_entry_parms *parms)
-{
-	int       rc;
-	uint32_t  gfid;
-	uint16_t  rptr_index = 0;
-	uint8_t   rptr_entry = 0;
-	uint8_t   num_of_entries = 0;
-	struct tf_session *session =
-		(struct tf_session *)(tfp->session->core_data);
-	struct stack *pool = &session->em_pool[parms->dir];
-	uint32_t index;
-
-	rc = stack_pop(pool, &index);
-
-	if (rc != 0) {
-		TFP_DRV_LOG(ERR,
-		   "dir:%d, EM entry index allocation failed\n",
-		   parms->dir);
-		return rc;
-	}
-
-	rptr_index = index * TF_SESSION_EM_ENTRY_SIZE;
-	rc = tf_msg_insert_em_internal_entry(tfp,
-					     parms,
-					     &rptr_index,
-					     &rptr_entry,
-					     &num_of_entries);
-	if (rc != 0)
-		return -1;
-
-	PMD_DRV_LOG(ERR,
-		   "Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
-		   index * TF_SESSION_EM_ENTRY_SIZE,
-		   rptr_index,
-		   rptr_entry,
-		   num_of_entries);
-
-	TF_SET_GFID(gfid,
-		    ((rptr_index << TF_EM_INTERNAL_INDEX_SHIFT) |
-		     rptr_entry),
-		    0); /* N/A for internal table */
-
-	TF_SET_FLOW_ID(parms->flow_id,
-		       gfid,
-		       TF_GFID_TABLE_INTERNAL,
-		       parms->dir);
-
-	TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
-				     num_of_entries,
-				     0,
-				     0,
-				     rptr_index,
-				     rptr_entry,
-				     0);
-	return 0;
-}
-
-/** Delete EM internal entry API
- *
- * returns:
- * 0
- * -EINVAL
- */
-static int tf_delete_em_internal_entry(struct tf                       *tfp,
-				       struct tf_delete_em_entry_parms *parms)
-{
-	int rc;
-	struct tf_session *session =
-		(struct tf_session *)(tfp->session->core_data);
-	struct stack *pool = &session->em_pool[parms->dir];
-
-	rc = tf_msg_delete_em_entry(tfp, parms);
-
-	/* Return resource to pool */
-	if (rc == 0)
-		stack_push(pool, parms->index / TF_SESSION_EM_ENTRY_SIZE);
-
-	return rc;
-}
-
-
-/** delete EEM hash entry API
- *
- * returns:
- *   0
- *   -EINVAL	  - parameter error
- *   TF_NO_SESSION    - bad session ID
- *   TF_ERR_TBL_SCOPE - invalid table scope
- *   TF_ERR_TBL_IF    - invalid table interface
- *
- * insert callback returns
- *   0
- *   TF_NO_EM_MATCH - entry not found
- */
-static int tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
-			       struct tf_delete_em_entry_parms *parms)
-{
-	enum hcapi_cfa_em_table_type hash_type;
-	uint32_t index;
-	struct hcapi_cfa_hwop op;
-	struct hcapi_cfa_key_tbl key_tbl;
-	struct hcapi_cfa_key_data key_obj;
-	struct hcapi_cfa_key_loc key_loc;
-	int rc;
-
-	if (parms->flow_handle == 0)
-		return -EINVAL;
-
-	TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
-	TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
-
-	op.opcode = HCAPI_CFA_HWOPS_DEL;
-	key_tbl.base0 = (uint8_t *)
-	&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[(hash_type == 0 ?
-							  TF_KEY0_TABLE :
-							  TF_KEY1_TABLE)];
-	key_obj.offset = (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
-	key_obj.data = NULL;
-	key_obj.size = TF_EM_KEY_RECORD_SIZE;
-
-	rc = hcapi_cfa_key_hw_op(&op,
-				 &key_tbl,
-				 &key_obj,
-				 &key_loc);
-
-	if (!rc)
-		return rc;
-
-	return 0;
-}
-
-/** insert EM hash entry API
- *
- *    returns:
- *    0       - Success
- *    -EINVAL - Error
- */
-int tf_em_insert_entry(struct tf *tfp,
-		       struct tf_insert_em_entry_parms *parms)
-{
-	struct tf_tbl_scope_cb *tbl_scope_cb;
-
-	tbl_scope_cb = tbl_scope_cb_find
-		((struct tf_session *)(tfp->session->core_data),
-		parms->tbl_scope_id);
-	if (tbl_scope_cb == NULL) {
-		TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
-		return -EINVAL;
-	}
-
-	/* Process the EM entry per Table Scope type */
-	if (parms->mem == TF_MEM_EXTERNAL)
-		/* External EEM */
-		return tf_insert_eem_entry
-			(tbl_scope_cb, parms);
-	else if (parms->mem == TF_MEM_INTERNAL)
-		/* Internal EM */
-		return tf_insert_em_internal_entry(tfp,	parms);
-
-	return -EINVAL;
-}
-
-/** Delete EM hash entry API
- *
- *    returns:
- *    0       - Success
- *    -EINVAL - Error
- */
-int tf_em_delete_entry(struct tf *tfp,
-		       struct tf_delete_em_entry_parms *parms)
-{
-	struct tf_tbl_scope_cb *tbl_scope_cb;
-
-	tbl_scope_cb = tbl_scope_cb_find
-		((struct tf_session *)(tfp->session->core_data),
-		parms->tbl_scope_id);
-	if (tbl_scope_cb == NULL) {
-		TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
-		return -EINVAL;
-	}
-	if (parms->mem == TF_MEM_EXTERNAL)
-		return tf_delete_eem_entry(tbl_scope_cb, parms);
-	else if (parms->mem == TF_MEM_INTERNAL)
-		return tf_delete_em_internal_entry(tfp, parms);
-
-	return -EINVAL;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_em.h b/drivers/net/bnxt/tf_core/tf_em.h
index 2262ae7cc..cf799c200 100644
--- a/drivers/net/bnxt/tf_core/tf_em.h
+++ b/drivers/net/bnxt/tf_core/tf_em.h
@@ -9,6 +9,7 @@
 #include "tf_core.h"
 #include "tf_session.h"
 
+#define TF_HACK_TBL_SCOPE_BASE 68
 #define SUPPORT_CFA_HW_P4 1
 #define SUPPORT_CFA_HW_P58 0
 #define SUPPORT_CFA_HW_P59 0
@@ -19,6 +20,9 @@
 #define TF_HW_EM_KEY_MAX_SIZE 52
 #define TF_EM_KEY_RECORD_SIZE 64
 
+#define TF_EM_MAX_MASK 0x7FFF
+#define TF_EM_MAX_ENTRY (128 * 1024 * 1024)
+
 /*
  * Used to build GFID:
  *
@@ -44,6 +48,47 @@ struct tf_em_64b_entry {
 	uint8_t key[TF_EM_KEY_RECORD_SIZE - sizeof(struct cfa_p4_eem_entry_hdr)];
 };
 
+/** EEM Memory Type
+ *
+ */
+enum tf_mem_type {
+	TF_EEM_MEM_TYPE_INVALID,
+	TF_EEM_MEM_TYPE_HOST,
+	TF_EEM_MEM_TYPE_SYSTEM
+};
+
+/**
+ * tf_em_cfg_parms definition
+ */
+struct tf_em_cfg_parms {
+	/**
+	 * [in] Num entries in resource config
+	 */
+	uint16_t num_elements;
+	/**
+	 * [in] Resource config
+	 */
+	struct tf_rm_element_cfg *cfg;
+	/**
+	 * Session resource allocations
+	 */
+	struct tf_session_resources *resources;
+	/**
+	 * [in] Memory type.
+	 */
+	enum tf_mem_type mem_type;
+};
+
+/**
+ * @page table Table
+ *
+ * @ref tf_alloc_eem_tbl_scope
+ *
+ * @ref tf_free_eem_tbl_scope_cb
+ *
+ * @ref tbl_scope_cb_find
+ */
+
 /**
  * Allocates EEM Table scope
  *
@@ -78,29 +123,258 @@ int tf_free_eem_tbl_scope_cb(struct tf *tfp,
 			     struct tf_free_tbl_scope_parms *parms);
 
 /**
- * Function to search for table scope control block structure
- * with specified table scope ID.
+ * Insert record in to internal EM table
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_insert_int_entry(struct tf *tfp,
+			   struct tf_insert_em_entry_parms *parms);
+
+/**
+ * Delete record from internal EM table
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_delete_int_entry(struct tf *tfp,
+			   struct tf_delete_em_entry_parms *parms);
+
+/**
+ * Insert record in to external EEM table
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_insert_ext_entry(struct tf *tfp,
+			   struct tf_insert_em_entry_parms *parms);
+
+/**
+ * Delete record from external EEM table
  *
- * [in] session
- *   Session to use for the search of the table scope control block
- * [in] tbl_scope_id
- *   Table scope ID to search for
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
  *
  * Returns:
- *  Pointer to the found table scope control block struct or NULL if
- *  table scope control block struct not found
+ *   0       - Success
+ *   -EINVAL - Parameter error
  */
-struct tf_tbl_scope_cb *tbl_scope_cb_find(struct tf_session *session,
-					  uint32_t tbl_scope_id);
+int tf_em_delete_ext_entry(struct tf *tfp,
+			   struct tf_delete_em_entry_parms *parms);
 
-void *tf_em_get_table_page(struct tf_tbl_scope_cb *tbl_scope_cb,
-			   enum tf_dir dir,
-			   uint32_t offset,
-			   enum hcapi_cfa_em_table_type table_type);
+/**
+ * Insert record in to external system EEM table
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_insert_ext_sys_entry(struct tf *tfp,
+			       struct tf_insert_em_entry_parms *parms);
+
+/**
+ * Delete record from external system EEM table
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_delete_ext_sys_entry(struct tf *tfp,
+			       struct tf_delete_em_entry_parms *parms);
 
-int tf_em_insert_entry(struct tf *tfp,
-		       struct tf_insert_em_entry_parms *parms);
+/**
+ * Bind internal EM device interface
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_int_bind(struct tf *tfp,
+		   struct tf_em_cfg_parms *parms);
 
-int tf_em_delete_entry(struct tf *tfp,
-		       struct tf_delete_em_entry_parms *parms);
+/**
+ * Unbind internal EM device interface
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_int_unbind(struct tf *tfp);
+
+/**
+ * Common bind for EEM device interface. Used for both host and
+ * system memory
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_ext_common_bind(struct tf *tfp,
+			  struct tf_em_cfg_parms *parms);
+
+/**
+ * Common unbind for EEM device interface. Used for both host and
+ * system memory
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_ext_common_unbind(struct tf *tfp);
+
+/**
+ * Alloc for external EEM using host memory
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_ext_host_alloc(struct tf *tfp,
+			 struct tf_alloc_tbl_scope_parms *parms);
+
+/**
+ * Free for external EEM using host memory
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_ext_host_free(struct tf *tfp,
+			struct tf_free_tbl_scope_parms *parms);
+
+/**
+ * Alloc for external EEM using system memory
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_ext_system_alloc(struct tf *tfp,
+			 struct tf_alloc_tbl_scope_parms *parms);
+
+/**
+ * Free for external EEM using system memory
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_ext_system_free(struct tf *tfp,
+			struct tf_free_tbl_scope_parms *parms);
+
+/**
+ * Common free for external EEM using host or system memory
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_ext_common_free(struct tf *tfp,
+			  struct tf_free_tbl_scope_parms *parms);
+
+/**
+ * Common alloc for external EEM using host or system memory
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_ext_common_alloc(struct tf *tfp,
+			   struct tf_alloc_tbl_scope_parms *parms);
 #endif /* _TF_EM_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_em_common.c b/drivers/net/bnxt/tf_core/tf_em_common.c
new file mode 100644
index 000000000..ba6aa7ac1
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_em_common.c
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <math.h>
+#include <sys/param.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_log.h>
+
+#include "tf_core.h"
+#include "tf_util.h"
+#include "tf_common.h"
+#include "tf_em.h"
+#include "tf_em_common.h"
+#include "tf_msg.h"
+#include "tfp.h"
+#include "tf_device.h"
+#include "tf_ext_flow_handle.h"
+#include "cfa_resource_types.h"
+
+#include "bnxt.h"
+
+
+/**
+ * EM DBs.
+ */
+void *eem_db[TF_DIR_MAX];
+
+/**
+ * Init flag, set on bind and cleared on unbind
+ */
+static uint8_t init;
+
+/**
+ * Host or system
+ */
+static enum tf_mem_type mem_type;
+
+/* API defined in tf_em.h */
+struct tf_tbl_scope_cb *
+tbl_scope_cb_find(struct tf_session *session,
+		  uint32_t tbl_scope_id)
+{
+	int i;
+	struct tf_rm_is_allocated_parms parms;
+	int allocated;
+
+	/* First verify the id refers to an allocated table scope in the
+	 * RM database.  The RX db is used for the check; the id is
+	 * offset by TF_HACK_TBL_SCOPE_BASE (temporary workaround until
+	 * table scope gets a dedicated RM type — see db_index note).
+	 */
+	parms.rm_db = eem_db[TF_DIR_RX];
+	parms.db_index = 1/**** TYPE TABLE-SCOPE??? ****/;
+	parms.index = tbl_scope_id + TF_HACK_TBL_SCOPE_BASE;
+	parms.allocated = &allocated;
+
+	i = tf_rm_is_allocated(&parms);
+
+	if (i < 0 || !allocated)
+		return NULL;
+
+	/* Linear scan of the session's control blocks for the match. */
+	for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
+		if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)
+			return &session->tbl_scopes[i];
+	}
+
+	return NULL;
+}
+
+/**
+ * Create and initialize the free pool of external action record
+ * offsets for one direction of a table scope.
+ *
+ * [in] dir
+ *   Direction
+ * [in] tbl_scope_cb
+ *   Table scope control block owning the pool
+ * [in] num_entries
+ *   Number of external action record entries
+ * [in] entry_sz_bytes
+ *   Size in bytes of one action record entry
+ *
+ * Returns:
+ *   0       - Success
+ *   -ENOMEM - Out of memory
+ *   -EINVAL - Failure
+ */
+int
+tf_create_tbl_pool_external(enum tf_dir dir,
+			    struct tf_tbl_scope_cb *tbl_scope_cb,
+			    uint32_t num_entries,
+			    uint32_t entry_sz_bytes)
+{
+	struct tfp_calloc_parms parms;
+	uint32_t i;
+	int32_t j;
+	int rc = 0;
+	struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
+
+	parms.nitems = num_entries;
+	parms.size = sizeof(uint32_t);
+	parms.alignment = 0;
+
+	if (tfp_calloc(&parms) != 0) {
+		TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
+			    tf_dir_2_str(dir), strerror(ENOMEM));
+		return -ENOMEM;
+	}
+
+	/* Create empty stack
+	 */
+	rc = stack_init(num_entries, parms.mem_va, pool);
+
+	if (rc != 0) {
+		TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
+			    tf_dir_2_str(dir), strerror(-rc));
+		goto cleanup;
+	}
+
+	/* Save the malloced memory address so that it can
+	 * be freed when the table scope is freed.
+	 */
+	tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
+
+	/* Fill pool with byte offsets in reverse order so the lowest
+	 * offset is popped first.
+	 */
+	j = (num_entries - 1) * entry_sz_bytes;
+
+	for (i = 0; i < num_entries; i++) {
+		rc = stack_push(pool, j);
+		if (rc != 0) {
+			TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
+				    tf_dir_2_str(dir), strerror(-rc));
+			goto cleanup;
+		}
+
+		if (j < 0) {
+			/* Bug fix: previously fell through with rc == 0,
+			 * so the caller saw success even though the pool
+			 * memory was freed below.
+			 */
+			rc = -EINVAL;
+			TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
+				    dir, j);
+			goto cleanup;
+		}
+		j -= entry_sz_bytes;
+	}
+
+	if (!stack_is_full(pool)) {
+		rc = -EINVAL;
+		TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
+			    tf_dir_2_str(dir), strerror(-rc));
+		goto cleanup;
+	}
+	return 0;
+cleanup:
+	/* Clear the saved pointer so a later table scope free cannot
+	 * double free the memory released here.
+	 */
+	tbl_scope_cb->ext_act_pool_mem[dir] = NULL;
+	tfp_free((void *)parms.mem_va);
+	return rc;
+}
+
+/**
+ * Tear down the external action record pool for one direction,
+ * releasing the index memory saved at pool creation time.
+ *
+ * [in] dir
+ *   Direction
+ * [in] tbl_scope_cb
+ *   Table scope control block owning the pool
+ */
+void
+tf_destroy_tbl_pool_external(enum tf_dir dir,
+			     struct tf_tbl_scope_cb *tbl_scope_cb)
+{
+	tfp_free(tbl_scope_cb->ext_act_pool_mem[dir]);
+}
+
+uint32_t
+tf_em_get_key_mask(int num_entries)
+{
+	/* Only power-of-two sizes in the supported range yield a valid
+	 * mask; anything with bits below the minimum, or above the
+	 * maximum entry count, maps to 0 (invalid).
+	 */
+	if ((num_entries & TF_EM_MAX_MASK) != 0)
+		return 0;
+
+	if (num_entries > TF_EM_MAX_ENTRY)
+		return 0;
+
+	return (uint32_t)(num_entries - 1);
+}
+
+/**
+ * Build a 64B EEM key entry from the result header and key data.
+ *
+ * [in] result
+ *   Entry result header (word1 + action record pointer)
+ * [in] in_key
+ *   Key bytes to copy into the record
+ * [out] key_entry
+ *   Completed key record
+ */
+void
+tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
+		       uint8_t *in_key,
+		       struct cfa_p4_eem_64b_entry *key_entry)
+{
+	key_entry->hdr.word1 = result->word1;
+
+	/* The internal and external action record cases used identical
+	 * if/else arms; the dead branch on
+	 * CFA_P4_EEM_ENTRY_ACT_REC_INT_MASK is removed.
+	 */
+	key_entry->hdr.pointer = result->pointer;
+
+	memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);
+
+#ifdef TF_EEM_DEBUG
+	dump_raw((uint8_t *)key_entry, TF_EM_KEY_RECORD_SIZE, "Create raw:");
+#endif
+}
+
+int
+tf_em_ext_common_bind(struct tf *tfp,
+		      struct tf_em_cfg_parms *parms)
+{
+	int rc;
+	int i;
+	struct tf_rm_create_db_parms db_cfg = { 0 };
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+	if (init) {
+		/* Bug fix: message previously said "Identifier", a
+		 * copy/paste from the identifier module.
+		 */
+		TFP_DRV_LOG(ERR,
+			    "EM Ext DB already initialized\n");
+		return -EINVAL;
+	}
+
+	db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
+	db_cfg.num_elements = parms->num_elements;
+	db_cfg.cfg = parms->cfg;
+
+	/* Create one RM database per direction.
+	 * NOTE(review): a failure on the TX db leaves the RX db
+	 * allocated; cleanup relies on a later unbind — confirm.
+	 */
+	for (i = 0; i < TF_DIR_MAX; i++) {
+		db_cfg.dir = i;
+		db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
+		db_cfg.rm_db = &eem_db[i];
+		rc = tf_rm_create_db(tfp, &db_cfg);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s: EM DB creation failed\n",
+				    tf_dir_2_str(i));
+
+			return rc;
+		}
+	}
+
+	/* Remember the backing store so alloc/free can dispatch. */
+	mem_type = parms->mem_type;
+	init = 1;
+
+	return 0;
+}
+
+int
+tf_em_ext_common_unbind(struct tf *tfp)
+{
+	int rc;
+	int i;
+	struct tf_rm_free_db_parms fparms = { 0 };
+
+	TF_CHECK_PARMS1(tfp);
+
+	/* Bail if nothing has been initialized; unbind without a prior
+	 * successful bind is reported as an error.
+	 */
+	if (!init) {
+		TFP_DRV_LOG(ERR,
+			    "No EM DBs created\n");
+		return -EINVAL;
+	}
+
+	/* Free the RM database for each direction, clearing each handle
+	 * so a repeated unbind cannot double free.
+	 */
+	for (i = 0; i < TF_DIR_MAX; i++) {
+		fparms.dir = i;
+		fparms.rm_db = eem_db[i];
+		rc = tf_rm_free_db(tfp, &fparms);
+		if (rc)
+			return rc;
+
+		eem_db[i] = NULL;
+	}
+
+	init = 0;
+
+	return 0;
+}
+
+int
+tf_em_ext_common_alloc(struct tf *tfp,
+		       struct tf_alloc_tbl_scope_parms *parms)
+{
+	/* Dispatch on the memory backing selected at bind time. */
+	if (mem_type != TF_EEM_MEM_TYPE_HOST)
+		return tf_em_ext_system_alloc(tfp, parms);
+
+	return tf_em_ext_host_alloc(tfp, parms);
+}
+
+int
+tf_em_ext_common_free(struct tf *tfp,
+		      struct tf_free_tbl_scope_parms *parms)
+{
+	/* Dispatch on the memory backing selected at bind time. */
+	if (mem_type != TF_EEM_MEM_TYPE_HOST)
+		return tf_em_ext_system_free(tfp, parms);
+
+	return tf_em_ext_host_free(tfp, parms);
+}
diff --git a/drivers/net/bnxt/tf_core/tf_em_common.h b/drivers/net/bnxt/tf_core/tf_em_common.h
new file mode 100644
index 000000000..45699a7c3
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_em_common.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TF_EM_COMMON_H_
+#define _TF_EM_COMMON_H_
+
+#include "tf_core.h"
+#include "tf_session.h"
+
+
+/**
+ * Function to search for table scope control block structure
+ * with specified table scope ID.
+ *
+ * [in] session
+ *   Session to use for the search of the table scope control block
+ * [in] tbl_scope_id
+ *   Table scope ID to search for
+ *
+ * Returns:
+ *  Pointer to the found table scope control block struct or NULL if
+ *   table scope control block struct not found
+ */
+struct tf_tbl_scope_cb *tbl_scope_cb_find(struct tf_session *session,
+					  uint32_t tbl_scope_id);
+
+/**
+ * Create and initialize a stack to use for action entries
+ *
+ * [in] dir
+ *   Direction
+ * [in] tbl_scope_id
+ *   Table scope ID
+ * [in] num_entries
+ *   Number of EEM entries
+ * [in] entry_sz_bytes
+ *   Size of the entry
+ *
+ * Returns:
+ *   0       - Success
+ *   -ENOMEM - Out of memory
+ *   -EINVAL - Failure
+ */
+int tf_create_tbl_pool_external(enum tf_dir dir,
+				struct tf_tbl_scope_cb *tbl_scope_cb,
+				uint32_t num_entries,
+				uint32_t entry_sz_bytes);
+
+/**
+ * Delete and cleanup action record allocation stack
+ *
+ * [in] dir
+ *   Direction
+ * [in] tbl_scope_id
+ *   Table scope ID
+ *
+ */
+void tf_destroy_tbl_pool_external(enum tf_dir dir,
+				  struct tf_tbl_scope_cb *tbl_scope_cb);
+
+/**
+ * Get hash mask for current EEM table size
+ *
+ * [in] num_entries
+ *   Number of EEM entries
+ */
+uint32_t tf_em_get_key_mask(int num_entries);
+
+/**
+ * Populate key_entry
+ *
+ * [in] result
+ *   Entry data
+ * [in] in_key
+ *   Key data
+ * [out] key_entry
+ *   Completed key record
+ */
+void tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
+			    uint8_t	       *in_key,
+			    struct cfa_p4_eem_64b_entry *key_entry);
+
+/**
+ * Find base page address for offset into specified table type
+ *
+ * [in] tbl_scope_cb
+ *   Table scope
+ * [in] dir
+ *   Direction
+ * [in] Offset
+ *   Offset in to table
+ * [in] table_type
+ *   Table type
+ *
+ * Returns:
+ *
+ * NULL                              - Failure
+ * Void pointer to page base address - Success
+ */
+void *tf_em_get_table_page(struct tf_tbl_scope_cb *tbl_scope_cb,
+			   enum tf_dir dir,
+			   uint32_t offset,
+			   enum hcapi_cfa_em_table_type table_type);
+
+#endif /* _TF_EM_COMMON_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_em_host.c b/drivers/net/bnxt/tf_core/tf_em_host.c
new file mode 100644
index 000000000..8be39afdd
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_em_host.c
@@ -0,0 +1,1146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <math.h>
+#include <sys/param.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_log.h>
+
+#include "tf_core.h"
+#include "tf_util.h"
+#include "tf_common.h"
+#include "tf_em.h"
+#include "tf_em_common.h"
+#include "tf_msg.h"
+#include "tfp.h"
+#include "lookup3.h"
+#include "tf_ext_flow_handle.h"
+
+#include "bnxt.h"
+
+
+#define PTU_PTE_VALID          0x1UL
+#define PTU_PTE_LAST           0x2UL
+#define PTU_PTE_NEXT_TO_LAST   0x4UL
+
+/* Number of pointers per page_size */
+#define MAX_PAGE_PTRS(page_size)  ((page_size) / sizeof(void *))
+
+#define TF_EM_PG_SZ_4K        (1 << 12)
+#define TF_EM_PG_SZ_8K        (1 << 13)
+#define TF_EM_PG_SZ_64K       (1 << 16)
+#define TF_EM_PG_SZ_256K      (1 << 18)
+#define TF_EM_PG_SZ_1M        (1 << 20)
+#define TF_EM_PG_SZ_2M        (1 << 21)
+#define TF_EM_PG_SZ_4M        (1 << 22)
+#define TF_EM_PG_SZ_1G        (1 << 30)
+
+#define TF_EM_CTX_ID_INVALID   0xFFFF
+
+#define TF_EM_MIN_ENTRIES     (1 << 15) /* 32K */
+#define TF_EM_MAX_ENTRIES     (1 << 27) /* 128M */
+
+/**
+ * EM DBs.
+ */
+extern void *eem_db[TF_DIR_MAX];
+
+/**
+ * Function to free a page table
+ *
+ * Releases every data page referenced by the table, then the VA/PA
+ * pointer arrays themselves.  Pointers are cleared afterwards to
+ * guard against double free.
+ *
+ * [in] tp
+ *   Pointer to the page table to free
+ */
+static void
+tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
+{
+	uint32_t i;
+
+	for (i = 0; i < tp->pg_count; i++) {
+		/* A page can be missing if allocation failed part way;
+		 * warn and keep freeing the rest.
+		 */
+		if (!tp->pg_va_tbl[i]) {
+			TFP_DRV_LOG(WARNING,
+				    "No mapping for page: %d table: %016" PRIu64 "\n",
+				    i,
+				    (uint64_t)(uintptr_t)tp);
+			continue;
+		}
+
+		tfp_free(tp->pg_va_tbl[i]);
+		tp->pg_va_tbl[i] = NULL;
+	}
+
+	tp->pg_count = 0;
+	tfp_free(tp->pg_va_tbl);
+	tp->pg_va_tbl = NULL;
+	tfp_free(tp->pg_pa_tbl);
+	tp->pg_pa_tbl = NULL;
+}
+
+/**
+ * Function to free an EM table
+ *
+ * Frees the page table at every level, then resets the table's root
+ * address and sizing bookkeeping.
+ *
+ * [in] tbl
+ *   Pointer to the EM table to free
+ */
+static void
+tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
+{
+	struct hcapi_cfa_em_page_tbl *tp;
+	int i;
+
+	for (i = 0; i < tbl->num_lvl; i++) {
+		tp = &tbl->pg_tbl[i];
+		TFP_DRV_LOG(INFO,
+			   "EEM: Freeing page table: size %u lvl %d cnt %u\n",
+			   TF_EM_PAGE_SIZE,
+			    i,
+			    tp->pg_count);
+
+		tf_em_free_pg_tbl(tp);
+	}
+
+	/* Clear the root pointers and level/page accounting. */
+	tbl->l0_addr = NULL;
+	tbl->l0_dma_addr = 0;
+	tbl->num_lvl = 0;
+	tbl->num_data_pages = 0;
+}
+
+/**
+ * Allocation of page tables
+ *
+ * [in] tp
+ *   Pointer to the page table to populate
+ *
+ * [in] pg_count
+ *   Page count to allocate
+ *
+ * [in] pg_size
+ *   Size of each page
+ *
+ * Returns:
+ *   0       - Success
+ *   -ENOMEM - Out of memory
+ */
+static int
+tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
+		   uint32_t pg_count,
+		   uint32_t pg_size)
+{
+	uint32_t i;
+	struct tfp_calloc_parms parms;
+
+	/* Array of virtual addresses, one slot per page. */
+	parms.nitems = pg_count;
+	parms.size = sizeof(void *);
+	parms.alignment = 0;
+
+	if (tfp_calloc(&parms) != 0)
+		return -ENOMEM;
+
+	tp->pg_va_tbl = parms.mem_va;
+
+	/* parms is reused unchanged: same nitems/size for the matching
+	 * array of physical addresses.
+	 */
+	if (tfp_calloc(&parms) != 0) {
+		tfp_free(tp->pg_va_tbl);
+		return -ENOMEM;
+	}
+
+	tp->pg_pa_tbl = parms.mem_va;
+
+	tp->pg_count = 0;
+	tp->pg_size = pg_size;
+
+	/* Allocate and zero each data page, recording its VA and PA. */
+	for (i = 0; i < pg_count; i++) {
+		parms.nitems = 1;
+		parms.size = pg_size;
+		parms.alignment = TF_EM_PAGE_ALIGNMENT;
+
+		if (tfp_calloc(&parms) != 0)
+			goto cleanup;
+
+		tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
+		tp->pg_va_tbl[i] = parms.mem_va;
+
+		memset(tp->pg_va_tbl[i], 0, pg_size);
+		tp->pg_count++;
+	}
+
+	return 0;
+
+cleanup:
+	/* Frees the pages allocated so far plus the VA/PA arrays. */
+	tf_em_free_pg_tbl(tp);
+	return -ENOMEM;
+}
+
+/**
+ * Allocates EM page tables
+ *
+ * Allocates the pages for every level of the table; on any failure
+ * all levels allocated so far are released.
+ *
+ * [in] tbl
+ *   Table to allocate pages for
+ *
+ * Returns:
+ *   0       - Success
+ *   -ENOMEM - Out of memory
+ */
+static int
+tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
+{
+	struct hcapi_cfa_em_page_tbl *tp;
+	int rc = 0;
+	int i;
+	uint32_t j;
+
+	for (i = 0; i < tbl->num_lvl; i++) {
+		tp = &tbl->pg_tbl[i];
+
+		rc = tf_em_alloc_pg_tbl(tp,
+					tbl->page_cnt[i],
+					TF_EM_PAGE_SIZE);
+		if (rc) {
+			TFP_DRV_LOG(WARNING,
+				"Failed to allocate page table: lvl: %d, rc:%s\n",
+				i,
+				strerror(-rc));
+			goto cleanup;
+		}
+
+		/* Trace each page of the level just allocated. */
+		for (j = 0; j < tp->pg_count; j++) {
+			TFP_DRV_LOG(INFO,
+				"EEM: Allocated page table: size %u lvl %d cnt"
+				" %u VA:%p PA:%p\n",
+				TF_EM_PAGE_SIZE,
+				i,
+				tp->pg_count,
+				(void *)(uintptr_t)tp->pg_va_tbl[j],
+				(void *)(uintptr_t)tp->pg_pa_tbl[j]);
+		}
+	}
+	return rc;
+
+cleanup:
+	tf_em_free_page_table(tbl);
+	return rc;
+}
+
+/**
+ * Links EM page tables
+ *
+ * Fills every PTE slot of this level's pages with the physical
+ * address of the corresponding next-level page, tagged with the PTU
+ * valid bits.
+ *
+ * [in] tp
+ *   Pointer to page table
+ *
+ * [in] tp_next
+ *   Pointer to the next page table
+ *
+ * [in] set_pte_last
+ *   Flag controlling if the page table is last
+ */
+static void
+tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
+		      struct hcapi_cfa_em_page_tbl *tp_next,
+		      bool set_pte_last)
+{
+	uint64_t *pg_pa = tp_next->pg_pa_tbl;
+	uint64_t *pg_va;
+	uint64_t valid;
+	uint32_t k = 0;	/* index into the next level's pages */
+	uint32_t i;
+	uint32_t j;
+
+	for (i = 0; i < tp->pg_count; i++) {
+		pg_va = tp->pg_va_tbl[i];
+
+		for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
+			/* When linking the level above the data pages,
+			 * mark the final two entries so hardware can
+			 * detect the end of the table.
+			 */
+			if (k == tp_next->pg_count - 2 && set_pte_last)
+				valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
+			else if (k == tp_next->pg_count - 1 && set_pte_last)
+				valid = PTU_PTE_LAST | PTU_PTE_VALID;
+			else
+				valid = PTU_PTE_VALID;
+
+			/* PTEs are little-endian as consumed by HW. */
+			pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
+			if (++k >= tp_next->pg_count)
+				return;
+		}
+	}
+}
+
+/**
+ * Setup a EM page table
+ *
+ * Chains each page-table level into the next; the final link marks
+ * the LAST/NEXT_TO_LAST PTEs, then the level-0 root is recorded.
+ *
+ * [in] tbl
+ *   Pointer to EM page table
+ */
+static void
+tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
+{
+	int lvl;
+
+	for (lvl = 0; lvl < tbl->num_lvl - 1; lvl++) {
+		bool last = (lvl == tbl->num_lvl - 2);
+
+		tf_em_link_page_table(&tbl->pg_tbl[lvl],
+				      &tbl->pg_tbl[lvl + 1],
+				      last);
+	}
+
+	/* Root of the table is the first page of level 0. */
+	tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
+	tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
+}
+
+/**
+ * Given the page size, size of each data item (entry size),
+ * and the total number of entries needed, determine the number
+ * of page table levels and the number of data pages required.
+ *
+ * [in] page_size
+ *   Page size
+ *
+ * [in] entry_size
+ *   Entry size
+ *
+ * [in] num_entries
+ *   Number of entries needed
+ *
+ * [out] num_data_pages
+ *   Number of pages required
+ *
+ * Returns:
+ *   Success  - Number of EM page levels required
+ *   -ENOMEM  - Out of memory
+ */
+static int
+tf_em_size_page_tbl_lvl(uint32_t page_size,
+			uint32_t entry_size,
+			uint32_t num_entries,
+			uint64_t *num_data_pages)
+{
+	uint64_t required = (uint64_t)num_entries * entry_size;
+	uint64_t capacity = page_size;
+	int lvl = TF_PT_LVL_0;
+
+	*num_data_pages = 0;
+
+	/* Grow the tree one level at a time until the data fits. */
+	while (capacity < required) {
+		lvl++;
+
+		if (lvl == TF_PT_LVL_1)
+			capacity = (uint64_t)MAX_PAGE_PTRS(page_size) *
+				page_size;
+		else if (lvl == TF_PT_LVL_2)
+			capacity = (uint64_t)MAX_PAGE_PTRS(page_size) *
+				MAX_PAGE_PTRS(page_size) * page_size;
+		else
+			return -ENOMEM;
+	}
+
+	*num_data_pages = roundup(required, page_size) / page_size;
+
+	return lvl;
+}
+
+/**
+ * Return the number of page table pages needed to
+ * reference the given number of next level pages.
+ *
+ * [in] num_pages
+ *   Number of EM pages
+ *
+ * [in] page_size
+ *   Size of each EM page
+ *
+ * Returns:
+ *   Number of EM page table pages
+ */
+static uint32_t
+tf_em_page_tbl_pgcnt(uint32_t num_pages,
+		     uint32_t page_size)
+{
+	/* Ceiling divide by the number of pointers one page can hold.
+	 * (The unreachable trailing "return 0;" is removed.)
+	 */
+	return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
+		       MAX_PAGE_PTRS(page_size);
+}
+
+/**
+ * Given the number of data pages, page_size and the maximum
+ * number of page table levels (already determined), size
+ * the number of page table pages required at each level.
+ *
+ * [in] max_lvl
+ *   Max number of levels
+ *
+ * [in] num_data_pages
+ *   Number of EM data pages
+ *
+ * [in] page_size
+ *   Size of an EM page
+ *
+ * [out] *page_cnt
+ *   EM page count
+ */
+static void
+tf_em_size_page_tbls(int max_lvl,
+		     uint64_t num_data_pages,
+		     uint32_t page_size,
+		     uint32_t *page_cnt)
+{
+	switch (max_lvl) {
+	case TF_PT_LVL_0:
+		page_cnt[TF_PT_LVL_0] = num_data_pages;
+		break;
+	case TF_PT_LVL_1:
+		page_cnt[TF_PT_LVL_1] = num_data_pages;
+		page_cnt[TF_PT_LVL_0] =
+			tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
+		break;
+	case TF_PT_LVL_2:
+		page_cnt[TF_PT_LVL_2] = num_data_pages;
+		page_cnt[TF_PT_LVL_1] =
+			tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
+		page_cnt[TF_PT_LVL_0] =
+			tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
+		break;
+	default:
+		/* Unsupported depth: leave the page counts untouched. */
+		break;
+	}
+}
+
+/**
+ * Size the EM table based on capabilities
+ *
+ * [in] tbl
+ *   EM table to size
+ *
+ * Returns:
+ *   0        - Success
+ *   - EINVAL - Parameter error
+ *   - ENOMEM - Out of memory
+ */
+static int
+tf_em_size_table(struct hcapi_cfa_em_table *tbl)
+{
+	uint64_t num_data_pages;
+	uint32_t *page_cnt;
+	int max_lvl;
+	uint32_t num_entries;
+	uint32_t cnt = TF_EM_MIN_ENTRIES;
+
+	/* Ignore entry if both size and number are zero */
+	if (!tbl->entry_size && !tbl->num_entries)
+		return 0;
+
+	/* If only one is set then error */
+	if (!tbl->entry_size || !tbl->num_entries)
+		return -EINVAL;
+
+	/* Determine number of page table levels and the number
+	 * of data pages needed to process the given eem table.
+	 */
+	if (tbl->type == TF_RECORD_TABLE) {
+		/*
+		 * For action records just a memory size is provided. Work
+		 * backwards to resolve to number of entries
+		 */
+		num_entries = tbl->num_entries / tbl->entry_size;
+		if (num_entries < TF_EM_MIN_ENTRIES) {
+			num_entries = TF_EM_MIN_ENTRIES;
+		} else {
+			/* Round up to the next supported power of two. */
+			while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
+				cnt *= 2;
+			num_entries = cnt;
+		}
+	} else {
+		num_entries = tbl->num_entries;
+	}
+
+	/* Bug fix: size using the rounded num_entries computed above,
+	 * not the raw tbl->num_entries.
+	 */
+	max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
+					  tbl->entry_size,
+					  num_entries,
+					  &num_data_pages);
+	if (max_lvl < 0) {
+		TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
+		TFP_DRV_LOG(WARNING,
+			    "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
+			    tbl->type, (uint64_t)num_entries * tbl->entry_size,
+			    TF_EM_PAGE_SIZE);
+		return -ENOMEM;
+	}
+
+	tbl->num_lvl = max_lvl + 1;
+	tbl->num_data_pages = num_data_pages;
+
+	/* Determine the number of pages needed at each level */
+	page_cnt = tbl->page_cnt;
+	memset(page_cnt, 0, sizeof(tbl->page_cnt));
+	tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
+				page_cnt);
+
+	TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
+	TFP_DRV_LOG(INFO,
+		    "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
+		    max_lvl + 1,
+		    (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
+		    num_data_pages,
+		    page_cnt[TF_PT_LVL_0],
+		    page_cnt[TF_PT_LVL_1],
+		    page_cnt[TF_PT_LVL_2]);
+
+	return 0;
+}
+
+/**
+ * Unregisters EM Ctx in Firmware
+ *
+ * [in] tfp
+ *   Pointer to a TruFlow handle
+ *
+ * [in] tbl_scope_cb
+ *   Pointer to a table scope control block
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ */
+static void
+tf_em_ctx_unreg(struct tf *tfp,
+		struct tf_tbl_scope_cb *tbl_scope_cb,
+		int dir)
+{
+	struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
+	struct hcapi_cfa_em_table *tbl;
+	int i;
+
+	for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
+		tbl = &ctxp->em_tables[i];
+
+		/* Only tables that were actually configured are torn
+		 * down: unregister with firmware first, then free the
+		 * backing page tables.
+		 */
+		if (tbl->num_entries != 0 && tbl->entry_size != 0) {
+			tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
+			tf_em_free_page_table(tbl);
+		}
+	}
+}
+
+/**
+ * Registers EM Ctx in Firmware
+ *
+ * [in] tfp
+ *   Pointer to a TruFlow handle
+ *
+ * [in] tbl_scope_cb
+ *   Pointer to a table scope control block
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * Returns:
+ *   0       - Success
+ *   -ENOMEM - Out of Memory
+ */
+static int
+tf_em_ctx_reg(struct tf *tfp,
+	      struct tf_tbl_scope_cb *tbl_scope_cb,
+	      int dir)
+{
+	struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
+	struct hcapi_cfa_em_table *tbl;
+	int rc = 0; /* Bug fix: was read uninitialized when no table was configured */
+	int i;
+
+	for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
+		tbl = &ctxp->em_tables[i];
+
+		/* Skip tables that are not configured. */
+		if (tbl->num_entries && tbl->entry_size) {
+			/* Size, allocate and link the page tables, then
+			 * register the level-0 base with firmware.
+			 */
+			rc = tf_em_size_table(tbl);
+
+			if (rc)
+				goto cleanup;
+
+			rc = tf_em_alloc_page_table(tbl);
+			if (rc)
+				goto cleanup;
+
+			tf_em_setup_page_table(tbl);
+			rc = tf_msg_em_mem_rgtr(tfp,
+						tbl->num_lvl - 1,
+						TF_EM_PAGE_SIZE_ENUM,
+						tbl->l0_dma_addr,
+						&tbl->ctx_id);
+			if (rc)
+				goto cleanup;
+		}
+	}
+	return rc;
+
+cleanup:
+	/* Unwind any tables set up before the failure. */
+	tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
+	return rc;
+}
+
+
+/**
+ * Validates EM number of entries requested
+ *
+ * Normalizes either a memory size (MB) or a flow count (K) per
+ * direction into a supported power-of-two entry count, then fills in
+ * the per-table num_entries/entry_size in the table scope control
+ * block.
+ *
+ * [in] tbl_scope_cb
+ *   Pointer to table scope control block to be populated
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+static int
+tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
+			   struct tf_alloc_tbl_scope_parms *parms)
+{
+	uint32_t cnt;
+
+	if (parms->rx_mem_size_in_mb != 0) {
+		/* Key is stored twice (KEY0/KEY1), hence the factor 2. */
+		uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
+		uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
+				     + 1);
+		uint32_t num_entries = (parms->rx_mem_size_in_mb *
+					TF_MEGABYTE) / (key_b + action_b);
+
+		if (num_entries < TF_EM_MIN_ENTRIES) {
+			TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
+				    "%uMB\n",
+				    parms->rx_mem_size_in_mb);
+			return -EINVAL;
+		}
+
+		/* Round up to the next supported power of two. */
+		cnt = TF_EM_MIN_ENTRIES;
+		while (num_entries > cnt &&
+		       cnt <= TF_EM_MAX_ENTRIES)
+			cnt *= 2;
+
+		if (cnt > TF_EM_MAX_ENTRIES) {
+			/* Bug fix: this is the Rx path; the message and
+			 * value previously referred to Tx.
+			 */
+			TFP_DRV_LOG(ERR, "EEM: Invalid number of Rx requested: "
+				    "%u\n",
+				    num_entries);
+			return -EINVAL;
+		}
+
+		parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
+	} else {
+		if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
+		    TF_EM_MIN_ENTRIES ||
+		    (parms->rx_num_flows_in_k * TF_KILOBYTE) >
+		    tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
+			TFP_DRV_LOG(ERR,
+				    "EEM: Invalid number of Rx flows "
+				    "requested:%u max:%u\n",
+				    parms->rx_num_flows_in_k * TF_KILOBYTE,
+			tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
+			return -EINVAL;
+		}
+
+		/* must be a power-of-2 supported value
+		 * in the range 32K - 128M
+		 */
+		cnt = TF_EM_MIN_ENTRIES;
+		while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
+		       cnt <= TF_EM_MAX_ENTRIES)
+			cnt *= 2;
+
+		if (cnt > TF_EM_MAX_ENTRIES) {
+			TFP_DRV_LOG(ERR,
+				    "EEM: Invalid number of Rx requested: %u\n",
+				    (parms->rx_num_flows_in_k * TF_KILOBYTE));
+			return -EINVAL;
+		}
+	}
+
+	if (parms->tx_mem_size_in_mb != 0) {
+		uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
+		uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
+				     + 1);
+		uint32_t num_entries = (parms->tx_mem_size_in_mb *
+					(TF_KILOBYTE * TF_KILOBYTE)) /
+			(key_b + action_b);
+
+		if (num_entries < TF_EM_MIN_ENTRIES) {
+			/* Bug fix: this is the Tx path; the message
+			 * previously printed rx_mem_size_in_mb.
+			 */
+			TFP_DRV_LOG(ERR,
+				    "EEM: Insufficient memory requested:%uMB\n",
+				    parms->tx_mem_size_in_mb);
+			return -EINVAL;
+		}
+
+		cnt = TF_EM_MIN_ENTRIES;
+		while (num_entries > cnt &&
+		       cnt <= TF_EM_MAX_ENTRIES)
+			cnt *= 2;
+
+		if (cnt > TF_EM_MAX_ENTRIES) {
+			TFP_DRV_LOG(ERR,
+				    "EEM: Invalid number of Tx requested: %u\n",
+		       (parms->tx_num_flows_in_k * TF_KILOBYTE));
+			return -EINVAL;
+		}
+
+		parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
+	} else {
+		if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
+		    TF_EM_MIN_ENTRIES ||
+		    (parms->tx_num_flows_in_k * TF_KILOBYTE) >
+		    tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
+			TFP_DRV_LOG(ERR,
+				    "EEM: Invalid number of Tx flows "
+				    "requested:%u max:%u\n",
+				    (parms->tx_num_flows_in_k * TF_KILOBYTE),
+			tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
+			return -EINVAL;
+		}
+
+		cnt = TF_EM_MIN_ENTRIES;
+		while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
+		       cnt <= TF_EM_MAX_ENTRIES)
+			cnt *= 2;
+
+		if (cnt > TF_EM_MAX_ENTRIES) {
+			TFP_DRV_LOG(ERR,
+				    "EEM: Invalid number of Tx requested: %u\n",
+		       (parms->tx_num_flows_in_k * TF_KILOBYTE));
+			return -EINVAL;
+		}
+	}
+
+	/* A non-zero flow count requires a non-zero key size. */
+	if (parms->rx_num_flows_in_k != 0 &&
+	    (parms->rx_max_key_sz_in_bits / 8 == 0)) {
+		TFP_DRV_LOG(ERR,
+			    "EEM: Rx key size required: %u\n",
+			    (parms->rx_max_key_sz_in_bits));
+		return -EINVAL;
+	}
+
+	if (parms->tx_num_flows_in_k != 0 &&
+	    (parms->tx_max_key_sz_in_bits / 8 == 0)) {
+		TFP_DRV_LOG(ERR,
+			    "EEM: Tx key size required: %u\n",
+			    (parms->tx_max_key_sz_in_bits));
+		return -EINVAL;
+	}
+	/* Rx */
+	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
+		parms->rx_num_flows_in_k * TF_KILOBYTE;
+	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
+		parms->rx_max_key_sz_in_bits / 8;
+
+	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
+		parms->rx_num_flows_in_k * TF_KILOBYTE;
+	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
+		parms->rx_max_key_sz_in_bits / 8;
+
+	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
+		parms->rx_num_flows_in_k * TF_KILOBYTE;
+	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
+		parms->rx_max_action_entry_sz_in_bits / 8;
+
+	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
+
+	/* Tx */
+	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
+		parms->tx_num_flows_in_k * TF_KILOBYTE;
+	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
+		parms->tx_max_key_sz_in_bits / 8;
+
+	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
+		parms->tx_num_flows_in_k * TF_KILOBYTE;
+	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
+		parms->tx_max_key_sz_in_bits / 8;
+
+	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
+		parms->tx_num_flows_in_k * TF_KILOBYTE;
+	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
+		parms->tx_max_action_entry_sz_in_bits / 8;
+
+	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
+
+	return 0;
+}
+
+/** insert EEM entry API
+ *
+ * Inserts a key/record pair into the external EM (EEM) tables of a
+ * table scope.  The 64b hash of the key is split into two 32b
+ * halves; each half, masked to the table size, selects a candidate
+ * slot (two-way placement: the KEY0 table is tried first, KEY1 on
+ * collision).
+ *
+ * returns:
+ *  0
+ *  TF_ERR	    - unable to get lock
+ *
+ * insert callback returns:
+ *   0
+ *   TF_ERR_EM_DUP  - key is already in table
+ */
+static int
+tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
+		    struct tf_insert_em_entry_parms *parms)
+{
+	uint32_t mask;
+	uint32_t key0_hash;
+	uint32_t key1_hash;
+	uint32_t key0_index;
+	uint32_t key1_index;
+	struct cfa_p4_eem_64b_entry key_entry;
+	uint32_t index;
+	enum hcapi_cfa_em_table_type table_type;
+	uint32_t gfid;
+	struct hcapi_cfa_hwop op;
+	struct hcapi_cfa_key_tbl key_tbl;
+	struct hcapi_cfa_key_data key_obj;
+	struct hcapi_cfa_key_loc key_loc;
+	uint64_t big_hash;
+	int rc;
+
+	/* Get mask to use on hash.  Sized from the KEY0 table; KEY0
+	 * and KEY1 are created with the same num_entries (see
+	 * tf_em_validate_num_entries), so one mask serves both.
+	 */
+	mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
+
+	if (!mask)
+		return -EINVAL;
+
+#ifdef TF_EEM_DEBUG
+	dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
+#endif
+
+	/* One 64b hash yields both table indexes: high 32b for the
+	 * KEY0 table, low 32b for the KEY1 table.
+	 */
+	big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
+				      (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
+	key0_hash = (uint32_t)(big_hash >> 32);
+	key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
+
+	key0_index = key0_hash & mask;
+	key1_index = key1_hash & mask;
+
+#ifdef TF_EEM_DEBUG
+	TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
+	TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
+#endif
+	/*
+	 * Use the "result" arg to populate all of the key entry then
+	 * store the byte swapped "raw" entry in a local copy ready
+	 * for insertion in to the table.
+	 */
+	tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
+				((uint8_t *)parms->key),
+				&key_entry);
+
+	/*
+	 * Try to add to Key0 table, if that does not work then
+	 * try the key1 table.
+	 */
+	index = key0_index;
+	op.opcode = HCAPI_CFA_HWOPS_ADD;
+	key_tbl.base0 = (uint8_t *)
+		&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
+	/* Byte offset of the slot within its backing page */
+	key_obj.offset = (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
+	key_obj.data = (uint8_t *)&key_entry;
+	key_obj.size = TF_EM_KEY_RECORD_SIZE;
+
+	rc = hcapi_cfa_key_hw_op(&op,
+				 &key_tbl,
+				 &key_obj,
+				 &key_loc);
+
+	if (rc == 0) {
+		table_type = TF_KEY0_TABLE;
+	} else {
+		/* KEY0 slot taken (or op failed): retry in KEY1 */
+		index = key1_index;
+
+		key_tbl.base0 = (uint8_t *)
+		&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
+		key_obj.offset =
+			(index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
+
+		rc = hcapi_cfa_key_hw_op(&op,
+					 &key_tbl,
+					 &key_obj,
+					 &key_loc);
+		if (rc != 0)
+			return rc;
+
+		table_type = TF_KEY1_TABLE;
+	}
+
+	/* Encode which table and which slot into the GFID, then into
+	 * the caller's flow id/handle so delete can find the entry.
+	 */
+	TF_SET_GFID(gfid,
+		    index,
+		    table_type);
+	TF_SET_FLOW_ID(parms->flow_id,
+		       gfid,
+		       TF_GFID_TABLE_EXTERNAL,
+		       parms->dir);
+	TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
+				     0,
+				     0,
+				     0,
+				     index,
+				     0,
+				     table_type);
+
+	return 0;
+}
+
+/** delete EEM hash entry API
+ *
+ * Decodes the key table (KEY0/KEY1) and slot index from the flow
+ * handle and issues a HW delete op for that slot.
+ *
+ * returns:
+ *   0
+ *   -EINVAL	  - parameter error
+ *   TF_NO_SESSION    - bad session ID
+ *   TF_ERR_TBL_SCOPE - invalid table scope
+ *   TF_ERR_TBL_IF    - invalid table interface
+ *
+ * insert callback returns
+ *   0
+ *   TF_NO_EM_MATCH - entry not found
+ */
+static int
+tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
+		    struct tf_delete_em_entry_parms *parms)
+{
+	enum hcapi_cfa_em_table_type hash_type;
+	uint32_t index;
+	struct hcapi_cfa_hwop op;
+	struct hcapi_cfa_key_tbl key_tbl;
+	struct hcapi_cfa_key_data key_obj;
+	struct hcapi_cfa_key_loc key_loc;
+	int rc;
+
+	if (parms->flow_handle == 0)
+		return -EINVAL;
+
+	/* The flow handle encodes both the key table and the index
+	 * that the insert picked.
+	 */
+	TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
+	TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
+
+	op.opcode = HCAPI_CFA_HWOPS_DEL;
+	key_tbl.base0 = (uint8_t *)
+	&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[(hash_type == 0 ?
+							  TF_KEY0_TABLE :
+							  TF_KEY1_TABLE)];
+	key_obj.offset = (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
+	key_obj.data = NULL;
+	key_obj.size = TF_EM_KEY_RECORD_SIZE;
+
+	/* Propagate the HW op status.  The previous code dropped any
+	 * failure ("if (!rc) return rc;" followed by "return 0;")
+	 * and reported success unconditionally.
+	 */
+	rc = hcapi_cfa_key_hw_op(&op,
+				 &key_tbl,
+				 &key_obj,
+				 &key_loc);
+
+	return rc;
+}
+
+/** insert EM hash entry API
+ *
+ *    returns:
+ *    0       - Success
+ *    -EINVAL - Error
+ */
+int
+tf_em_insert_ext_entry(struct tf *tfp,
+		       struct tf_insert_em_entry_parms *parms)
+{
+	struct tf_session *session;
+	struct tf_tbl_scope_cb *tbl_scope_cb;
+
+	/* Resolve the control block for the requested table scope */
+	session = (struct tf_session *)(tfp->session->core_data);
+	tbl_scope_cb = tbl_scope_cb_find(session, parms->tbl_scope_id);
+	if (tbl_scope_cb == NULL) {
+		TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
+		return -EINVAL;
+	}
+
+	return tf_insert_eem_entry(tbl_scope_cb, parms);
+}
+
+/** Delete EM hash entry API
+ *
+ *    returns:
+ *    0       - Success
+ *    -EINVAL - Error
+ */
+int
+tf_em_delete_ext_entry(struct tf *tfp,
+		       struct tf_delete_em_entry_parms *parms)
+{
+	struct tf_session *session;
+	struct tf_tbl_scope_cb *tbl_scope_cb;
+
+	/* Resolve the control block for the requested table scope */
+	session = (struct tf_session *)(tfp->session->core_data);
+	tbl_scope_cb = tbl_scope_cb_find(session, parms->tbl_scope_id);
+	if (tbl_scope_cb == NULL) {
+		TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
+		return -EINVAL;
+	}
+
+	return tf_delete_eem_entry(tbl_scope_cb, parms);
+}
+
+/** Allocate a host-memory backed EEM table scope.
+ *
+ * Reserves a table scope id from the RM DB, queries per-direction
+ * EEM capabilities, validates/derives the table sizes, registers
+ * the backing memory and configures/enables EEM in firmware, and
+ * finally creates the external action record pools.
+ *
+ * Returns 0 on success; on failure returns the RM error or -EINVAL,
+ * after releasing whatever was acquired (see cleanup labels).
+ */
+int
+tf_em_ext_host_alloc(struct tf *tfp,
+		     struct tf_alloc_tbl_scope_parms *parms)
+{
+	int rc;
+	enum tf_dir dir;
+	struct tf_tbl_scope_cb *tbl_scope_cb;
+	struct hcapi_cfa_em_table *em_tables;
+	struct tf_session *session;
+	struct tf_free_tbl_scope_parms free_parms;
+	struct tf_rm_allocate_parms aparms = { 0 };
+	struct tf_rm_free_parms fparms = { 0 };
+
+	session = (struct tf_session *)tfp->session->core_data;
+
+	/* Get Table Scope control block from the session pool */
+	aparms.rm_db = eem_db[TF_DIR_RX];
+	aparms.db_index = 1/**** TYPE TABLE-SCOPE??? ****/;
+	aparms.index = (uint32_t *)&parms->tbl_scope_id;
+	rc = tf_rm_allocate(&aparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to allocate table scope\n");
+		return rc;
+	}
+
+	/* NOTE(review): RM ids are offset by TF_HACK_TBL_SCOPE_BASE;
+	 * the same offset is re-applied on free below.  Presumably a
+	 * temporary workaround — confirm against the RM DB setup.
+	 */
+	parms->tbl_scope_id -= TF_HACK_TBL_SCOPE_BASE;
+	tbl_scope_cb = &session->tbl_scopes[parms->tbl_scope_id];
+	tbl_scope_cb->index = parms->tbl_scope_id;
+	tbl_scope_cb->tbl_scope_id = parms->tbl_scope_id;
+
+	/* Query firmware for per-direction EEM limits before sizing */
+	for (dir = 0; dir < TF_DIR_MAX; dir++) {
+		rc = tf_msg_em_qcaps(tfp,
+				     dir,
+				     &tbl_scope_cb->em_caps[dir]);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "EEM: Unable to query for EEM capability,"
+				    " rc:%s\n",
+				    strerror(-rc));
+			goto cleanup;
+		}
+	}
+
+	/*
+	 * Validate and setup table sizes
+	 */
+	if (tf_em_validate_num_entries(tbl_scope_cb, parms))
+		goto cleanup;
+
+	for (dir = 0; dir < TF_DIR_MAX; dir++) {
+		/*
+		 * Allocate tables and signal configuration to FW
+		 */
+		rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "EEM: Unable to register for EEM ctx,"
+				    " rc:%s\n",
+				    strerror(-rc));
+			goto cleanup;
+		}
+
+		em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
+		rc = tf_msg_em_cfg(tfp,
+				   em_tables[TF_KEY0_TABLE].num_entries,
+				   em_tables[TF_KEY0_TABLE].ctx_id,
+				   em_tables[TF_KEY1_TABLE].ctx_id,
+				   em_tables[TF_RECORD_TABLE].ctx_id,
+				   em_tables[TF_EFC_TABLE].ctx_id,
+				   parms->hw_flow_cache_flush_timer,
+				   dir);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "TBL: Unable to configure EEM in firmware"
+				    " rc:%s\n",
+				    strerror(-rc));
+			goto cleanup_full;
+		}
+
+		rc = tf_msg_em_op(tfp,
+				  dir,
+				  HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
+
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "EEM: Unable to enable EEM in firmware"
+				    " rc:%s\n",
+				    strerror(-rc));
+			goto cleanup_full;
+		}
+
+		/* Allocate the pool of offsets of the external memory.
+		 * Initially, this is a single fixed size pool for all external
+		 * actions related to a single table scope.
+		 */
+		rc = tf_create_tbl_pool_external(dir,
+					    tbl_scope_cb,
+					    em_tables[TF_RECORD_TABLE].num_entries,
+					    em_tables[TF_RECORD_TABLE].entry_size);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s TBL: Unable to allocate idx pools %s\n",
+				    tf_dir_2_str(dir),
+				    strerror(-rc));
+			goto cleanup_full;
+		}
+	}
+
+	return 0;
+
+cleanup_full:
+	/* Firmware has been (at least partly) configured: run the
+	 * full host free path.  tf_em_ext_host_free only reads
+	 * tbl_scope_id from free_parms, hence no full init here.
+	 */
+	free_parms.tbl_scope_id = parms->tbl_scope_id;
+	tf_em_ext_host_free(tfp, &free_parms);
+	return -EINVAL;
+
+cleanup:
+	/* Nothing configured in firmware yet: just return the table
+	 * scope id to the RM (Free Table control block).
+	 */
+	fparms.rm_db = eem_db[TF_DIR_RX];
+	fparms.db_index = 1/**** TYPE TABLE-SCOPE??? ****/;
+	fparms.index = parms->tbl_scope_id + TF_HACK_TBL_SCOPE_BASE;
+	tf_rm_free(&fparms);
+	return -EINVAL;
+}
+
+/** Free a host-memory backed EEM table scope.
+ *
+ * Returns the table scope id to the RM, then for each direction
+ * destroys the external action pools, disables EEM in firmware and
+ * unregisters/frees the table memory.
+ *
+ * Returns 0 on success, -EINVAL for an unknown table scope; an RM
+ * free failure is logged but the remaining teardown still proceeds.
+ */
+int
+tf_em_ext_host_free(struct tf *tfp,
+		    struct tf_free_tbl_scope_parms *parms)
+{
+	int rc = 0;
+	enum tf_dir  dir;
+	struct tf_tbl_scope_cb *tbl_scope_cb;
+	struct tf_session *session;
+	struct tf_rm_free_parms aparms = { 0 };
+
+	session = (struct tf_session *)(tfp->session->core_data);
+
+	tbl_scope_cb = tbl_scope_cb_find(session,
+					 parms->tbl_scope_id);
+
+	if (tbl_scope_cb == NULL) {
+		TFP_DRV_LOG(ERR, "Table scope error\n");
+		return -EINVAL;
+	}
+
+	/* Free Table control block; id is re-offset by the same
+	 * TF_HACK_TBL_SCOPE_BASE applied on allocation.
+	 */
+	aparms.rm_db = eem_db[TF_DIR_RX];
+	aparms.db_index = 1/**** TYPE TABLE-SCOPE??? ****/;
+	aparms.index = parms->tbl_scope_id + TF_HACK_TBL_SCOPE_BASE;
+	rc = tf_rm_free(&aparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to free table scope\n");
+	}
+
+	/* free table scope locks */
+	for (dir = 0; dir < TF_DIR_MAX; dir++) {
+		/* Free associated external pools
+		 */
+		tf_destroy_tbl_pool_external(dir,
+					     tbl_scope_cb);
+		/* Disable EEM in firmware for this direction; return
+		 * value not checked (teardown continues regardless)
+		 */
+		tf_msg_em_op(tfp,
+			     dir,
+			     HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
+
+		/* free table scope and all associated resources */
+		tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
+	}
+
+	return rc;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_em_internal.c b/drivers/net/bnxt/tf_core/tf_em_internal.c
new file mode 100644
index 000000000..9be91ad5d
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_em_internal.c
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_log.h>
+
+#include "tf_core.h"
+#include "tf_util.h"
+#include "tf_common.h"
+#include "tf_em.h"
+#include "tf_msg.h"
+#include "tfp.h"
+#include "tf_ext_flow_handle.h"
+
+#include "bnxt.h"
+
+/**
+ * EM DBs.
+ */
+static void *em_db[TF_DIR_MAX];
+
+/**
+ * Init flag, set on bind and cleared on unbind
+ */
+static uint8_t init;
+
+/**
+ * Create EM Tbl pool of memory indexes.
+ *
+ * [in] session
+ *   Pointer to session
+ * [in] dir
+ *   direction
+ * [in] num_entries
+ *   number of entries to write
+ *
+ * Return:
+ *  0       - Success, entry allocated - no search support
+ *  -ENOMEM -EINVAL -EOPNOTSUPP
+ *          - Failure, entry not allocated, out of resources
+ */
+static int
+tf_create_em_pool(struct tf_session *session,
+		  enum tf_dir dir,
+		  uint32_t num_entries)
+{
+	struct stack *pool = &session->em_pool[dir];
+	struct tfp_calloc_parms parms;
+	uint32_t i;
+	int rc;
+
+	/* Backing store for the stack of free indexes */
+	parms.nitems = num_entries;
+	parms.size = sizeof(uint32_t);
+	parms.alignment = 0;
+
+	rc = tfp_calloc(&parms);
+	if (rc) {
+		TFP_DRV_LOG(ERR, "EM pool allocation failure %s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Create empty stack
+	 */
+	rc = stack_init(num_entries, (uint32_t *)parms.mem_va, pool);
+	if (rc) {
+		TFP_DRV_LOG(ERR, "EM pool stack init failure %s\n",
+			    strerror(-rc));
+		goto cleanup;
+	}
+
+	/* Fill pool with indexes, pushed in descending order so that
+	 * the lowest index is popped first.
+	 */
+	for (i = 0; i < num_entries; i++) {
+		rc = stack_push(pool, num_entries - 1 - i);
+		if (rc) {
+			TFP_DRV_LOG(ERR, "EM pool stack push failure %s\n",
+				    strerror(-rc));
+			goto cleanup;
+		}
+	}
+
+	if (!stack_is_full(pool)) {
+		rc = -EINVAL;
+		TFP_DRV_LOG(ERR, "EM pool stack failure %s\n",
+			    strerror(-rc));
+		goto cleanup;
+	}
+
+	return 0;
+cleanup:
+	tfp_free((void *)parms.mem_va);
+	return rc;
+}
+
+/**
+ * Free EM Tbl pool of memory indexes.
+ *
+ * Releases the backing store that tf_create_em_pool allocated for
+ * the per-direction free-index stack.  Safe to call when the pool
+ * was never initialized (stack_items then yields NULL).
+ *
+ * [in] session
+ *   Pointer to session
+ * [in] dir
+ *   direction
+ *
+ * Return:
+ */
+static void
+tf_free_em_pool(struct tf_session *session,
+		enum tf_dir dir)
+{
+	struct stack *pool = &session->em_pool[dir];
+	uint32_t *ptr;
+
+	ptr = stack_items(pool);
+
+	if (ptr != NULL)
+		tfp_free(ptr);
+}
+
+/**
+ * Insert EM internal entry API
+ *
+ * Pops a free record index from the per-direction pool, asks
+ * firmware to install the entry and encodes the result into the
+ * caller's flow id/handle.
+ *
+ *  returns:
+ *     0  - Success
+ *     <0 - Failure (index pool exhausted or firmware error; the
+ *          popped index is returned to the pool on FW failure)
+ */
+int
+tf_em_insert_int_entry(struct tf *tfp,
+		       struct tf_insert_em_entry_parms *parms)
+{
+	int rc;
+	uint32_t gfid;
+	uint16_t rptr_index = 0;
+	uint8_t rptr_entry = 0;
+	uint8_t num_of_entries = 0;
+	struct tf_session *session =
+		(struct tf_session *)(tfp->session->core_data);
+	struct stack *pool = &session->em_pool[parms->dir];
+	uint32_t index;
+
+	/* Allocate a free record index from the per-direction pool */
+	rc = stack_pop(pool, &index);
+	if (rc) {
+		PMD_DRV_LOG
+		  (ERR,
+		   "dir:%d, EM entry index allocation failed\n",
+		   parms->dir);
+		return rc;
+	}
+
+	rptr_index = index * TF_SESSION_EM_ENTRY_SIZE;
+	rc = tf_msg_insert_em_internal_entry(tfp,
+					     parms,
+					     &rptr_index,
+					     &rptr_entry,
+					     &num_of_entries);
+	if (rc) {
+		/* Return the unused index so it is not leaked on
+		 * firmware failure; propagate the real error code
+		 * (previously a bare -1 was returned and the index
+		 * was lost).
+		 */
+		stack_push(pool, index);
+		return rc;
+	}
+
+	/* Success-path trace; DEBUG level since this is not an error
+	 * (was logged at ERR).
+	 */
+	PMD_DRV_LOG
+		  (DEBUG,
+		   "Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
+		   index * TF_SESSION_EM_ENTRY_SIZE,
+		   rptr_index,
+		   rptr_entry,
+		   num_of_entries);
+
+	TF_SET_GFID(gfid,
+		    ((rptr_index << TF_EM_INTERNAL_INDEX_SHIFT) |
+		     rptr_entry),
+		    0); /* N/A for internal table */
+
+	TF_SET_FLOW_ID(parms->flow_id,
+		       gfid,
+		       TF_GFID_TABLE_INTERNAL,
+		       parms->dir);
+
+	TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
+				     (uint32_t)num_of_entries,
+				     0,
+				     0,
+				     rptr_index,
+				     rptr_entry,
+				     0);
+	return 0;
+}
+
+
+/** Delete EM internal entry API
+ *
+ * returns:
+ * 0
+ * -EINVAL
+ */
+int
+tf_em_delete_int_entry(struct tf *tfp,
+		       struct tf_delete_em_entry_parms *parms)
+{
+	struct tf_session *session =
+		(struct tf_session *)(tfp->session->core_data);
+	struct stack *pool = &session->em_pool[parms->dir];
+	int rc;
+
+	rc = tf_msg_delete_em_entry(tfp, parms);
+	if (rc != 0)
+		return rc;
+
+	/* Return resource to pool */
+	stack_push(pool, parms->index / TF_SESSION_EM_ENTRY_SIZE);
+
+	return 0;
+}
+
+/** Bind the internal EM module: create the per-direction free-index
+ * pools and (optionally) the EM RM DBs.
+ *
+ * Returns 0 on success, -EINVAL if already bound, or the underlying
+ * pool/DB creation error (partially created pools are unwound).
+ */
+int
+tf_em_int_bind(struct tf *tfp,
+	       struct tf_em_cfg_parms *parms)
+{
+	int rc;
+	int i;
+	struct tf_rm_create_db_parms db_cfg = { 0 };
+	struct tf_session *session;
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+	if (init) {
+		/* Message fixed: previously said "Identifier" (copy
+		 * paste from the identifier module).
+		 */
+		TFP_DRV_LOG(ERR,
+			    "EM Int DB already initialized\n");
+		return -EINVAL;
+	}
+
+	session = (struct tf_session *)tfp->session->core_data;
+
+	/* Create the per-direction pools; check the result (it was
+	 * previously ignored) and unwind on failure.
+	 */
+	for (i = 0; i < TF_DIR_MAX; i++) {
+		rc = tf_create_em_pool(session,
+				       i,
+				       TF_SESSION_EM_POOL_SIZE);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s: EM pool creation failed\n",
+				    tf_dir_2_str(i));
+			for (i--; i >= 0; i--)
+				tf_free_em_pool(session, i);
+			return rc;
+		}
+	}
+
+	/*
+	 * I'm not sure that this code is needed.
+	 * leaving for now until resolved
+	 */
+	if (parms->num_elements) {
+		db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
+		db_cfg.num_elements = parms->num_elements;
+		db_cfg.cfg = parms->cfg;
+
+		for (i = 0; i < TF_DIR_MAX; i++) {
+			db_cfg.dir = i;
+			db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
+			db_cfg.rm_db = &em_db[i];
+			rc = tf_rm_create_db(tfp, &db_cfg);
+			if (rc) {
+				TFP_DRV_LOG(ERR,
+					    "%s: EM DB creation failed\n",
+					    tf_dir_2_str(i));
+
+				return rc;
+			}
+		}
+	}
+
+	init = 1;
+	return 0;
+}
+
+/** Unbind the internal EM module: free the per-direction index
+ * pools and destroy any EM RM DBs created by tf_em_int_bind.
+ *
+ * Returns 0 on success, -EINVAL if never bound, or the DB free
+ * error.
+ */
+int
+tf_em_int_unbind(struct tf *tfp)
+{
+	int rc;
+	int i;
+	struct tf_rm_free_db_parms fparms = { 0 };
+	struct tf_session *session;
+
+	TF_CHECK_PARMS1(tfp);
+
+	/* Bail if nothing has been initialized.  NOTE(review): the
+	 * original comment claimed this was silent "to allow for
+	 * creation cleanup", but the code logs an error and returns
+	 * -EINVAL — confirm which behavior is intended.
+	 */
+	if (!init) {
+		TFP_DRV_LOG(ERR,
+			    "No EM DBs created\n");
+		return -EINVAL;
+	}
+
+	session = (struct tf_session *)tfp->session->core_data;
+
+	for (i = 0; i < TF_DIR_MAX; i++)
+		tf_free_em_pool(session, i);
+
+	for (i = 0; i < TF_DIR_MAX; i++) {
+		fparms.dir = i;
+		fparms.rm_db = em_db[i];
+		if (em_db[i] != NULL) {
+			rc = tf_rm_free_db(tfp, &fparms);
+			if (rc)
+				return rc;
+		}
+
+		em_db[i] = NULL;
+	}
+
+	init = 0;
+
+	return 0;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_em_system.c b/drivers/net/bnxt/tf_core/tf_em_system.c
new file mode 100644
index 000000000..ee18a0c70
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_em_system.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_log.h>
+
+#include "tf_core.h"
+#include "tf_em.h"
+#include "tf_em_common.h"
+#include "tf_msg.h"
+#include "tfp.h"
+#include "lookup3.h"
+#include "tf_ext_flow_handle.h"
+
+#include "bnxt.h"
+
+
+/** insert EEM entry API (system memory variant)
+ *
+ * Placeholder: performs no table manipulation and always reports
+ * success.  Presumably the real system-memory insert is to be
+ * implemented later — TODO confirm.
+ *
+ * returns:
+ *   0
+ */
+static int
+tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb __rte_unused,
+		    struct tf_insert_em_entry_parms *parms __rte_unused)
+{
+	return 0;
+}
+
+/** delete EEM hash entry API (system memory variant)
+ *
+ * Placeholder: performs no table manipulation and always reports
+ * success.  Presumably the real system-memory delete is to be
+ * implemented later — TODO confirm.
+ *
+ * returns:
+ *   0
+ */
+static int
+tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb __rte_unused,
+		    struct tf_delete_em_entry_parms *parms __rte_unused)
+{
+	return 0;
+}
+
+/** insert EM hash entry API
+ *
+ *    returns:
+ *    0       - Success
+ *    -EINVAL - Error
+ */
+int
+tf_em_insert_ext_sys_entry(struct tf *tfp,
+			   struct tf_insert_em_entry_parms *parms)
+{
+	struct tf_session *session;
+	struct tf_tbl_scope_cb *tbl_scope_cb;
+
+	/* Resolve the control block for the requested table scope */
+	session = (struct tf_session *)(tfp->session->core_data);
+	tbl_scope_cb = tbl_scope_cb_find(session, parms->tbl_scope_id);
+	if (tbl_scope_cb == NULL) {
+		TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
+		return -EINVAL;
+	}
+
+	return tf_insert_eem_entry(tbl_scope_cb, parms);
+}
+
+/** Delete EM hash entry API
+ *
+ *    returns:
+ *    0       - Success
+ *    -EINVAL - Error
+ */
+int
+tf_em_delete_ext_sys_entry(struct tf *tfp,
+			   struct tf_delete_em_entry_parms *parms)
+{
+	struct tf_session *session;
+	struct tf_tbl_scope_cb *tbl_scope_cb;
+
+	/* Resolve the control block for the requested table scope */
+	session = (struct tf_session *)(tfp->session->core_data);
+	tbl_scope_cb = tbl_scope_cb_find(session, parms->tbl_scope_id);
+	if (tbl_scope_cb == NULL) {
+		TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
+		return -EINVAL;
+	}
+
+	return tf_delete_eem_entry(tbl_scope_cb, parms);
+}
+
+/** Allocate a system-memory backed table scope.
+ *
+ * Placeholder: currently a no-op that reports success; system EM
+ * table-scope allocation is not implemented here yet.
+ */
+int
+tf_em_ext_system_alloc(struct tf *tfp __rte_unused,
+		       struct tf_alloc_tbl_scope_parms *parms __rte_unused)
+{
+	return 0;
+}
+
+/** Free a system-memory backed table scope.
+ *
+ * Placeholder: currently a no-op that reports success; system EM
+ * table-scope free is not implemented here yet.
+ */
+int
+tf_em_ext_system_free(struct tf *tfp __rte_unused,
+		      struct tf_free_tbl_scope_parms *parms __rte_unused)
+{
+	return 0;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_msg.c b/drivers/net/bnxt/tf_core/tf_msg.c
index c015b0ce2..d8b80bc84 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.c
+++ b/drivers/net/bnxt/tf_core/tf_msg.c
@@ -18,82 +18,6 @@
 #include "hwrm_tf.h"
 #include "tf_em.h"
 
-/**
- * Endian converts min and max values from the HW response to the query
- */
-#define TF_HW_RESP_TO_QUERY(query, index, response, element) do {            \
-	(query)->hw_query[index].min =                                       \
-		tfp_le_to_cpu_16(response. element ## _min);                 \
-	(query)->hw_query[index].max =                                       \
-		tfp_le_to_cpu_16(response. element ## _max);                 \
-} while (0)
-
-/**
- * Endian converts the number of entries from the alloc to the request
- */
-#define TF_HW_ALLOC_TO_REQ(alloc, index, request, element)                   \
-	(request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
-
-/**
- * Endian converts the start and stride value from the free to the request
- */
-#define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do {            \
-	request.element ## _start =                                          \
-		tfp_cpu_to_le_16(hw_entry[index].start);                     \
-	request.element ## _stride =                                         \
-		tfp_cpu_to_le_16(hw_entry[index].stride);                    \
-} while (0)
-
-/**
- * Endian converts the start and stride from the HW response to the
- * alloc
- */
-#define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do {         \
-	hw_entry[index].start =                                              \
-		tfp_le_to_cpu_16(response.element ## _start);                \
-	hw_entry[index].stride =                                             \
-		tfp_le_to_cpu_16(response.element ## _stride);               \
-} while (0)
-
-/**
- * Endian converts min and max values from the SRAM response to the
- * query
- */
-#define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do {          \
-	(query)->sram_query[index].min =                                     \
-		tfp_le_to_cpu_16(response.element ## _min);                  \
-	(query)->sram_query[index].max =                                     \
-		tfp_le_to_cpu_16(response.element ## _max);                  \
-} while (0)
-
-/**
- * Endian converts the number of entries from the action (alloc) to
- * the request
- */
-#define TF_SRAM_ALLOC_TO_REQ(action, index, request, element)                \
-	(request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
-
-/**
- * Endian converts the start and stride value from the free to the request
- */
-#define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do {        \
-	request.element ## _start =                                          \
-		tfp_cpu_to_le_16(sram_entry[index].start);                   \
-	request.element ## _stride =                                         \
-		tfp_cpu_to_le_16(sram_entry[index].stride);                  \
-} while (0)
-
-/**
- * Endian converts the start and stride from the HW response to the
- * alloc
- */
-#define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do {     \
-	sram_entry[index].start =                                            \
-		tfp_le_to_cpu_16(response.element ## _start);                \
-	sram_entry[index].stride =                                           \
-		tfp_le_to_cpu_16(response.element ## _stride);               \
-} while (0)
-
 /**
  * This is the MAX data we can transport across regular HWRM
  */
@@ -107,39 +31,6 @@ struct tf_msg_dma_buf {
 	uint64_t pa_addr;
 };
 
-static int
-tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
-		   uint32_t *hwrm_type)
-{
-	int rc = 0;
-
-	switch (tcam_type) {
-	case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
-		*hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
-		break;
-	case TF_TCAM_TBL_TYPE_PROF_TCAM:
-		*hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
-		break;
-	case TF_TCAM_TBL_TYPE_WC_TCAM:
-		*hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
-		break;
-	case TF_TCAM_TBL_TYPE_VEB_TCAM:
-		rc = -EOPNOTSUPP;
-		break;
-	case TF_TCAM_TBL_TYPE_SP_TCAM:
-		rc = -EOPNOTSUPP;
-		break;
-	case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
-		rc = -EOPNOTSUPP;
-		break;
-	default:
-		rc = -EOPNOTSUPP;
-		break;
-	}
-
-	return rc;
-}
-
 /**
  * Allocates a DMA buffer that can be used for message transfer.
  *
@@ -185,13 +76,8 @@ tf_msg_free_dma_buf(struct tf_msg_dma_buf *buf)
 	tfp_free(buf->va_addr);
 }
 
-/**
- * NEW HWRM direct messages
- */
+/* HWRM Direct messages */
 
-/**
- * Sends session open request to TF Firmware
- */
 int
 tf_msg_session_open(struct tf *tfp,
 		    char *ctrl_chan_name,
@@ -222,9 +108,6 @@ tf_msg_session_open(struct tf *tfp,
 	return rc;
 }
 
-/**
- * Sends session attach request to TF Firmware
- */
 int
 tf_msg_session_attach(struct tf *tfp __rte_unused,
 		      char *ctrl_chan_name __rte_unused,
@@ -233,9 +116,6 @@ tf_msg_session_attach(struct tf *tfp __rte_unused,
 	return -1;
 }
 
-/**
- * Sends session close request to TF Firmware
- */
 int
 tf_msg_session_close(struct tf *tfp)
 {
@@ -261,14 +141,11 @@ tf_msg_session_close(struct tf *tfp)
 	return rc;
 }
 
-/**
- * Sends session query config request to TF Firmware
- */
 int
 tf_msg_session_qcfg(struct tf *tfp)
 {
 	int rc;
-	struct hwrm_tf_session_qcfg_input  req = { 0 };
+	struct hwrm_tf_session_qcfg_input req = { 0 };
 	struct hwrm_tf_session_qcfg_output resp = { 0 };
 	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
 	struct tfp_send_msg_parms parms = { 0 };
@@ -289,636 +166,6 @@ tf_msg_session_qcfg(struct tf *tfp)
 	return rc;
 }
 
-/**
- * Sends session HW resource query capability request to TF Firmware
- */
-int
-tf_msg_session_hw_resc_qcaps(struct tf *tfp,
-			     enum tf_dir dir,
-			     struct tf_rm_hw_query *query)
-{
-	int rc;
-	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_session_hw_resc_qcaps_input req = { 0 };
-	struct tf_session_hw_resc_qcaps_output resp = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-
-	memset(query, 0, sizeof(*query));
-
-	/* Populate the request */
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	req.flags = tfp_cpu_to_le_16(dir);
-
-	MSG_PREP(parms,
-		 TF_KONG_MB,
-		 HWRM_TF,
-		 HWRM_TFT_SESSION_HW_RESC_QCAPS,
-		 req,
-		 resp);
-
-	rc = tfp_send_msg_tunneled(tfp, &parms);
-	if (rc)
-		return rc;
-
-	/* Process the response */
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
-			    l2_ctx_tcam_entries);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
-			    prof_func);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
-			    prof_tcam_entries);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
-			    em_prof_id);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
-			    em_record_entries);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
-			    wc_tcam_prof_id);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
-			    wc_tcam_entries);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
-			    meter_profiles);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
-			    resp, meter_inst);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
-			    mirrors);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
-			    upar);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
-			    sp_tcam_entries);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
-			    l2_func);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
-			    flex_key_templ);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
-			    tbl_scope);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
-			    epoch0_entries);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
-			    epoch1_entries);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
-			    metadata);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
-			    ct_state);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
-			    range_prof);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
-			    range_entries);
-	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
-			    lag_tbl_entries);
-
-	return tfp_le_to_cpu_32(parms.tf_resp_code);
-}
-
-/**
- * Sends session HW resource allocation request to TF Firmware
- */
-int
-tf_msg_session_hw_resc_alloc(struct tf *tfp __rte_unused,
-			     enum tf_dir dir,
-			     struct tf_rm_hw_alloc *hw_alloc __rte_unused,
-			     struct tf_rm_entry *hw_entry __rte_unused)
-{
-	int rc;
-	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_session_hw_resc_alloc_input req = { 0 };
-	struct tf_session_hw_resc_alloc_output resp = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-
-	memset(hw_entry, 0, sizeof(*hw_entry));
-
-	/* Populate the request */
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	req.flags = tfp_cpu_to_le_16(dir);
-
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
-			   l2_ctx_tcam_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
-			   prof_func_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
-			   prof_tcam_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
-			   em_prof_id);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
-			   em_record_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
-			   wc_tcam_prof_id);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
-			   wc_tcam_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
-			   meter_profiles);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
-			   meter_inst);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
-			   mirrors);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
-			   upar);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
-			   sp_tcam_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
-			   l2_func);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
-			   flex_key_templ);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
-			   tbl_scope);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
-			   epoch0_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
-			   epoch1_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
-			   metadata);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
-			   ct_state);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
-			   range_prof);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
-			   range_entries);
-	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
-			   lag_tbl_entries);
-
-	MSG_PREP(parms,
-		 TF_KONG_MB,
-		 HWRM_TF,
-		 HWRM_TFT_SESSION_HW_RESC_ALLOC,
-		 req,
-		 resp);
-
-	rc = tfp_send_msg_tunneled(tfp, &parms);
-	if (rc)
-		return rc;
-
-	/* Process the response */
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
-			    l2_ctx_tcam_entries);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
-			    prof_func);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
-			    prof_tcam_entries);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
-			    em_prof_id);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
-			    em_record_entries);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
-			    wc_tcam_prof_id);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
-			    wc_tcam_entries);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
-			    meter_profiles);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
-			    meter_inst);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
-			    mirrors);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
-			    upar);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
-			    sp_tcam_entries);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
-			    l2_func);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
-			    flex_key_templ);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
-			    tbl_scope);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
-			    epoch0_entries);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
-			    epoch1_entries);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
-			    metadata);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
-			    ct_state);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
-			    range_prof);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
-			    range_entries);
-	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
-			    lag_tbl_entries);
-
-	return tfp_le_to_cpu_32(parms.tf_resp_code);
-}
-
-/**
- * Sends session HW resource free request to TF Firmware
- */
-int
-tf_msg_session_hw_resc_free(struct tf *tfp,
-			    enum tf_dir dir,
-			    struct tf_rm_entry *hw_entry)
-{
-	int rc;
-	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_session_hw_resc_free_input req = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-
-	memset(hw_entry, 0, sizeof(*hw_entry));
-
-	/* Populate the request */
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	req.flags = tfp_cpu_to_le_16(dir);
-
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
-			  l2_ctx_tcam_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
-			  prof_func);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
-			  prof_tcam_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
-			  em_prof_id);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
-			  em_record_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
-			  wc_tcam_prof_id);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
-			  wc_tcam_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
-			  meter_profiles);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
-			  meter_inst);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
-			  mirrors);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
-			  upar);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
-			  sp_tcam_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
-			  l2_func);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
-			  flex_key_templ);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
-			  tbl_scope);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
-			  epoch0_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
-			  epoch1_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
-			  metadata);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
-			  ct_state);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
-			  range_prof);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
-			  range_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
-			  lag_tbl_entries);
-
-	MSG_PREP_NO_RESP(parms,
-			 TF_KONG_MB,
-			 HWRM_TF,
-			 HWRM_TFT_SESSION_HW_RESC_FREE,
-			 req);
-
-	rc = tfp_send_msg_tunneled(tfp, &parms);
-	if (rc)
-		return rc;
-
-	return tfp_le_to_cpu_32(parms.tf_resp_code);
-}
-
-/**
- * Sends session HW resource flush request to TF Firmware
- */
-int
-tf_msg_session_hw_resc_flush(struct tf *tfp,
-			     enum tf_dir dir,
-			     struct tf_rm_entry *hw_entry)
-{
-	int rc;
-	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_session_hw_resc_free_input req = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-
-	/* Populate the request */
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	req.flags = tfp_cpu_to_le_16(dir);
-
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
-			  l2_ctx_tcam_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
-			  prof_func);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
-			  prof_tcam_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
-			  em_prof_id);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
-			  em_record_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
-			  wc_tcam_prof_id);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
-			  wc_tcam_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
-			  meter_profiles);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
-			  meter_inst);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
-			  mirrors);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
-			  upar);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
-			  sp_tcam_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
-			  l2_func);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
-			  flex_key_templ);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
-			  tbl_scope);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
-			  epoch0_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
-			  epoch1_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
-			  metadata);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
-			  ct_state);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
-			  range_prof);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
-			  range_entries);
-	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
-			  lag_tbl_entries);
-
-	MSG_PREP_NO_RESP(parms,
-			 TF_KONG_MB,
-			 TF_TYPE_TRUFLOW,
-			 HWRM_TFT_SESSION_HW_RESC_FLUSH,
-			 req);
-
-	rc = tfp_send_msg_tunneled(tfp, &parms);
-	if (rc)
-		return rc;
-
-	return tfp_le_to_cpu_32(parms.tf_resp_code);
-}
-
-/**
- * Sends session SRAM resource query capability request to TF Firmware
- */
-int
-tf_msg_session_sram_resc_qcaps(struct tf *tfp __rte_unused,
-			       enum tf_dir dir,
-			       struct tf_rm_sram_query *query __rte_unused)
-{
-	int rc;
-	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_session_sram_resc_qcaps_input req = { 0 };
-	struct tf_session_sram_resc_qcaps_output resp = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-
-	/* Populate the request */
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	req.flags = tfp_cpu_to_le_16(dir);
-
-	MSG_PREP(parms,
-		 TF_KONG_MB,
-		 HWRM_TF,
-		 HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
-		 req,
-		 resp);
-
-	rc = tfp_send_msg_tunneled(tfp, &parms);
-	if (rc)
-		return rc;
-
-	/* Process the response */
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
-			      full_action);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
-			      mcg);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
-			      encap_8b);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
-			      encap_16b);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
-			      encap_64b);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
-			      sp_smac);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
-			      sp_smac_ipv4);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
-			      sp_smac_ipv6);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
-			      counter_64b);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
-			      nat_sport);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
-			      nat_dport);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
-			      nat_s_ipv4);
-	TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
-			      nat_d_ipv4);
-
-	return tfp_le_to_cpu_32(parms.tf_resp_code);
-}
-
-/**
- * Sends session SRAM resource allocation request to TF Firmware
- */
-int
-tf_msg_session_sram_resc_alloc(struct tf *tfp __rte_unused,
-			       enum tf_dir dir,
-			       struct tf_rm_sram_alloc *sram_alloc __rte_unused,
-			       struct tf_rm_entry *sram_entry __rte_unused)
-{
-	int rc;
-	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_session_sram_resc_alloc_input req = { 0 };
-	struct tf_session_sram_resc_alloc_output resp;
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-
-	memset(&resp, 0, sizeof(resp));
-
-	/* Populate the request */
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	req.flags = tfp_cpu_to_le_16(dir);
-
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
-			     full_action);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
-			     mcg);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
-			     encap_8b);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
-			     encap_16b);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
-			     encap_64b);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
-			     sp_smac);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
-			     req, sp_smac_ipv4);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
-			     req, sp_smac_ipv6);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
-			     req, counter_64b);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
-			     nat_sport);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
-			     nat_dport);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
-			     nat_s_ipv4);
-	TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
-			     nat_d_ipv4);
-
-	MSG_PREP(parms,
-		 TF_KONG_MB,
-		 HWRM_TF,
-		 HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
-		 req,
-		 resp);
-
-	rc = tfp_send_msg_tunneled(tfp, &parms);
-	if (rc)
-		return rc;
-
-	/* Process the response */
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
-			      resp, full_action);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
-			      mcg);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
-			      encap_8b);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
-			      encap_16b);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
-			      encap_64b);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
-			      sp_smac);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
-			      resp, sp_smac_ipv4);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
-			      resp, sp_smac_ipv6);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
-			      counter_64b);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
-			      nat_sport);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
-			      nat_dport);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
-			      nat_s_ipv4);
-	TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
-			      nat_d_ipv4);
-
-	return tfp_le_to_cpu_32(parms.tf_resp_code);
-}
-
-/**
- * Sends session SRAM resource free request to TF Firmware
- */
-int
-tf_msg_session_sram_resc_free(struct tf *tfp __rte_unused,
-			      enum tf_dir dir,
-			      struct tf_rm_entry *sram_entry __rte_unused)
-{
-	int rc;
-	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_session_sram_resc_free_input req = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-
-	/* Populate the request */
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	req.flags = tfp_cpu_to_le_16(dir);
-
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
-			    full_action);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
-			    mcg);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
-			    encap_8b);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
-			    encap_16b);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
-			    encap_64b);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
-			    sp_smac);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
-			    sp_smac_ipv4);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
-			    sp_smac_ipv6);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
-			    counter_64b);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
-			    nat_sport);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
-			    nat_dport);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
-			    nat_s_ipv4);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
-			    nat_d_ipv4);
-
-	MSG_PREP_NO_RESP(parms,
-			 TF_KONG_MB,
-			 HWRM_TF,
-			 HWRM_TFT_SESSION_SRAM_RESC_FREE,
-			 req);
-
-	rc = tfp_send_msg_tunneled(tfp, &parms);
-	if (rc)
-		return rc;
-
-	return tfp_le_to_cpu_32(parms.tf_resp_code);
-}
-
-/**
- * Sends session SRAM resource flush request to TF Firmware
- */
-int
-tf_msg_session_sram_resc_flush(struct tf *tfp,
-			       enum tf_dir dir,
-			       struct tf_rm_entry *sram_entry)
-{
-	int rc;
-	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_session_sram_resc_free_input req = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-
-	/* Populate the request */
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	req.flags = tfp_cpu_to_le_16(dir);
-
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
-			    full_action);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
-			    mcg);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
-			    encap_8b);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
-			    encap_16b);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
-			    encap_64b);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
-			    sp_smac);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
-			    sp_smac_ipv4);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
-			    sp_smac_ipv6);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
-			    counter_64b);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
-			    nat_sport);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
-			    nat_dport);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
-			    nat_s_ipv4);
-	TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
-			    nat_d_ipv4);
-
-	MSG_PREP_NO_RESP(parms,
-			 TF_KONG_MB,
-			 TF_TYPE_TRUFLOW,
-			 HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
-			 req);
-
-	rc = tfp_send_msg_tunneled(tfp, &parms);
-	if (rc)
-		return rc;
-
-	return tfp_le_to_cpu_32(parms.tf_resp_code);
-}
-
 int
 tf_msg_session_resc_qcaps(struct tf *tfp,
 			  enum tf_dir dir,
@@ -973,7 +220,7 @@ tf_msg_session_resc_qcaps(struct tf *tfp,
 	/* Process the response
 	 * Should always get expected number of entries
 	 */
-	if (resp.size != size) {
+	if (tfp_le_to_cpu_32(resp.size) != size) {
 		TFP_DRV_LOG(ERR,
 			    "%s: QCAPS message size error, rc:%s\n",
 			    tf_dir_2_str(dir),
@@ -981,14 +228,14 @@ tf_msg_session_resc_qcaps(struct tf *tfp,
 		return -EINVAL;
 	}
 
-	printf("size: %d\n", resp.size);
+	printf("size: %d\n", tfp_le_to_cpu_32(resp.size));
 
 	/* Post process the response */
 	data = (struct tf_rm_resc_req_entry *)qcaps_buf.va_addr;
 
 	printf("\nQCAPS\n");
 	for (i = 0; i < size; i++) {
-		query[i].type = tfp_cpu_to_le_32(data[i].type);
+		query[i].type = tfp_le_to_cpu_32(data[i].type);
 		query[i].min = tfp_le_to_cpu_16(data[i].min);
 		query[i].max = tfp_le_to_cpu_16(data[i].max);
 
@@ -1078,7 +325,7 @@ tf_msg_session_resc_alloc(struct tf *tfp,
 	/* Process the response
 	 * Should always get expected number of entries
 	 */
-	if (resp.size != size) {
+	if (tfp_le_to_cpu_32(resp.size) != size) {
 		TFP_DRV_LOG(ERR,
 			    "%s: Alloc message size error, rc:%s\n",
 			    tf_dir_2_str(dir),
@@ -1087,14 +334,14 @@ tf_msg_session_resc_alloc(struct tf *tfp,
 	}
 
 	printf("\nRESV\n");
-	printf("size: %d\n", resp.size);
+	printf("size: %d\n", tfp_le_to_cpu_32(resp.size));
 
 	/* Post process the response */
 	resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr;
 	for (i = 0; i < size; i++) {
-		resv[i].type = tfp_cpu_to_le_32(resv_data[i].type);
-		resv[i].start = tfp_cpu_to_le_16(resv_data[i].start);
-		resv[i].stride = tfp_cpu_to_le_16(resv_data[i].stride);
+		resv[i].type = tfp_le_to_cpu_32(resv_data[i].type);
+		resv[i].start = tfp_le_to_cpu_16(resv_data[i].start);
+		resv[i].stride = tfp_le_to_cpu_16(resv_data[i].stride);
 
 		printf("%d type: %d(0x%x) %d %d\n",
 		       i,
@@ -1173,24 +420,112 @@ tf_msg_session_resc_flush(struct tf *tfp,
 	return rc;
 }
 
-/**
- * Sends EM mem register request to Firmware
- */
-int tf_msg_em_mem_rgtr(struct tf *tfp,
-		       int           page_lvl,
-		       int           page_size,
-		       uint64_t      dma_addr,
-		       uint16_t     *ctx_id)
+int
+tf_msg_insert_em_internal_entry(struct tf *tfp,
+				struct tf_insert_em_entry_parms *em_parms,
+				uint16_t *rptr_index,
+				uint8_t *rptr_entry,
+				uint8_t *num_of_entries)
 {
 	int rc;
-	struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
-	struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
 	struct tfp_send_msg_parms parms = { 0 };
+	struct hwrm_tf_em_insert_input req = { 0 };
+	struct hwrm_tf_em_insert_output resp = { 0 };
+	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+	struct tf_em_64b_entry *em_result =
+		(struct tf_em_64b_entry *)em_parms->em_record;
+	uint32_t flags;
+
+	req.fw_session_id =
+		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+	tfp_memcpy(req.em_key,
+		   em_parms->key,
+		   ((em_parms->key_sz_in_bits + 7) / 8));
+
+	flags = (em_parms->dir == TF_DIR_TX ?
+		 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
+		 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
+	req.flags = tfp_cpu_to_le_16(flags);
+	req.strength = (em_result->hdr.word1 &
+			CFA_P4_EEM_ENTRY_STRENGTH_MASK) >>
+			CFA_P4_EEM_ENTRY_STRENGTH_SHIFT;
+	req.em_key_bitlen = em_parms->key_sz_in_bits;
+	req.action_ptr = em_result->hdr.pointer;
+	req.em_record_idx = *rptr_index;
+
+	parms.tf_type = HWRM_TF_EM_INSERT;
+	parms.req_data = (uint32_t *)&req;
+	parms.req_size = sizeof(req);
+	parms.resp_data = (uint32_t *)&resp;
+	parms.resp_size = sizeof(resp);
+	parms.mailbox = TF_KONG_MB;
+
+	rc = tfp_send_msg_direct(tfp,
+				 &parms);
+	if (rc)
+		return rc;
+
+	*rptr_entry = resp.rptr_entry;
+	*rptr_index = resp.rptr_index;
+	*num_of_entries = resp.num_of_entries;
+
+	return 0;
+}
+
+int
+tf_msg_delete_em_entry(struct tf *tfp,
+		       struct tf_delete_em_entry_parms *em_parms)
+{
+	int rc;
+	struct tfp_send_msg_parms parms = { 0 };
+	struct hwrm_tf_em_delete_input req = { 0 };
+	struct hwrm_tf_em_delete_output resp = { 0 };
+	uint32_t flags;
+	struct tf_session *tfs =
+		(struct tf_session *)(tfp->session->core_data);
+
+	req.fw_session_id =
+		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+
+	flags = (em_parms->dir == TF_DIR_TX ?
+		 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
+		 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
+	req.flags = tfp_cpu_to_le_16(flags);
+	req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
+
+	parms.tf_type = HWRM_TF_EM_DELETE;
+	parms.req_data = (uint32_t *)&req;
+	parms.req_size = sizeof(req);
+	parms.resp_data = (uint32_t *)&resp;
+	parms.resp_size = sizeof(resp);
+	parms.mailbox = TF_KONG_MB;
+
+	rc = tfp_send_msg_direct(tfp,
+				 &parms);
+	if (rc)
+		return rc;
+
+	em_parms->index = tfp_le_to_cpu_16(resp.em_index);
+
+	return 0;
+}
+
+int
+tf_msg_em_mem_rgtr(struct tf *tfp,
+		   int page_lvl,
+		   int page_size,
+		   uint64_t dma_addr,
+		   uint16_t *ctx_id)
+{
+	int rc;
+	struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
+	struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
+	struct tfp_send_msg_parms parms = { 0 };
+
+	req.page_level = page_lvl;
+	req.page_size = page_size;
+	req.page_dir = tfp_cpu_to_le_64(dma_addr);
 
-	req.page_level = page_lvl;
-	req.page_size = page_size;
-	req.page_dir = tfp_cpu_to_le_64(dma_addr);
-
 	parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
 	parms.req_data = (uint32_t *)&req;
 	parms.req_size = sizeof(req);
@@ -1208,11 +543,9 @@ int tf_msg_em_mem_rgtr(struct tf *tfp,
 	return rc;
 }
 
-/**
- * Sends EM mem unregister request to Firmware
- */
-int tf_msg_em_mem_unrgtr(struct tf *tfp,
-			 uint16_t  *ctx_id)
+int
+tf_msg_em_mem_unrgtr(struct tf *tfp,
+		     uint16_t *ctx_id)
 {
 	int rc;
 	struct hwrm_tf_ctxt_mem_unrgtr_input req = {0};
@@ -1233,12 +566,10 @@ int tf_msg_em_mem_unrgtr(struct tf *tfp,
 	return rc;
 }
 
-/**
- * Sends EM qcaps request to Firmware
- */
-int tf_msg_em_qcaps(struct tf *tfp,
-		    int dir,
-		    struct tf_em_caps *em_caps)
+int
+tf_msg_em_qcaps(struct tf *tfp,
+		int dir,
+		struct tf_em_caps *em_caps)
 {
 	int rc;
 	struct hwrm_tf_ext_em_qcaps_input  req = {0};
@@ -1273,17 +604,15 @@ int tf_msg_em_qcaps(struct tf *tfp,
 	return rc;
 }
 
-/**
- * Sends EM config request to Firmware
- */
-int tf_msg_em_cfg(struct tf *tfp,
-		  uint32_t   num_entries,
-		  uint16_t   key0_ctx_id,
-		  uint16_t   key1_ctx_id,
-		  uint16_t   record_ctx_id,
-		  uint16_t   efc_ctx_id,
-		  uint8_t    flush_interval,
-		  int        dir)
+int
+tf_msg_em_cfg(struct tf *tfp,
+	      uint32_t num_entries,
+	      uint16_t key0_ctx_id,
+	      uint16_t key1_ctx_id,
+	      uint16_t record_ctx_id,
+	      uint16_t efc_ctx_id,
+	      uint8_t flush_interval,
+	      int dir)
 {
 	int rc;
 	struct hwrm_tf_ext_em_cfg_input  req = {0};
@@ -1317,42 +646,23 @@ int tf_msg_em_cfg(struct tf *tfp,
 	return rc;
 }
 
-/**
- * Sends EM internal insert request to Firmware
- */
-int tf_msg_insert_em_internal_entry(struct tf *tfp,
-				struct tf_insert_em_entry_parms *em_parms,
-				uint16_t *rptr_index,
-				uint8_t *rptr_entry,
-				uint8_t *num_of_entries)
+int
+tf_msg_em_op(struct tf *tfp,
+	     int dir,
+	     uint16_t op)
 {
-	int                         rc;
-	struct tfp_send_msg_parms        parms = { 0 };
-	struct hwrm_tf_em_insert_input   req = { 0 };
-	struct hwrm_tf_em_insert_output  resp = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
-	struct tf_em_64b_entry *em_result =
-		(struct tf_em_64b_entry *)em_parms->em_record;
+	int rc;
+	struct hwrm_tf_ext_em_op_input req = {0};
+	struct hwrm_tf_ext_em_op_output resp = {0};
 	uint32_t flags;
+	struct tfp_send_msg_parms parms = { 0 };
 
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
-	tfp_memcpy(req.em_key,
-		   em_parms->key,
-		   ((em_parms->key_sz_in_bits + 7) / 8));
-
-	flags = (em_parms->dir == TF_DIR_TX ?
-		 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
-		 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
-	req.flags = tfp_cpu_to_le_16(flags);
-	req.strength =
-		(em_result->hdr.word1 & CFA_P4_EEM_ENTRY_STRENGTH_MASK) >>
-		CFA_P4_EEM_ENTRY_STRENGTH_SHIFT;
-	req.em_key_bitlen = em_parms->key_sz_in_bits;
-	req.action_ptr = em_result->hdr.pointer;
-	req.em_record_idx = *rptr_index;
+	flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
+		 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
+	req.flags = tfp_cpu_to_le_32(flags);
+	req.op = tfp_cpu_to_le_16(op);
 
-	parms.tf_type = HWRM_TF_EM_INSERT;
+	parms.tf_type = HWRM_TF_EXT_EM_OP;
 	parms.req_data = (uint32_t *)&req;
 	parms.req_size = sizeof(req);
 	parms.resp_data = (uint32_t *)&resp;
@@ -1361,75 +671,86 @@ int tf_msg_insert_em_internal_entry(struct tf *tfp,
 
 	rc = tfp_send_msg_direct(tfp,
 				 &parms);
-	if (rc)
-		return rc;
-
-	*rptr_entry = resp.rptr_entry;
-	*rptr_index = resp.rptr_index;
-	*num_of_entries = resp.num_of_entries;
-
-	return 0;
+	return rc;
 }
 
-/**
- * Sends EM delete insert request to Firmware
- */
-int tf_msg_delete_em_entry(struct tf *tfp,
-			   struct tf_delete_em_entry_parms *em_parms)
+int
+tf_msg_tcam_entry_set(struct tf *tfp,
+		      struct tf_tcam_set_parms *parms)
 {
-	int                             rc;
-	struct tfp_send_msg_parms       parms = { 0 };
-	struct hwrm_tf_em_delete_input  req = { 0 };
-	struct hwrm_tf_em_delete_output resp = { 0 };
-	uint32_t flags;
-	struct tf_session *tfs =
-		(struct tf_session *)(tfp->session->core_data);
+	int rc;
+	struct tfp_send_msg_parms mparms = { 0 };
+	struct hwrm_tf_tcam_set_input req = { 0 };
+	struct hwrm_tf_tcam_set_output resp = { 0 };
+	struct tf_msg_dma_buf buf = { 0 };
+	uint8_t *data = NULL;
+	int data_size = 0;
 
-	req.fw_session_id =
-		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+	req.type = parms->hcapi_type;
+	req.idx = tfp_cpu_to_le_16(parms->idx);
+	if (parms->dir == TF_DIR_TX)
+		req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
 
-	flags = (em_parms->dir == TF_DIR_TX ?
-		 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
-		 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
-	req.flags = tfp_cpu_to_le_16(flags);
-	req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
+	req.key_size = parms->key_size;
+	req.mask_offset = parms->key_size;
+	/* Result follows after key and mask, thus multiply by 2 */
+	req.result_offset = 2 * parms->key_size;
+	req.result_size = parms->result_size;
+	data_size = 2 * req.key_size + req.result_size;
 
-	parms.tf_type = HWRM_TF_EM_DELETE;
-	parms.req_data = (uint32_t *)&req;
-	parms.req_size = sizeof(req);
-	parms.resp_data = (uint32_t *)&resp;
-	parms.resp_size = sizeof(resp);
-	parms.mailbox = TF_KONG_MB;
+	if (data_size <= TF_PCI_BUF_SIZE_MAX) {
+		/* use pci buffer */
+		data = &req.dev_data[0];
+	} else {
+		/* use dma buffer */
+		req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
+		rc = tf_msg_alloc_dma_buf(&buf, data_size);
+		if (rc)
+			goto cleanup;
+		data = buf.va_addr;
+		tfp_memcpy(&req.dev_data[0],
+			   &buf.pa_addr,
+			   sizeof(buf.pa_addr));
+	}
+
+	tfp_memcpy(&data[0], parms->key, parms->key_size);
+	tfp_memcpy(&data[parms->key_size], parms->mask, parms->key_size);
+	tfp_memcpy(&data[req.result_offset], parms->result, parms->result_size);
+
+	mparms.tf_type = HWRM_TF_TCAM_SET;
+	mparms.req_data = (uint32_t *)&req;
+	mparms.req_size = sizeof(req);
+	mparms.resp_data = (uint32_t *)&resp;
+	mparms.resp_size = sizeof(resp);
+	mparms.mailbox = TF_KONG_MB;
 
 	rc = tfp_send_msg_direct(tfp,
-				 &parms);
+				 &mparms);
 	if (rc)
-		return rc;
+		goto cleanup;
 
-	em_parms->index = tfp_le_to_cpu_16(resp.em_index);
+cleanup:
+	tf_msg_free_dma_buf(&buf);
 
-	return 0;
+	return rc;
 }
 
-/**
- * Sends EM operation request to Firmware
- */
-int tf_msg_em_op(struct tf *tfp,
-		 int dir,
-		 uint16_t op)
+int
+tf_msg_tcam_entry_free(struct tf *tfp,
+		       struct tf_tcam_free_parms *in_parms)
 {
 	int rc;
-	struct hwrm_tf_ext_em_op_input req = {0};
-	struct hwrm_tf_ext_em_op_output resp = {0};
-	uint32_t flags;
+	struct hwrm_tf_tcam_free_input req =  { 0 };
+	struct hwrm_tf_tcam_free_output resp = { 0 };
 	struct tfp_send_msg_parms parms = { 0 };
 
-	flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
-		 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
-	req.flags = tfp_cpu_to_le_32(flags);
-	req.op = tfp_cpu_to_le_16(op);
+	req.type = in_parms->hcapi_type;
+	req.count = 1;
+	req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
+	if (in_parms->dir == TF_DIR_TX)
+		req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
 
-	parms.tf_type = HWRM_TF_EXT_EM_OP;
+	parms.tf_type = HWRM_TF_TCAM_FREE;
 	parms.req_data = (uint32_t *)&req;
 	parms.req_size = sizeof(req);
 	parms.resp_data = (uint32_t *)&resp;
@@ -1444,21 +765,32 @@ int tf_msg_em_op(struct tf *tfp,
 int
 tf_msg_set_tbl_entry(struct tf *tfp,
 		     enum tf_dir dir,
-		     enum tf_tbl_type type,
+		     uint16_t hcapi_type,
 		     uint16_t size,
 		     uint8_t *data,
 		     uint32_t index)
 {
 	int rc;
+	struct hwrm_tf_tbl_type_set_input req = { 0 };
+	struct hwrm_tf_tbl_type_set_output resp = { 0 };
 	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_tbl_type_set_input req = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+	struct tf_session *tfs;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
 
 	/* Populate the request */
 	req.fw_session_id =
 		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
 	req.flags = tfp_cpu_to_le_16(dir);
-	req.type = tfp_cpu_to_le_32(type);
+	req.type = tfp_cpu_to_le_32(hcapi_type);
 	req.size = tfp_cpu_to_le_16(size);
 	req.index = tfp_cpu_to_le_32(index);
 
@@ -1466,13 +798,15 @@ tf_msg_set_tbl_entry(struct tf *tfp,
 		   data,
 		   size);
 
-	MSG_PREP_NO_RESP(parms,
-			 TF_KONG_MB,
-			 HWRM_TF,
-			 HWRM_TFT_TBL_TYPE_SET,
-			 req);
+	parms.tf_type = HWRM_TF_TBL_TYPE_SET;
+	parms.req_data = (uint32_t *)&req;
+	parms.req_size = sizeof(req);
+	parms.resp_data = (uint32_t *)&resp;
+	parms.resp_size = sizeof(resp);
+	parms.mailbox = TF_KONG_MB;
 
-	rc = tfp_send_msg_tunneled(tfp, &parms);
+	rc = tfp_send_msg_direct(tfp,
+				 &parms);
 	if (rc)
 		return rc;
 
@@ -1482,32 +816,43 @@ tf_msg_set_tbl_entry(struct tf *tfp,
 int
 tf_msg_get_tbl_entry(struct tf *tfp,
 		     enum tf_dir dir,
-		     enum tf_tbl_type type,
+		     uint16_t hcapi_type,
 		     uint16_t size,
 		     uint8_t *data,
 		     uint32_t index)
 {
 	int rc;
+	struct hwrm_tf_tbl_type_get_input req = { 0 };
+	struct hwrm_tf_tbl_type_get_output resp = { 0 };
 	struct tfp_send_msg_parms parms = { 0 };
-	struct tf_tbl_type_get_input req = { 0 };
-	struct tf_tbl_type_get_output resp = { 0 };
-	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+	struct tf_session *tfs;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
 
 	/* Populate the request */
 	req.fw_session_id =
 		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
 	req.flags = tfp_cpu_to_le_16(dir);
-	req.type = tfp_cpu_to_le_32(type);
+	req.type = tfp_cpu_to_le_32(hcapi_type);
 	req.index = tfp_cpu_to_le_32(index);
 
-	MSG_PREP(parms,
-		 TF_KONG_MB,
-		 HWRM_TF,
-		 HWRM_TFT_TBL_TYPE_GET,
-		 req,
-		 resp);
+	parms.tf_type = HWRM_TF_TBL_TYPE_GET;
+	parms.req_data = (uint32_t *)&req;
+	parms.req_size = sizeof(req);
+	parms.resp_data = (uint32_t *)&resp;
+	parms.resp_size = sizeof(resp);
+	parms.mailbox = TF_KONG_MB;
 
-	rc = tfp_send_msg_tunneled(tfp, &parms);
+	rc = tfp_send_msg_direct(tfp,
+				 &parms);
 	if (rc)
 		return rc;
 
@@ -1522,6 +867,8 @@ tf_msg_get_tbl_entry(struct tf *tfp,
 	return tfp_le_to_cpu_32(parms.tf_resp_code);
 }
 
+/* HWRM Tunneled messages */
+
 int
 tf_msg_bulk_get_tbl_entry(struct tf *tfp,
 			  struct tf_bulk_get_tbl_entry_parms *params)
@@ -1562,96 +909,3 @@ tf_msg_bulk_get_tbl_entry(struct tf *tfp,
 
 	return tfp_le_to_cpu_32(parms.tf_resp_code);
 }
-
-int
-tf_msg_tcam_entry_set(struct tf *tfp,
-		      struct tf_tcam_set_parms *parms)
-{
-	int rc;
-	struct tfp_send_msg_parms mparms = { 0 };
-	struct hwrm_tf_tcam_set_input req = { 0 };
-	struct hwrm_tf_tcam_set_output resp = { 0 };
-	struct tf_msg_dma_buf buf = { 0 };
-	uint8_t *data = NULL;
-	int data_size = 0;
-
-	req.type = parms->type;
-
-	req.idx = tfp_cpu_to_le_16(parms->idx);
-	if (parms->dir == TF_DIR_TX)
-		req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
-
-	req.key_size = parms->key_size;
-	req.mask_offset = parms->key_size;
-	/* Result follows after key and mask, thus multiply by 2 */
-	req.result_offset = 2 * parms->key_size;
-	req.result_size = parms->result_size;
-	data_size = 2 * req.key_size + req.result_size;
-
-	if (data_size <= TF_PCI_BUF_SIZE_MAX) {
-		/* use pci buffer */
-		data = &req.dev_data[0];
-	} else {
-		/* use dma buffer */
-		req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
-		rc = tf_msg_alloc_dma_buf(&buf, data_size);
-		if (rc)
-			goto cleanup;
-		data = buf.va_addr;
-		tfp_memcpy(&req.dev_data[0],
-			   &buf.pa_addr,
-			   sizeof(buf.pa_addr));
-	}
-
-	tfp_memcpy(&data[0], parms->key, parms->key_size);
-	tfp_memcpy(&data[parms->key_size], parms->mask, parms->key_size);
-	tfp_memcpy(&data[req.result_offset], parms->result, parms->result_size);
-
-	mparms.tf_type = HWRM_TF_TCAM_SET;
-	mparms.req_data = (uint32_t *)&req;
-	mparms.req_size = sizeof(req);
-	mparms.resp_data = (uint32_t *)&resp;
-	mparms.resp_size = sizeof(resp);
-	mparms.mailbox = TF_KONG_MB;
-
-	rc = tfp_send_msg_direct(tfp,
-				 &mparms);
-	if (rc)
-		goto cleanup;
-
-cleanup:
-	tf_msg_free_dma_buf(&buf);
-
-	return rc;
-}
-
-int
-tf_msg_tcam_entry_free(struct tf *tfp,
-		       struct tf_tcam_free_parms *in_parms)
-{
-	int rc;
-	struct hwrm_tf_tcam_free_input req =  { 0 };
-	struct hwrm_tf_tcam_free_output resp = { 0 };
-	struct tfp_send_msg_parms parms = { 0 };
-
-	/* Populate the request */
-	rc = tf_tcam_tbl_2_hwrm(in_parms->type, &req.type);
-	if (rc != 0)
-		return rc;
-
-	req.count = 1;
-	req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
-	if (in_parms->dir == TF_DIR_TX)
-		req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
-
-	parms.tf_type = HWRM_TF_TCAM_FREE;
-	parms.req_data = (uint32_t *)&req;
-	parms.req_size = sizeof(req);
-	parms.resp_data = (uint32_t *)&resp;
-	parms.resp_size = sizeof(resp);
-	parms.mailbox = TF_KONG_MB;
-
-	rc = tfp_send_msg_direct(tfp,
-				 &parms);
-	return rc;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_msg.h b/drivers/net/bnxt/tf_core/tf_msg.h
index 1ff1044e8..8e276d4c0 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.h
+++ b/drivers/net/bnxt/tf_core/tf_msg.h
@@ -16,6 +16,8 @@
 
 struct tf;
 
+/* HWRM Direct messages */
+
 /**
  * Sends session open request to Firmware
  *
@@ -29,7 +31,7 @@ struct tf;
  *   Pointer to the fw_session_id that is allocated on firmware side
  *
  * Returns:
- *
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_session_open(struct tf *tfp,
 			char *ctrl_chan_name,
@@ -46,7 +48,7 @@ int tf_msg_session_open(struct tf *tfp,
  *   time of session open
  *
  * Returns:
- *
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_session_attach(struct tf *tfp,
 			  char *ctrl_channel_name,
@@ -59,73 +61,21 @@ int tf_msg_session_attach(struct tf *tfp,
  *   Pointer to session handle
  *
  * Returns:
- *
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_session_close(struct tf *tfp);
 
 /**
  * Sends session query config request to TF Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_session_qcfg(struct tf *tfp);
 
-/**
- * Sends session HW resource query capability request to TF Firmware
- */
-int tf_msg_session_hw_resc_qcaps(struct tf *tfp,
-				 enum tf_dir dir,
-				 struct tf_rm_hw_query *hw_query);
-
-/**
- * Sends session HW resource allocation request to TF Firmware
- */
-int tf_msg_session_hw_resc_alloc(struct tf *tfp,
-				 enum tf_dir dir,
-				 struct tf_rm_hw_alloc *hw_alloc,
-				 struct tf_rm_entry *hw_entry);
-
-/**
- * Sends session HW resource free request to TF Firmware
- */
-int tf_msg_session_hw_resc_free(struct tf *tfp,
-				enum tf_dir dir,
-				struct tf_rm_entry *hw_entry);
-
-/**
- * Sends session HW resource flush request to TF Firmware
- */
-int tf_msg_session_hw_resc_flush(struct tf *tfp,
-				 enum tf_dir dir,
-				 struct tf_rm_entry *hw_entry);
-
-/**
- * Sends session SRAM resource query capability request to TF Firmware
- */
-int tf_msg_session_sram_resc_qcaps(struct tf *tfp,
-				   enum tf_dir dir,
-				   struct tf_rm_sram_query *sram_query);
-
-/**
- * Sends session SRAM resource allocation request to TF Firmware
- */
-int tf_msg_session_sram_resc_alloc(struct tf *tfp,
-				   enum tf_dir dir,
-				   struct tf_rm_sram_alloc *sram_alloc,
-				   struct tf_rm_entry *sram_entry);
-
-/**
- * Sends session SRAM resource free request to TF Firmware
- */
-int tf_msg_session_sram_resc_free(struct tf *tfp,
-				  enum tf_dir dir,
-				  struct tf_rm_entry *sram_entry);
-
-/**
- * Sends session SRAM resource flush request to TF Firmware
- */
-int tf_msg_session_sram_resc_flush(struct tf *tfp,
-				   enum tf_dir dir,
-				   struct tf_rm_entry *sram_entry);
-
 /**
  * Sends session HW resource query capability request to TF Firmware
  *
@@ -183,6 +133,21 @@ int tf_msg_session_resc_alloc(struct tf *tfp,
 
 /**
  * Sends session resource flush request to TF Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] dir
+ *   Receive or Transmit direction
+ *
+ * [in] size
+ *   Number of elements in the req and resv arrays
+ *
+ * [in] resv
+ *   Pointer to an array of reserved elements that needs to be flushed
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_session_resc_flush(struct tf *tfp,
 			      enum tf_dir dir,
@@ -190,6 +155,24 @@ int tf_msg_session_resc_flush(struct tf *tfp,
 			      struct tf_rm_resc_entry *resv);
 /**
  * Sends EM internal insert request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] params
+ *   Pointer to em insert parameter list
+ *
+ * [in,out] rptr_index
+ *   Record ptr index; input hint, updated from the firmware response
+ *
+ * [out] rptr_entry
+ *   Record ptr entry returned by firmware
+ *
+ * [out] num_of_entries
+ *   Number of entries the firmware inserted
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_insert_em_internal_entry(struct tf *tfp,
 				    struct tf_insert_em_entry_parms *params,
@@ -198,26 +181,75 @@ int tf_msg_insert_em_internal_entry(struct tf *tfp,
 				    uint8_t *num_of_entries);
 /**
  * Sends EM internal delete request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] em_parms
+ *   Pointer to em delete parameters
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_delete_em_entry(struct tf *tfp,
 			   struct tf_delete_em_entry_parms *em_parms);
+
 /**
  * Sends EM mem register request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] page_lvl
+ *   Page level
+ *
+ * [in] page_size
+ *   Page size
+ *
+ * [in] dma_addr
+ *   DMA Address for the memory page
+ *
+ * [in] ctx_id
+ *   Context id
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_em_mem_rgtr(struct tf *tfp,
-		       int           page_lvl,
-		       int           page_size,
-		       uint64_t      dma_addr,
-		       uint16_t     *ctx_id);
+		       int page_lvl,
+		       int page_size,
+		       uint64_t dma_addr,
+		       uint16_t *ctx_id);
 
 /**
  * Sends EM mem unregister request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] ctx_id
+ *   Context id
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_em_mem_unrgtr(struct tf *tfp,
-			 uint16_t     *ctx_id);
+			 uint16_t *ctx_id);
 
 /**
  * Sends EM qcaps request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] dir
+ *   Receive or Transmit direction
+ *
+ * [in] em_caps
+ *   Pointer to EM capabilities
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_em_qcaps(struct tf *tfp,
 		    int dir,
@@ -225,22 +257,63 @@ int tf_msg_em_qcaps(struct tf *tfp,
 
 /**
  * Sends EM config request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] num_entries
+ *   EM Table, key 0, number of entries to configure
+ *
+ * [in] key0_ctx_id
+ *   EM Table, Key 0 context id
+ *
+ * [in] key1_ctx_id
+ *   EM Table, Key 1 context id
+ *
+ * [in] record_ctx_id
+ *   EM Table, Record context id
+ *
+ * [in] efc_ctx_id
+ *   EM Table, EFC Table context id
+ *
+ * [in] flush_interval
+ *   Flush pending HW cached flows every 1/10th of value set in
+ *   seconds, both idle and active flows are flushed from the HW
+ *   cache. If set to 0, this feature will be disabled.
+ *
+ * [in] dir
+ *   Receive or Transmit direction
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_em_cfg(struct tf *tfp,
-		  uint32_t      num_entries,
-		  uint16_t      key0_ctx_id,
-		  uint16_t      key1_ctx_id,
-		  uint16_t      record_ctx_id,
-		  uint16_t      efc_ctx_id,
-		  uint8_t       flush_interval,
-		  int           dir);
+		  uint32_t num_entries,
+		  uint16_t key0_ctx_id,
+		  uint16_t key1_ctx_id,
+		  uint16_t record_ctx_id,
+		  uint16_t efc_ctx_id,
+		  uint8_t flush_interval,
+		  int dir);
 
 /**
  * Sends EM operation request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] dir
+ *   Receive or Transmit direction
+ *
+ * [in] op
+ *   CFA Operator
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
  */
 int tf_msg_em_op(struct tf *tfp,
-		 int        dir,
-		 uint16_t   op);
+		 int dir,
+		 uint16_t op);
 
 /**
  * Sends tcam entry 'set' to the Firmware.
@@ -281,7 +354,7 @@ int tf_msg_tcam_entry_free(struct tf *tfp,
  * [in] dir
  *   Direction location of the element to set
  *
- * [in] type
+ * [in] hcapi_type
  *   Type of the object to set
  *
  * [in] size
@@ -298,7 +371,7 @@ int tf_msg_tcam_entry_free(struct tf *tfp,
  */
 int tf_msg_set_tbl_entry(struct tf *tfp,
 			 enum tf_dir dir,
-			 enum tf_tbl_type type,
+			 uint16_t hcapi_type,
 			 uint16_t size,
 			 uint8_t *data,
 			 uint32_t index);
@@ -312,7 +385,7 @@ int tf_msg_set_tbl_entry(struct tf *tfp,
  * [in] dir
  *   Direction location of the element to get
  *
- * [in] type
+ * [in] hcapi_type
  *   Type of the object to get
  *
  * [in] size
@@ -329,11 +402,13 @@ int tf_msg_set_tbl_entry(struct tf *tfp,
  */
 int tf_msg_get_tbl_entry(struct tf *tfp,
 			 enum tf_dir dir,
-			 enum tf_tbl_type type,
+			 uint16_t hcapi_type,
 			 uint16_t size,
 			 uint8_t *data,
 			 uint32_t index);
 
+/* HWRM Tunneled messages */
+
 /**
  * Sends bulk get message of a Table Type element to the firmware.
  *
diff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c
index b6fe2f1ad..e0a84e64d 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.c
+++ b/drivers/net/bnxt/tf_core/tf_rm.c
@@ -1818,16 +1818,8 @@ tf_rm_allocate_validate_hw(struct tf *tfp,
 		hw_entries = tfs->resc.tx.hw_entry;
 
 	/* Query for Session HW Resources */
-	rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
-	if (rc) {
-		/* Log error */
-		TFP_DRV_LOG(ERR,
-			    "%s, HW qcaps message send failed, rc:%s\n",
-			    tf_dir_2_str(dir),
-			    strerror(-rc));
-		goto cleanup;
-	}
 
+	memset(&hw_query, 0, sizeof(hw_query)); /* RSXX */
 	rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
 	if (rc) {
 		/* Log error */
@@ -1846,16 +1838,6 @@ tf_rm_allocate_validate_hw(struct tf *tfp,
 		hw_alloc.hw_num[i] = hw_query.hw_query[i].max;
 
 	/* Allocate Session HW Resources */
-	rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
-	if (rc) {
-		/* Log error */
-		TFP_DRV_LOG(ERR,
-			    "%s, HW alloc message send failed, rc:%s\n",
-			    tf_dir_2_str(dir),
-			    strerror(-rc));
-		goto cleanup;
-	}
-
 	/* Perform HW allocation validation as its possible the
 	 * resource availability changed between qcaps and alloc
 	 */
@@ -1906,17 +1888,7 @@ tf_rm_allocate_validate_sram(struct tf *tfp,
 	else
 		sram_entries = tfs->resc.tx.sram_entry;
 
-	/* Query for Session SRAM Resources */
-	rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
-	if (rc) {
-		/* Log error */
-		TFP_DRV_LOG(ERR,
-			    "%s, SRAM qcaps message send failed, rc:%s\n",
-			    tf_dir_2_str(dir),
-			    strerror(-rc));
-		goto cleanup;
-	}
-
+	memset(&sram_query, 0, sizeof(sram_query)); /* RSXX */
 	rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
 	if (rc) {
 		/* Log error */
@@ -1934,20 +1906,6 @@ tf_rm_allocate_validate_sram(struct tf *tfp,
 	for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
 		sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
 
-	/* Allocate Session SRAM Resources */
-	rc = tf_msg_session_sram_resc_alloc(tfp,
-					    dir,
-					    &sram_alloc,
-					    sram_entries);
-	if (rc) {
-		/* Log error */
-		TFP_DRV_LOG(ERR,
-			    "%s, SRAM alloc message send failed, rc:%s\n",
-			    tf_dir_2_str(dir),
-			    strerror(-rc));
-		goto cleanup;
-	}
-
 	/* Perform SRAM allocation validation as its possible the
 	 * resource availability changed between qcaps and alloc
 	 */
@@ -2798,17 +2756,6 @@ tf_rm_close(struct tf *tfp)
 
 			/* Log the entries to be flushed */
 			tf_rm_log_hw_flush(i, hw_flush_entries);
-			rc = tf_msg_session_hw_resc_flush(tfp,
-							  i,
-							  hw_flush_entries);
-			if (rc) {
-				rc_close = rc;
-				/* Log error */
-				TFP_DRV_LOG(ERR,
-					    "%s, HW flush failed, rc:%s\n",
-					    tf_dir_2_str(i),
-					    strerror(-rc));
-			}
 		}
 
 		/* Check for any not previously freed SRAM resources
@@ -2828,38 +2775,6 @@ tf_rm_close(struct tf *tfp)
 
 			/* Log the entries to be flushed */
 			tf_rm_log_sram_flush(i, sram_flush_entries);
-
-			rc = tf_msg_session_sram_resc_flush(tfp,
-							    i,
-							    sram_flush_entries);
-			if (rc) {
-				rc_close = rc;
-				/* Log error */
-				TFP_DRV_LOG(ERR,
-					    "%s, HW flush failed, rc:%s\n",
-					    tf_dir_2_str(i),
-					    strerror(-rc));
-			}
-		}
-
-		rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
-		if (rc) {
-			rc_close = rc;
-			/* Log error */
-			TFP_DRV_LOG(ERR,
-				    "%s, HW free failed, rc:%s\n",
-				    tf_dir_2_str(i),
-				    strerror(-rc));
-		}
-
-		rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
-		if (rc) {
-			rc_close = rc;
-			/* Log error */
-			TFP_DRV_LOG(ERR,
-				    "%s, SRAM free failed, rc:%s\n",
-				    tf_dir_2_str(i),
-				    strerror(-rc));
 		}
 	}
 
diff --git a/drivers/net/bnxt/tf_core/tf_rm_new.c b/drivers/net/bnxt/tf_core/tf_rm_new.c
index de8f11955..2d9be654a 100644
--- a/drivers/net/bnxt/tf_core/tf_rm_new.c
+++ b/drivers/net/bnxt/tf_core/tf_rm_new.c
@@ -95,7 +95,9 @@ struct tf_rm_new_db {
  *   - EOPNOTSUPP - Operation not supported
  */
 static void
-tf_rm_count_hcapi_reservations(struct tf_rm_element_cfg *cfg,
+tf_rm_count_hcapi_reservations(enum tf_dir dir,
+			       enum tf_device_module_type type,
+			       struct tf_rm_element_cfg *cfg,
 			       uint16_t *reservations,
 			       uint16_t count,
 			       uint16_t *valid_count)
@@ -107,6 +109,26 @@ tf_rm_count_hcapi_reservations(struct tf_rm_element_cfg *cfg,
 		if (cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI &&
 		    reservations[i] > 0)
 			cnt++;
+
+		/* Only log a msg if reservation of a type is attempted
+		 * but not supported. The EM module is ignored as it uses
+		 * a split configuration array and would thus fail this
+		 * type of check.
+		 */
+		if (type != TF_DEVICE_MODULE_TYPE_EM &&
+		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
+		    reservations[i] > 0) {
+			TFP_DRV_LOG(ERR,
+				"%s, %s, %s allocation not supported\n",
+				tf_device_module_type_2_str(type),
+				tf_dir_2_str(dir),
+				tf_device_module_type_subtype_2_str(type, i));
+			printf("%s, %s, %s allocation of %d not supported\n",
+				tf_device_module_type_2_str(type),
+				tf_dir_2_str(dir),
+			       tf_device_module_type_subtype_2_str(type, i),
+			       reservations[i]);
+		}
 	}
 
 	*valid_count = cnt;
@@ -405,7 +427,9 @@ tf_rm_create_db(struct tf *tfp,
 	 * the DB holds them all as to give a fast lookup. We can also
 	 * remove entries where there are no request for elements.
 	 */
-	tf_rm_count_hcapi_reservations(parms->cfg,
+	tf_rm_count_hcapi_reservations(parms->dir,
+				       parms->type,
+				       parms->cfg,
 				       parms->alloc_cnt,
 				       parms->num_elements,
 				       &hcapi_items);
@@ -507,6 +531,11 @@ tf_rm_create_db(struct tf *tfp,
 			db[i].alloc.entry.start = resv[j].start;
 			db[i].alloc.entry.stride = resv[j].stride;
 
+			printf("Entry:%d Start:%d Stride:%d\n",
+			       i,
+			       resv[j].start,
+			       resv[j].stride);
+
 			/* Create pool */
 			pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
 				     sizeof(struct bitalloc));
@@ -548,11 +577,16 @@ tf_rm_create_db(struct tf *tfp,
 		}
 	}
 
-	rm_db->num_entries = i;
+	rm_db->num_entries = parms->num_elements;
 	rm_db->dir = parms->dir;
 	rm_db->type = parms->type;
 	*parms->rm_db = (void *)rm_db;
 
+	printf("%s: type:%d num_entries:%d\n",
+	       tf_dir_2_str(parms->dir),
+	       parms->type,
+	       i);
+
 	tfp_free((void *)req);
 	tfp_free((void *)resv);
 
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c
index e594f0248..d7f5de4c4 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl.c
@@ -26,741 +26,6 @@
 #include "stack.h"
 #include "tf_common.h"
 
-#define PTU_PTE_VALID          0x1UL
-#define PTU_PTE_LAST           0x2UL
-#define PTU_PTE_NEXT_TO_LAST   0x4UL
-
-/* Number of pointers per page_size */
-#define	MAX_PAGE_PTRS(page_size)  ((page_size) / sizeof(void *))
-
-#define TF_EM_PG_SZ_4K        (1 << 12)
-#define TF_EM_PG_SZ_8K        (1 << 13)
-#define TF_EM_PG_SZ_64K       (1 << 16)
-#define TF_EM_PG_SZ_256K      (1 << 18)
-#define TF_EM_PG_SZ_1M        (1 << 20)
-#define TF_EM_PG_SZ_2M        (1 << 21)
-#define TF_EM_PG_SZ_4M        (1 << 22)
-#define TF_EM_PG_SZ_1G        (1 << 30)
-
-#define	TF_EM_CTX_ID_INVALID   0xFFFF
-
-#define	TF_EM_MIN_ENTRIES     (1 << 15) /* 32K */
-#define	TF_EM_MAX_ENTRIES     (1 << 27) /* 128M */
-
-/**
- * Function to free a page table
- *
- * [in] tp
- *   Pointer to the page table to free
- */
-static void
-tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
-{
-	uint32_t i;
-
-	for (i = 0; i < tp->pg_count; i++) {
-		if (!tp->pg_va_tbl[i]) {
-			TFP_DRV_LOG(WARNING,
-				    "No mapping for page: %d table: %016" PRIu64 "\n",
-				    i,
-				    (uint64_t)(uintptr_t)tp);
-			continue;
-		}
-
-		tfp_free(tp->pg_va_tbl[i]);
-		tp->pg_va_tbl[i] = NULL;
-	}
-
-	tp->pg_count = 0;
-	tfp_free(tp->pg_va_tbl);
-	tp->pg_va_tbl = NULL;
-	tfp_free(tp->pg_pa_tbl);
-	tp->pg_pa_tbl = NULL;
-}
-
-/**
- * Function to free an EM table
- *
- * [in] tbl
- *   Pointer to the EM table to free
- */
-static void
-tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
-{
-	struct hcapi_cfa_em_page_tbl *tp;
-	int i;
-
-	for (i = 0; i < tbl->num_lvl; i++) {
-		tp = &tbl->pg_tbl[i];
-		TFP_DRV_LOG(INFO,
-			   "EEM: Freeing page table: size %u lvl %d cnt %u\n",
-			   TF_EM_PAGE_SIZE,
-			    i,
-			    tp->pg_count);
-
-		tf_em_free_pg_tbl(tp);
-	}
-
-	tbl->l0_addr = NULL;
-	tbl->l0_dma_addr = 0;
-	tbl->num_lvl = 0;
-	tbl->num_data_pages = 0;
-}
-
-/**
- * Allocation of page tables
- *
- * [in] tfp
- *   Pointer to a TruFlow handle
- *
- * [in] pg_count
- *   Page count to allocate
- *
- * [in] pg_size
- *   Size of each page
- *
- * Returns:
- *   0       - Success
- *   -ENOMEM - Out of memory
- */
-static int
-tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
-		   uint32_t pg_count,
-		   uint32_t pg_size)
-{
-	uint32_t i;
-	struct tfp_calloc_parms parms;
-
-	parms.nitems = pg_count;
-	parms.size = sizeof(void *);
-	parms.alignment = 0;
-
-	if (tfp_calloc(&parms) != 0)
-		return -ENOMEM;
-
-	tp->pg_va_tbl = parms.mem_va;
-
-	if (tfp_calloc(&parms) != 0) {
-		tfp_free(tp->pg_va_tbl);
-		return -ENOMEM;
-	}
-
-	tp->pg_pa_tbl = parms.mem_va;
-
-	tp->pg_count = 0;
-	tp->pg_size = pg_size;
-
-	for (i = 0; i < pg_count; i++) {
-		parms.nitems = 1;
-		parms.size = pg_size;
-		parms.alignment = TF_EM_PAGE_ALIGNMENT;
-
-		if (tfp_calloc(&parms) != 0)
-			goto cleanup;
-
-		tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
-		tp->pg_va_tbl[i] = parms.mem_va;
-
-		memset(tp->pg_va_tbl[i], 0, pg_size);
-		tp->pg_count++;
-	}
-
-	return 0;
-
-cleanup:
-	tf_em_free_pg_tbl(tp);
-	return -ENOMEM;
-}
-
-/**
- * Allocates EM page tables
- *
- * [in] tbl
- *   Table to allocate pages for
- *
- * Returns:
- *   0       - Success
- *   -ENOMEM - Out of memory
- */
-static int
-tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
-{
-	struct hcapi_cfa_em_page_tbl *tp;
-	int rc = 0;
-	int i;
-	uint32_t j;
-
-	for (i = 0; i < tbl->num_lvl; i++) {
-		tp = &tbl->pg_tbl[i];
-
-		rc = tf_em_alloc_pg_tbl(tp,
-					tbl->page_cnt[i],
-					TF_EM_PAGE_SIZE);
-		if (rc) {
-			TFP_DRV_LOG(WARNING,
-				"Failed to allocate page table: lvl: %d, rc:%s\n",
-				i,
-				strerror(-rc));
-			goto cleanup;
-		}
-
-		for (j = 0; j < tp->pg_count; j++) {
-			TFP_DRV_LOG(INFO,
-				"EEM: Allocated page table: size %u lvl %d cnt"
-				" %u VA:%p PA:%p\n",
-				TF_EM_PAGE_SIZE,
-				i,
-				tp->pg_count,
-				(uint32_t *)tp->pg_va_tbl[j],
-				(uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]);
-		}
-	}
-	return rc;
-
-cleanup:
-	tf_em_free_page_table(tbl);
-	return rc;
-}
-
-/**
- * Links EM page tables
- *
- * [in] tp
- *   Pointer to page table
- *
- * [in] tp_next
- *   Pointer to the next page table
- *
- * [in] set_pte_last
- *   Flag controlling if the page table is last
- */
-static void
-tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
-		      struct hcapi_cfa_em_page_tbl *tp_next,
-		      bool set_pte_last)
-{
-	uint64_t *pg_pa = tp_next->pg_pa_tbl;
-	uint64_t *pg_va;
-	uint64_t valid;
-	uint32_t k = 0;
-	uint32_t i;
-	uint32_t j;
-
-	for (i = 0; i < tp->pg_count; i++) {
-		pg_va = tp->pg_va_tbl[i];
-
-		for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
-			if (k == tp_next->pg_count - 2 && set_pte_last)
-				valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
-			else if (k == tp_next->pg_count - 1 && set_pte_last)
-				valid = PTU_PTE_LAST | PTU_PTE_VALID;
-			else
-				valid = PTU_PTE_VALID;
-
-			pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
-			if (++k >= tp_next->pg_count)
-				return;
-		}
-	}
-}
-
-/**
- * Setup a EM page table
- *
- * [in] tbl
- *   Pointer to EM page table
- */
-static void
-tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
-{
-	struct hcapi_cfa_em_page_tbl *tp_next;
-	struct hcapi_cfa_em_page_tbl *tp;
-	bool set_pte_last = 0;
-	int i;
-
-	for (i = 0; i < tbl->num_lvl - 1; i++) {
-		tp = &tbl->pg_tbl[i];
-		tp_next = &tbl->pg_tbl[i + 1];
-		if (i == tbl->num_lvl - 2)
-			set_pte_last = 1;
-		tf_em_link_page_table(tp, tp_next, set_pte_last);
-	}
-
-	tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
-	tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
-}
-
-/**
- * Given the page size, size of each data item (entry size),
- * and the total number of entries needed, determine the number
- * of page table levels and the number of data pages required.
- *
- * [in] page_size
- *   Page size
- *
- * [in] entry_size
- *   Entry size
- *
- * [in] num_entries
- *   Number of entries needed
- *
- * [out] num_data_pages
- *   Number of pages required
- *
- * Returns:
- *   Success  - Number of EM page levels required
- *   -ENOMEM  - Out of memory
- */
-static int
-tf_em_size_page_tbl_lvl(uint32_t page_size,
-			uint32_t entry_size,
-			uint32_t num_entries,
-			uint64_t *num_data_pages)
-{
-	uint64_t lvl_data_size = page_size;
-	int lvl = TF_PT_LVL_0;
-	uint64_t data_size;
-
-	*num_data_pages = 0;
-	data_size = (uint64_t)num_entries * entry_size;
-
-	while (lvl_data_size < data_size) {
-		lvl++;
-
-		if (lvl == TF_PT_LVL_1)
-			lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
-				page_size;
-		else if (lvl == TF_PT_LVL_2)
-			lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
-				MAX_PAGE_PTRS(page_size) * page_size;
-		else
-			return -ENOMEM;
-	}
-
-	*num_data_pages = roundup(data_size, page_size) / page_size;
-
-	return lvl;
-}
-
-/**
- * Return the number of page table pages needed to
- * reference the given number of next level pages.
- *
- * [in] num_pages
- *   Number of EM pages
- *
- * [in] page_size
- *   Size of each EM page
- *
- * Returns:
- *   Number of EM page table pages
- */
-static uint32_t
-tf_em_page_tbl_pgcnt(uint32_t num_pages,
-		     uint32_t page_size)
-{
-	return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
-		       MAX_PAGE_PTRS(page_size);
-	return 0;
-}
-
-/**
- * Given the number of data pages, page_size and the maximum
- * number of page table levels (already determined), size
- * the number of page table pages required at each level.
- *
- * [in] max_lvl
- *   Max number of levels
- *
- * [in] num_data_pages
- *   Number of EM data pages
- *
- * [in] page_size
- *   Size of an EM page
- *
- * [out] *page_cnt
- *   EM page count
- */
-static void
-tf_em_size_page_tbls(int max_lvl,
-		     uint64_t num_data_pages,
-		     uint32_t page_size,
-		     uint32_t *page_cnt)
-{
-	if (max_lvl == TF_PT_LVL_0) {
-		page_cnt[TF_PT_LVL_0] = num_data_pages;
-	} else if (max_lvl == TF_PT_LVL_1) {
-		page_cnt[TF_PT_LVL_1] = num_data_pages;
-		page_cnt[TF_PT_LVL_0] =
-		tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
-	} else if (max_lvl == TF_PT_LVL_2) {
-		page_cnt[TF_PT_LVL_2] = num_data_pages;
-		page_cnt[TF_PT_LVL_1] =
-		tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
-		page_cnt[TF_PT_LVL_0] =
-		tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
-	} else {
-		return;
-	}
-}
-
-/**
- * Size the EM table based on capabilities
- *
- * [in] tbl
- *   EM table to size
- *
- * Returns:
- *   0        - Success
- *   - EINVAL - Parameter error
- *   - ENOMEM - Out of memory
- */
-static int
-tf_em_size_table(struct hcapi_cfa_em_table *tbl)
-{
-	uint64_t num_data_pages;
-	uint32_t *page_cnt;
-	int max_lvl;
-	uint32_t num_entries;
-	uint32_t cnt = TF_EM_MIN_ENTRIES;
-
-	/* Ignore entry if both size and number are zero */
-	if (!tbl->entry_size && !tbl->num_entries)
-		return 0;
-
-	/* If only one is set then error */
-	if (!tbl->entry_size || !tbl->num_entries)
-		return -EINVAL;
-
-	/* Determine number of page table levels and the number
-	 * of data pages needed to process the given eem table.
-	 */
-	if (tbl->type == TF_RECORD_TABLE) {
-		/*
-		 * For action records just a memory size is provided. Work
-		 * backwards to resolve to number of entries
-		 */
-		num_entries = tbl->num_entries / tbl->entry_size;
-		if (num_entries < TF_EM_MIN_ENTRIES) {
-			num_entries = TF_EM_MIN_ENTRIES;
-		} else {
-			while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
-				cnt *= 2;
-			num_entries = cnt;
-		}
-	} else {
-		num_entries = tbl->num_entries;
-	}
-
-	max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
-					  tbl->entry_size,
-					  tbl->num_entries,
-					  &num_data_pages);
-	if (max_lvl < 0) {
-		TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
-		TFP_DRV_LOG(WARNING,
-			    "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
-			    tbl->type, (uint64_t)num_entries * tbl->entry_size,
-			    TF_EM_PAGE_SIZE);
-		return -ENOMEM;
-	}
-
-	tbl->num_lvl = max_lvl + 1;
-	tbl->num_data_pages = num_data_pages;
-
-	/* Determine the number of pages needed at each level */
-	page_cnt = tbl->page_cnt;
-	memset(page_cnt, 0, sizeof(tbl->page_cnt));
-	tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
-				page_cnt);
-
-	TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
-	TFP_DRV_LOG(INFO,
-		    "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
-		    max_lvl + 1,
-		    (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
-		    num_data_pages,
-		    page_cnt[TF_PT_LVL_0],
-		    page_cnt[TF_PT_LVL_1],
-		    page_cnt[TF_PT_LVL_2]);
-
-	return 0;
-}
-
-/**
- * Unregisters EM Ctx in Firmware
- *
- * [in] tfp
- *   Pointer to a TruFlow handle
- *
- * [in] tbl_scope_cb
- *   Pointer to a table scope control block
- *
- * [in] dir
- *   Receive or transmit direction
- */
-static void
-tf_em_ctx_unreg(struct tf *tfp,
-		struct tf_tbl_scope_cb *tbl_scope_cb,
-		int dir)
-{
-	struct hcapi_cfa_em_ctx_mem_info *ctxp =
-		&tbl_scope_cb->em_ctx_info[dir];
-	struct hcapi_cfa_em_table *tbl;
-	int i;
-
-	for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
-		tbl = &ctxp->em_tables[i];
-
-		if (tbl->num_entries != 0 && tbl->entry_size != 0) {
-			tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
-			tf_em_free_page_table(tbl);
-		}
-	}
-}
-
-/**
- * Registers EM Ctx in Firmware
- *
- * [in] tfp
- *   Pointer to a TruFlow handle
- *
- * [in] tbl_scope_cb
- *   Pointer to a table scope control block
- *
- * [in] dir
- *   Receive or transmit direction
- *
- * Returns:
- *   0       - Success
- *   -ENOMEM - Out of Memory
- */
-static int
-tf_em_ctx_reg(struct tf *tfp,
-	      struct tf_tbl_scope_cb *tbl_scope_cb,
-	      int dir)
-{
-	struct hcapi_cfa_em_ctx_mem_info *ctxp =
-		&tbl_scope_cb->em_ctx_info[dir];
-	struct hcapi_cfa_em_table *tbl;
-	int rc = 0;
-	int i;
-
-	for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
-		tbl = &ctxp->em_tables[i];
-
-		if (tbl->num_entries && tbl->entry_size) {
-			rc = tf_em_size_table(tbl);
-
-			if (rc)
-				goto cleanup;
-
-			rc = tf_em_alloc_page_table(tbl);
-			if (rc)
-				goto cleanup;
-
-			tf_em_setup_page_table(tbl);
-			rc = tf_msg_em_mem_rgtr(tfp,
-						tbl->num_lvl - 1,
-						TF_EM_PAGE_SIZE_ENUM,
-						tbl->l0_dma_addr,
-						&tbl->ctx_id);
-			if (rc)
-				goto cleanup;
-		}
-	}
-	return rc;
-
-cleanup:
-	tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
-	return rc;
-}
-
-/**
- * Validates EM number of entries requested
- *
- * [in] tbl_scope_cb
- *   Pointer to table scope control block to be populated
- *
- * [in] parms
- *   Pointer to input parameters
- *
- * Returns:
- *   0       - Success
- *   -EINVAL - Parameter error
- */
-static int
-tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
-			   struct tf_alloc_tbl_scope_parms *parms)
-{
-	uint32_t cnt;
-
-	if (parms->rx_mem_size_in_mb != 0) {
-		uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
-		uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
-				     + 1);
-		uint32_t num_entries = (parms->rx_mem_size_in_mb *
-					TF_MEGABYTE) / (key_b + action_b);
-
-		if (num_entries < TF_EM_MIN_ENTRIES) {
-			TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
-				    "%uMB\n",
-				    parms->rx_mem_size_in_mb);
-			return -EINVAL;
-		}
-
-		cnt = TF_EM_MIN_ENTRIES;
-		while (num_entries > cnt &&
-		       cnt <= TF_EM_MAX_ENTRIES)
-			cnt *= 2;
-
-		if (cnt > TF_EM_MAX_ENTRIES) {
-			TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
-				    "%u\n",
-		       (parms->tx_num_flows_in_k * TF_KILOBYTE));
-			return -EINVAL;
-		}
-
-		parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
-	} else {
-		if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
-		    TF_EM_MIN_ENTRIES ||
-		    (parms->rx_num_flows_in_k * TF_KILOBYTE) >
-		    tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Invalid number of Rx flows "
-				    "requested:%u max:%u\n",
-				    parms->rx_num_flows_in_k * TF_KILOBYTE,
-			tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
-			return -EINVAL;
-		}
-
-		/* must be a power-of-2 supported value
-		 * in the range 32K - 128M
-		 */
-		cnt = TF_EM_MIN_ENTRIES;
-		while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
-		       cnt <= TF_EM_MAX_ENTRIES)
-			cnt *= 2;
-
-		if (cnt > TF_EM_MAX_ENTRIES) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Invalid number of Rx requested: %u\n",
-				    (parms->rx_num_flows_in_k * TF_KILOBYTE));
-			return -EINVAL;
-		}
-	}
-
-	if (parms->tx_mem_size_in_mb != 0) {
-		uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
-		uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
-				     + 1);
-		uint32_t num_entries = (parms->tx_mem_size_in_mb *
-					(TF_KILOBYTE * TF_KILOBYTE)) /
-			(key_b + action_b);
-
-		if (num_entries < TF_EM_MIN_ENTRIES) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Insufficient memory requested:%uMB\n",
-				    parms->rx_mem_size_in_mb);
-			return -EINVAL;
-		}
-
-		cnt = TF_EM_MIN_ENTRIES;
-		while (num_entries > cnt &&
-		       cnt <= TF_EM_MAX_ENTRIES)
-			cnt *= 2;
-
-		if (cnt > TF_EM_MAX_ENTRIES) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Invalid number of Tx requested: %u\n",
-		       (parms->tx_num_flows_in_k * TF_KILOBYTE));
-			return -EINVAL;
-		}
-
-		parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
-	} else {
-		if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
-		    TF_EM_MIN_ENTRIES ||
-		    (parms->tx_num_flows_in_k * TF_KILOBYTE) >
-		    tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Invalid number of Tx flows "
-				    "requested:%u max:%u\n",
-				    (parms->tx_num_flows_in_k * TF_KILOBYTE),
-			tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
-			return -EINVAL;
-		}
-
-		cnt = TF_EM_MIN_ENTRIES;
-		while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
-		       cnt <= TF_EM_MAX_ENTRIES)
-			cnt *= 2;
-
-		if (cnt > TF_EM_MAX_ENTRIES) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Invalid number of Tx requested: %u\n",
-		       (parms->tx_num_flows_in_k * TF_KILOBYTE));
-			return -EINVAL;
-		}
-	}
-
-	if (parms->rx_num_flows_in_k != 0 &&
-	    (parms->rx_max_key_sz_in_bits / 8 == 0)) {
-		TFP_DRV_LOG(ERR,
-			    "EEM: Rx key size required: %u\n",
-			    (parms->rx_max_key_sz_in_bits));
-		return -EINVAL;
-	}
-
-	if (parms->tx_num_flows_in_k != 0 &&
-	    (parms->tx_max_key_sz_in_bits / 8 == 0)) {
-		TFP_DRV_LOG(ERR,
-			    "EEM: Tx key size required: %u\n",
-			    (parms->tx_max_key_sz_in_bits));
-		return -EINVAL;
-	}
-	/* Rx */
-	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
-		parms->rx_num_flows_in_k * TF_KILOBYTE;
-	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
-		parms->rx_max_key_sz_in_bits / 8;
-
-	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
-		parms->rx_num_flows_in_k * TF_KILOBYTE;
-	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
-		parms->rx_max_key_sz_in_bits / 8;
-
-	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
-		parms->rx_num_flows_in_k * TF_KILOBYTE;
-	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
-		parms->rx_max_action_entry_sz_in_bits / 8;
-
-	tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
-		0;
-
-	/* Tx */
-	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
-		parms->tx_num_flows_in_k * TF_KILOBYTE;
-	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
-		parms->tx_max_key_sz_in_bits / 8;
-
-	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
-		parms->tx_num_flows_in_k * TF_KILOBYTE;
-	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
-		parms->tx_max_key_sz_in_bits / 8;
-
-	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
-		parms->tx_num_flows_in_k * TF_KILOBYTE;
-	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
-		parms->tx_max_action_entry_sz_in_bits / 8;
-
-	tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
-		0;
-
-	return 0;
-}
-
 /**
  * Internal function to get a Table Entry. Supports all Table Types
  * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
@@ -883,289 +148,6 @@ tf_free_tbl_entry_shadow(struct tf_session *tfs,
 }
 #endif /* TF_SHADOW */
 
-/**
- * Create External Tbl pool of memory indexes.
- *
- * [in] dir
- *   direction
- * [in] tbl_scope_cb
- *   pointer to the table scope
- * [in] num_entries
- *   number of entries to write
- * [in] entry_sz_bytes
- *   size of each entry
- *
- * Return:
- *  0       - Success, entry allocated - no search support
- *  -ENOMEM -EINVAL -EOPNOTSUPP
- *          - Failure, entry not allocated, out of resources
- */
-static int
-tf_create_tbl_pool_external(enum tf_dir dir,
-			    struct tf_tbl_scope_cb *tbl_scope_cb,
-			    uint32_t num_entries,
-			    uint32_t entry_sz_bytes)
-{
-	struct tfp_calloc_parms parms;
-	uint32_t i;
-	int32_t j;
-	int rc = 0;
-	struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
-
-	parms.nitems = num_entries;
-	parms.size = sizeof(uint32_t);
-	parms.alignment = 0;
-
-	if (tfp_calloc(&parms) != 0) {
-		TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
-			    tf_dir_2_str(dir), strerror(ENOMEM));
-		return -ENOMEM;
-	}
-
-	/* Create empty stack
-	 */
-	rc = stack_init(num_entries, parms.mem_va, pool);
-
-	if (rc != 0) {
-		TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
-			    tf_dir_2_str(dir), strerror(-rc));
-		goto cleanup;
-	}
-
-	/* Save the  malloced memory address so that it can
-	 * be freed when the table scope is freed.
-	 */
-	tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
-
-	/* Fill pool with indexes in reverse
-	 */
-	j = (num_entries - 1) * entry_sz_bytes;
-
-	for (i = 0; i < num_entries; i++) {
-		rc = stack_push(pool, j);
-		if (rc != 0) {
-			TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
-				    tf_dir_2_str(dir), strerror(-rc));
-			goto cleanup;
-		}
-
-		if (j < 0) {
-			TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
-				    dir, j);
-			goto cleanup;
-		}
-		j -= entry_sz_bytes;
-	}
-
-	if (!stack_is_full(pool)) {
-		rc = -EINVAL;
-		TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
-			    tf_dir_2_str(dir), strerror(-rc));
-		goto cleanup;
-	}
-	return 0;
-cleanup:
-	tfp_free((void *)parms.mem_va);
-	return rc;
-}
-
-/**
- * Destroy External Tbl pool of memory indexes.
- *
- * [in] dir
- *   direction
- * [in] tbl_scope_cb
- *   pointer to the table scope
- *
- */
-static void
-tf_destroy_tbl_pool_external(enum tf_dir dir,
-			     struct tf_tbl_scope_cb *tbl_scope_cb)
-{
-	uint32_t *ext_act_pool_mem =
-		tbl_scope_cb->ext_act_pool_mem[dir];
-
-	tfp_free(ext_act_pool_mem);
-}
-
-/* API defined in tf_em.h */
-struct tf_tbl_scope_cb *
-tbl_scope_cb_find(struct tf_session *session,
-		  uint32_t tbl_scope_id)
-{
-	int i;
-
-	/* Check that id is valid */
-	i = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id);
-	if (i < 0)
-		return NULL;
-
-	for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
-		if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)
-			return &session->tbl_scopes[i];
-	}
-
-	return NULL;
-}
-
-/* API defined in tf_core.h */
-int
-tf_free_eem_tbl_scope_cb(struct tf *tfp,
-			 struct tf_free_tbl_scope_parms *parms)
-{
-	int rc = 0;
-	enum tf_dir  dir;
-	struct tf_tbl_scope_cb *tbl_scope_cb;
-	struct tf_session *session;
-
-	session = (struct tf_session *)(tfp->session->core_data);
-
-	tbl_scope_cb = tbl_scope_cb_find(session,
-					 parms->tbl_scope_id);
-
-	if (tbl_scope_cb == NULL) {
-		TFP_DRV_LOG(ERR, "Table scope error\n");
-		return -EINVAL;
-	}
-
-	/* Free Table control block */
-	ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
-
-	/* free table scope locks */
-	for (dir = 0; dir < TF_DIR_MAX; dir++) {
-		/* Free associated external pools
-		 */
-		tf_destroy_tbl_pool_external(dir,
-					     tbl_scope_cb);
-		tf_msg_em_op(tfp,
-			     dir,
-			     HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
-
-		/* free table scope and all associated resources */
-		tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
-	}
-
-	return rc;
-}
-
-/* API defined in tf_em.h */
-int
-tf_alloc_eem_tbl_scope(struct tf *tfp,
-		       struct tf_alloc_tbl_scope_parms *parms)
-{
-	int rc;
-	enum tf_dir dir;
-	struct tf_tbl_scope_cb *tbl_scope_cb;
-	struct hcapi_cfa_em_table *em_tables;
-	int index;
-	struct tf_session *session;
-	struct tf_free_tbl_scope_parms free_parms;
-
-	session = (struct tf_session *)tfp->session->core_data;
-
-	/* Get Table Scope control block from the session pool */
-	index = ba_alloc(session->tbl_scope_pool_rx);
-	if (index == -1) {
-		TFP_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
-			    "Control Block\n");
-		return -ENOMEM;
-	}
-
-	tbl_scope_cb = &session->tbl_scopes[index];
-	tbl_scope_cb->index = index;
-	tbl_scope_cb->tbl_scope_id = index;
-	parms->tbl_scope_id = index;
-
-	for (dir = 0; dir < TF_DIR_MAX; dir++) {
-		rc = tf_msg_em_qcaps(tfp,
-				     dir,
-				     &tbl_scope_cb->em_caps[dir]);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Unable to query for EEM capability,"
-				    " rc:%s\n",
-				    strerror(-rc));
-			goto cleanup;
-		}
-	}
-
-	/*
-	 * Validate and setup table sizes
-	 */
-	if (tf_em_validate_num_entries(tbl_scope_cb, parms))
-		goto cleanup;
-
-	for (dir = 0; dir < TF_DIR_MAX; dir++) {
-		/*
-		 * Allocate tables and signal configuration to FW
-		 */
-		rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Unable to register for EEM ctx,"
-				    " rc:%s\n",
-				    strerror(-rc));
-			goto cleanup;
-		}
-
-		em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
-		rc = tf_msg_em_cfg(tfp,
-				   em_tables[TF_KEY0_TABLE].num_entries,
-				   em_tables[TF_KEY0_TABLE].ctx_id,
-				   em_tables[TF_KEY1_TABLE].ctx_id,
-				   em_tables[TF_RECORD_TABLE].ctx_id,
-				   em_tables[TF_EFC_TABLE].ctx_id,
-				   parms->hw_flow_cache_flush_timer,
-				   dir);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "TBL: Unable to configure EEM in firmware"
-				    " rc:%s\n",
-				    strerror(-rc));
-			goto cleanup_full;
-		}
-
-		rc = tf_msg_em_op(tfp,
-				  dir,
-				  HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
-
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "EEM: Unable to enable EEM in firmware"
-				    " rc:%s\n",
-				    strerror(-rc));
-			goto cleanup_full;
-		}
-
-		/* Allocate the pool of offsets of the external memory.
-		 * Initially, this is a single fixed size pool for all external
-		 * actions related to a single table scope.
-		 */
-		rc = tf_create_tbl_pool_external(dir,
-				    tbl_scope_cb,
-				    em_tables[TF_RECORD_TABLE].num_entries,
-				    em_tables[TF_RECORD_TABLE].entry_size);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "%s TBL: Unable to allocate idx pools %s\n",
-				    tf_dir_2_str(dir),
-				    strerror(-rc));
-			goto cleanup_full;
-		}
-	}
-
-	return 0;
-
-cleanup_full:
-	free_parms.tbl_scope_id = index;
-	tf_free_eem_tbl_scope_cb(tfp, &free_parms);
-	return -EINVAL;
-
-cleanup:
-	/* Free Table control block */
-	ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
-	return -EINVAL;
-}
 
  /* API defined in tf_core.h */
 int
@@ -1196,119 +178,3 @@ tf_bulk_get_tbl_entry(struct tf *tfp,
 
 	return rc;
 }
-
-/* API defined in tf_core.h */
-int
-tf_alloc_tbl_scope(struct tf *tfp,
-		   struct tf_alloc_tbl_scope_parms *parms)
-{
-	int rc;
-
-	TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
-
-	rc = tf_alloc_eem_tbl_scope(tfp, parms);
-
-	return rc;
-}
-
-/* API defined in tf_core.h */
-int
-tf_free_tbl_scope(struct tf *tfp,
-		  struct tf_free_tbl_scope_parms *parms)
-{
-	int rc;
-
-	TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
-
-	/* free table scope and all associated resources */
-	rc = tf_free_eem_tbl_scope_cb(tfp, parms);
-
-	return rc;
-}
-
-static void
-tf_dump_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
-			struct hcapi_cfa_em_page_tbl *tp_next)
-{
-	uint64_t *pg_va;
-	uint32_t i;
-	uint32_t j;
-	uint32_t k = 0;
-
-	printf("pg_count:%d pg_size:0x%x\n",
-	       tp->pg_count,
-	       tp->pg_size);
-	for (i = 0; i < tp->pg_count; i++) {
-		pg_va = tp->pg_va_tbl[i];
-		printf("\t%p\n", (void *)pg_va);
-		for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
-			printf("\t\t%p\n", (void *)(uintptr_t)pg_va[j]);
-			if (((pg_va[j] & 0x7) ==
-			     tfp_cpu_to_le_64(PTU_PTE_LAST |
-					      PTU_PTE_VALID)))
-				return;
-
-			if (!(pg_va[j] & tfp_cpu_to_le_64(PTU_PTE_VALID))) {
-				printf("** Invalid entry **\n");
-				return;
-			}
-
-			if (++k >= tp_next->pg_count) {
-				printf("** Shouldn't get here **\n");
-				return;
-			}
-		}
-	}
-}
-
-void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id);
-
-void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
-{
-	struct tf_session      *session;
-	struct tf_tbl_scope_cb *tbl_scope_cb;
-	struct hcapi_cfa_em_page_tbl *tp;
-	struct hcapi_cfa_em_page_tbl *tp_next;
-	struct hcapi_cfa_em_table *tbl;
-	int i;
-	int j;
-	int dir;
-
-	printf("called %s\n", __func__);
-
-	/* find session struct */
-	session = (struct tf_session *)tfp->session->core_data;
-
-	/* find control block for table scope */
-	tbl_scope_cb = tbl_scope_cb_find(session,
-					 tbl_scope_id);
-	if (tbl_scope_cb == NULL)
-		PMD_DRV_LOG(ERR, "No table scope\n");
-
-	for (dir = 0; dir < TF_DIR_MAX; dir++) {
-		printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
-
-		for (j = TF_KEY0_TABLE; j < TF_MAX_TABLE; j++) {
-			tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
-			printf
-	("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",
-			       j,
-			       tbl->type,
-			       tbl->num_entries,
-			       tbl->entry_size,
-			       tbl->num_lvl);
-			if (tbl->pg_tbl[0].pg_va_tbl &&
-			    tbl->pg_tbl[0].pg_pa_tbl)
-				printf("%p %p\n",
-			       tbl->pg_tbl[0].pg_va_tbl[0],
-			       (void *)(uintptr_t)tbl->pg_tbl[0].pg_pa_tbl[0]);
-			for (i = 0; i < tbl->num_lvl - 1; i++) {
-				printf("Level:%d\n", i);
-				tp = &tbl->pg_tbl[i];
-				tp_next = &tbl->pg_tbl[i + 1];
-				tf_dump_link_page_table(tp, tp_next);
-			}
-			printf("\n");
-		}
-	}
-}
diff --git a/drivers/net/bnxt/tf_core/tf_tbl_type.c b/drivers/net/bnxt/tf_core/tf_tbl_type.c
index bdf7d2089..2f5af6060 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl_type.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl_type.c
@@ -209,8 +209,10 @@ tf_tbl_set(struct tf *tfp,
 	   struct tf_tbl_set_parms *parms)
 {
 	int rc;
-	struct tf_rm_is_allocated_parms aparms;
 	int allocated = 0;
+	uint16_t hcapi_type;
+	struct tf_rm_is_allocated_parms aparms = { 0 };
+	struct tf_rm_get_hcapi_parms hparms = { 0 };
 
 	TF_CHECK_PARMS3(tfp, parms, parms->data);
 
@@ -240,9 +242,22 @@ tf_tbl_set(struct tf *tfp,
 	}
 
 	/* Set the entry */
+	hparms.rm_db = tbl_db[parms->dir];
+	hparms.db_index = parms->type;
+	hparms.hcapi_type = &hcapi_type;
+	rc = tf_rm_get_hcapi_type(&hparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, Failed type lookup, type:%d, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    parms->type,
+			    strerror(-rc));
+		return rc;
+	}
+
 	rc = tf_msg_set_tbl_entry(tfp,
 				  parms->dir,
-				  parms->type,
+				  hcapi_type,
 				  parms->data_sz_in_bytes,
 				  parms->data,
 				  parms->idx);
@@ -262,8 +277,10 @@ tf_tbl_get(struct tf *tfp,
 	   struct tf_tbl_get_parms *parms)
 {
 	int rc;
-	struct tf_rm_is_allocated_parms aparms;
+	uint16_t hcapi_type;
 	int allocated = 0;
+	struct tf_rm_is_allocated_parms aparms = { 0 };
+	struct tf_rm_get_hcapi_parms hparms = { 0 };
 
 	TF_CHECK_PARMS3(tfp, parms, parms->data);
 
@@ -292,10 +309,24 @@ tf_tbl_get(struct tf *tfp,
 		return -EINVAL;
 	}
 
+	/* Set the entry */
+	hparms.rm_db = tbl_db[parms->dir];
+	hparms.db_index = parms->type;
+	hparms.hcapi_type = &hcapi_type;
+	rc = tf_rm_get_hcapi_type(&hparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, Failed type lookup, type:%d, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    parms->type,
+			    strerror(-rc));
+		return rc;
+	}
+
 	/* Get the entry */
 	rc = tf_msg_get_tbl_entry(tfp,
 				  parms->dir,
-				  parms->type,
+				  hcapi_type,
 				  parms->data_sz_in_bytes,
 				  parms->data,
 				  parms->idx);
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c
index 260fb15a6..a1761ad56 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam.c
@@ -53,7 +53,6 @@ tf_tcam_bind(struct tf *tfp,
 		return -EINVAL;
 	}
 
-	db_cfg.num_elements = parms->num_elements;
 	db_cfg.type = TF_DEVICE_MODULE_TYPE_TCAM;
 	db_cfg.num_elements = parms->num_elements;
 	db_cfg.cfg = parms->cfg;
@@ -174,14 +173,15 @@ tf_tcam_alloc(struct tf *tfp,
 }
 
 int
-tf_tcam_free(struct tf *tfp __rte_unused,
-	     struct tf_tcam_free_parms *parms __rte_unused)
+tf_tcam_free(struct tf *tfp,
+	     struct tf_tcam_free_parms *parms)
 {
 	int rc;
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
 	struct tf_rm_is_allocated_parms aparms = { 0 };
 	struct tf_rm_free_parms fparms = { 0 };
+	struct tf_rm_get_hcapi_parms hparms = { 0 };
 	uint16_t num_slice_per_row = 1;
 	int allocated = 0;
 
@@ -253,6 +253,15 @@ tf_tcam_free(struct tf *tfp __rte_unused,
 		return rc;
 	}
 
+	/* Convert TF type to HCAPI RM type */
+	hparms.rm_db = tcam_db[parms->dir];
+	hparms.db_index = parms->type;
+	hparms.hcapi_type = &parms->hcapi_type;
+
+	rc = tf_rm_get_hcapi_type(&hparms);
+	if (rc)
+		return rc;
+
 	rc = tf_msg_tcam_entry_free(tfp, parms);
 	if (rc) {
 		/* Log error */
@@ -281,6 +290,7 @@ tf_tcam_set(struct tf *tfp __rte_unused,
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
 	struct tf_rm_is_allocated_parms aparms = { 0 };
+	struct tf_rm_get_hcapi_parms hparms = { 0 };
 	uint16_t num_slice_per_row = 1;
 	int allocated = 0;
 
@@ -338,6 +348,15 @@ tf_tcam_set(struct tf *tfp __rte_unused,
 		return rc;
 	}
 
+	/* Convert TF type to HCAPI RM type */
+	hparms.rm_db = tcam_db[parms->dir];
+	hparms.db_index = parms->type;
+	hparms.hcapi_type = &parms->hcapi_type;
+
+	rc = tf_rm_get_hcapi_type(&hparms);
+	if (rc)
+		return rc;
+
 	rc = tf_msg_tcam_entry_set(tfp, parms);
 	if (rc) {
 		/* Log error */
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index 5090dfd9f..ee5bacc09 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -76,6 +76,10 @@ struct tf_tcam_free_parms {
 	 * [in] Type of the allocation type
 	 */
 	enum tf_tcam_tbl_type type;
+	/**
+	 * [in] Type of HCAPI
+	 */
+	uint16_t hcapi_type;
 	/**
 	 * [in] Index to free
 	 */
diff --git a/drivers/net/bnxt/tf_core/tf_util.c b/drivers/net/bnxt/tf_core/tf_util.c
index 16c43eb67..5472a9aac 100644
--- a/drivers/net/bnxt/tf_core/tf_util.c
+++ b/drivers/net/bnxt/tf_core/tf_util.c
@@ -152,9 +152,9 @@ tf_device_module_type_subtype_2_str(enum tf_device_module_type dm_type,
 	case TF_DEVICE_MODULE_TYPE_IDENTIFIER:
 		return tf_ident_2_str(mod_type);
 	case TF_DEVICE_MODULE_TYPE_TABLE:
-		return tf_tcam_tbl_2_str(mod_type);
-	case TF_DEVICE_MODULE_TYPE_TCAM:
 		return tf_tbl_type_2_str(mod_type);
+	case TF_DEVICE_MODULE_TYPE_TCAM:
+		return tf_tcam_tbl_2_str(mod_type);
 	case TF_DEVICE_MODULE_TYPE_EM:
 		return tf_em_tbl_type_2_str(mod_type);
 	default:
-- 
2.21.1 (Apple Git-122.3)


  parent reply	other threads:[~2020-07-02  4:16 UTC|newest]

Thread overview: 271+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-06-12 13:28 [dpdk-dev] [PATCH 00/50] add features for host-based flow management Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 01/50] net/bnxt: Basic infrastructure support for VF representors Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 02/50] net/bnxt: Infrastructure support for VF-reps data path Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 03/50] net/bnxt: add support to get FID, default vnic ID and svif of VF-Rep Endpoint Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 04/50] net/bnxt: initialize parent PF information Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 05/50] net/bnxt: modify ulp_port_db_dev_port_intf_update prototype Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 06/50] net/bnxt: get port & function related information Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 07/50] net/bnxt: add support for bnxt_hwrm_port_phy_qcaps Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 08/50] net/bnxt: modify port_db to store & retrieve more info Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 09/50] net/bnxt: add support for Exact Match Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 10/50] net/bnxt: modify EM insert and delete to use HWRM direct Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 11/50] net/bnxt: add multi device support Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 12/50] net/bnxt: support bulk table get and mirror Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 13/50] net/bnxt: update multi device design support Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 14/50] net/bnxt: support two-level priority for TCAMs Somnath Kotur
2020-06-12 13:28 ` [dpdk-dev] [PATCH 15/50] net/bnxt: add HCAPI interface support Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 16/50] net/bnxt: add core changes for EM and EEM lookups Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 17/50] net/bnxt: implement support for TCAM access Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 18/50] net/bnxt: multiple device implementation Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 19/50] net/bnxt: update identifier with remap support Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 20/50] net/bnxt: update RM with residual checker Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 21/50] net/bnxt: support two level priority for TCAMs Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 22/50] net/bnxt: support EM and TCAM lookup with table scope Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 23/50] net/bnxt: update table get to use new design Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 24/50] net/bnxt: update RM to support HCAPI only Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 25/50] net/bnxt: remove table scope from session Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 26/50] net/bnxt: add external action alloc and free Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 27/50] net/bnxt: align CFA resources with RM Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 28/50] net/bnxt: implement IF tables set and get Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 29/50] net/bnxt: add TF register and unregister Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 30/50] net/bnxt: add global config set and get APIs Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 31/50] net/bnxt: add support for EEM System memory Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 32/50] net/bnxt: integrate with the latest tf_core library Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 33/50] net/bnxt: add support for internal encap records Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 34/50] net/bnxt: add support for if table processing Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 35/50] net/bnxt: disable vector mode in tx direction when truflow is enabled Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 36/50] net/bnxt: add index opcode and index operand mapper table Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 37/50] net/bnxt: add support for global resource templates Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 38/50] net/bnxt: add support for internal exact match entries Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 39/50] net/bnxt: add support for conditional execution of mapper tables Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 40/50] net/bnxt: enable HWRM_PORT_MAC_QCFG for trusted vf Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 41/50] net/bnxt: enhancements for port db Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 42/50] net/bnxt: fix for VF to VFR conduit Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 43/50] net/bnxt: fix to parse representor along with other dev-args Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 44/50] net/bnxt: fill mapper parameters with default rules info Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 45/50] net/bnxt: add support for vf rep and stat templates Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 46/50] net/bnxt: create default flow rules for the VF-rep conduit Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 47/50] net/bnxt: add ingress & egress port default rules Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 48/50] net/bnxt: fill cfa_action in the tx buffer descriptor properly Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 49/50] net/bnxt: support for ULP Flow counter Manager Somnath Kotur
2020-06-12 13:29 ` [dpdk-dev] [PATCH 50/50] net/bnxt: Add support for flow query with action_type COUNT Somnath Kotur
2020-07-01  6:51 ` [dpdk-dev] [PATCH v2 00/51] add features for host-based flow management Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 01/51] net/bnxt: add basic infrastructure for VF representors Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 02/51] net/bnxt: add support for VF-reps data path Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 03/51] net/bnxt: get IDs for VF-Rep endpoint Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 04/51] net/bnxt: initialize parent PF information Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 05/51] net/bnxt: modify port db dev interface Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 06/51] net/bnxt: get port and function info Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 07/51] net/bnxt: add support for hwrm port phy qcaps Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 08/51] net/bnxt: modify port db to handle more info Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 09/51] net/bnxt: add support for exact match Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 10/51] net/bnxt: modify EM insert and delete to use HWRM direct Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 11/51] net/bnxt: add multi device support Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 12/51] net/bnxt: support bulk table get and mirror Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 13/51] net/bnxt: update multi device design support Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 14/51] net/bnxt: support two-level priority for TCAMs Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 15/51] net/bnxt: add HCAPI interface support Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 16/51] net/bnxt: add core changes for EM and EEM lookups Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 17/51] net/bnxt: implement support for TCAM access Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 18/51] net/bnxt: multiple device implementation Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 19/51] net/bnxt: update identifier with remap support Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 20/51] net/bnxt: update RM with residual checker Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 21/51] net/bnxt: support two level priority for TCAMs Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 22/51] net/bnxt: support EM and TCAM lookup with table scope Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 23/51] net/bnxt: update table get to use new design Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 24/51] net/bnxt: update RM to support HCAPI only Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 25/51] net/bnxt: remove table scope from session Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 26/51] net/bnxt: add external action alloc and free Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 27/51] net/bnxt: align CFA resources with RM Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 28/51] net/bnxt: implement IF tables set and get Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 29/51] net/bnxt: add TF register and unregister Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 30/51] net/bnxt: add global config set and get APIs Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 31/51] net/bnxt: add support for EEM System memory Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 32/51] net/bnxt: integrate with the latest tf core changes Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 33/51] net/bnxt: add support for internal encap records Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 34/51] net/bnxt: add support for if table processing Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 35/51] net/bnxt: disable Tx vector mode if truflow is enabled Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 36/51] net/bnxt: add index opcode and operand to mapper table Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 37/51] net/bnxt: add support for global resource templates Ajit Khaparde
2020-07-01  6:51   ` [dpdk-dev] [PATCH v2 38/51] net/bnxt: add support for internal exact match entries Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 39/51] net/bnxt: add support for conditional execution of mapper tables Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 40/51] net/bnxt: enable port MAC qcfg command for trusted VF Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 41/51] net/bnxt: enhancements for port db Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 42/51] net/bnxt: manage VF to VFR conduit Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 43/51] net/bnxt: parse representor along with other dev-args Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 44/51] net/bnxt: fill mapper parameters with default rules info Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 45/51] net/bnxt: add VF-rep and stat templates Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 46/51] net/bnxt: create default flow rules for the VF-rep conduit Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 47/51] net/bnxt: add port default rules for ingress and egress Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 48/51] net/bnxt: fill cfa action in the Tx descriptor Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 49/51] net/bnxt: add ULP Flow counter Manager Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 50/51] net/bnxt: add support for count action in flow query Ajit Khaparde
2020-07-01  6:52   ` [dpdk-dev] [PATCH v2 51/51] doc: update release notes Ajit Khaparde
2020-07-01 14:26   ` [dpdk-dev] [PATCH v2 00/51] add features for host-based flow management Ajit Khaparde
2020-07-01 21:31     ` Ferruh Yigit
2020-07-02  4:10       ` [dpdk-dev] [PATCH v3 " Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 01/51] net/bnxt: add basic infrastructure for VF reps Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 02/51] net/bnxt: add support for VF-reps data path Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 03/51] net/bnxt: get IDs for VF-Rep endpoint Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 04/51] net/bnxt: initialize parent PF information Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 05/51] net/bnxt: modify port db dev interface Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 06/51] net/bnxt: get port and function info Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 07/51] net/bnxt: add support for hwrm port phy qcaps Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 08/51] net/bnxt: modify port db to handle more info Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 09/51] net/bnxt: add support for exact match Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 10/51] net/bnxt: modify EM insert and delete to use HWRM direct Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 11/51] net/bnxt: add multi device support Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 12/51] net/bnxt: support bulk table get and mirror Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 13/51] net/bnxt: update multi device design support Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 14/51] net/bnxt: support two-level priority for TCAMs Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 15/51] net/bnxt: add HCAPI interface support Ajit Khaparde
2020-07-02  4:10         ` [dpdk-dev] [PATCH v3 16/51] net/bnxt: add core changes for EM and EEM lookups Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 17/51] net/bnxt: implement support for TCAM access Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 18/51] net/bnxt: multiple device implementation Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 19/51] net/bnxt: update identifier with remap support Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 20/51] net/bnxt: update RM with residual checker Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 21/51] net/bnxt: support two level priority for TCAMs Ajit Khaparde
2020-07-02  4:11         ` Ajit Khaparde [this message]
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 23/51] net/bnxt: update table get to use new design Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 24/51] net/bnxt: update RM to support HCAPI only Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 25/51] net/bnxt: remove table scope from session Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 26/51] net/bnxt: add external action alloc and free Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 27/51] net/bnxt: align CFA resources with RM Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 28/51] net/bnxt: implement IF tables set and get Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 29/51] net/bnxt: add TF register and unregister Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 30/51] net/bnxt: add global config set and get APIs Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 31/51] net/bnxt: add support for EEM System memory Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 32/51] net/bnxt: integrate with the latest tf core changes Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 33/51] net/bnxt: add support for internal encap records Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 34/51] net/bnxt: add support for if table processing Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 35/51] net/bnxt: disable Tx vector mode if truflow is enabled Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 36/51] net/bnxt: add index opcode and operand to mapper table Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 37/51] net/bnxt: add support for global resource templates Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 38/51] net/bnxt: add support for internal exact match entries Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 39/51] net/bnxt: add conditional execution of mapper tables Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 40/51] net/bnxt: enable port MAC qcfg for trusted VF Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 41/51] net/bnxt: enhancements for port db Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 42/51] net/bnxt: manage VF to VFR conduit Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 43/51] net/bnxt: parse reps along with other dev-args Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 44/51] net/bnxt: fill mapper parameters with default rules Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 45/51] net/bnxt: add VF-rep and stat templates Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 46/51] net/bnxt: create default flow rules for the VF-rep Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 47/51] net/bnxt: add port default rules for ingress and egress Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 48/51] net/bnxt: fill cfa action in the Tx descriptor Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 49/51] net/bnxt: add ULP Flow counter Manager Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 50/51] net/bnxt: add support for count action in flow query Ajit Khaparde
2020-07-02  4:11         ` [dpdk-dev] [PATCH v3 51/51] doc: update release notes Ajit Khaparde
2020-07-02 23:27       ` [dpdk-dev] [PATCH v4 00/51] add features for host-based flow management Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 01/51] net/bnxt: add basic infrastructure for VF reps Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 02/51] net/bnxt: add support for VF-reps data path Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 03/51] net/bnxt: get IDs for VF-Rep endpoint Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 04/51] net/bnxt: initialize parent PF information Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 05/51] net/bnxt: modify port db dev interface Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 06/51] net/bnxt: get port and function info Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 07/51] net/bnxt: add support for hwrm port phy qcaps Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 08/51] net/bnxt: modify port db to handle more info Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 09/51] net/bnxt: add support for exact match Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 10/51] net/bnxt: modify EM insert and delete to use HWRM direct Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 11/51] net/bnxt: add multi device support Ajit Khaparde
2020-07-02 23:27         ` [dpdk-dev] [PATCH v4 12/51] net/bnxt: support bulk table get and mirror Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 13/51] net/bnxt: update multi device design support Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 14/51] net/bnxt: support two-level priority for TCAMs Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 15/51] net/bnxt: add HCAPI interface support Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 16/51] net/bnxt: add core changes for EM and EEM lookups Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 17/51] net/bnxt: implement support for TCAM access Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 18/51] net/bnxt: multiple device implementation Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 19/51] net/bnxt: update identifier with remap support Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 20/51] net/bnxt: update RM with residual checker Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 21/51] net/bnxt: support two level priority for TCAMs Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 22/51] net/bnxt: support EM and TCAM lookup with table scope Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 23/51] net/bnxt: update table get to use new design Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 24/51] net/bnxt: update RM to support HCAPI only Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 25/51] net/bnxt: remove table scope from session Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 26/51] net/bnxt: add external action alloc and free Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 27/51] net/bnxt: align CFA resources with RM Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 28/51] net/bnxt: implement IF tables set and get Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 29/51] net/bnxt: add TF register and unregister Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 30/51] net/bnxt: add global config set and get APIs Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 31/51] net/bnxt: add support for EEM System memory Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 32/51] net/bnxt: integrate with the latest tf core changes Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 33/51] net/bnxt: add support for internal encap records Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 34/51] net/bnxt: add support for if table processing Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 35/51] net/bnxt: disable Tx vector mode if truflow is enabled Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 36/51] net/bnxt: add index opcode and operand to mapper table Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 37/51] net/bnxt: add support for global resource templates Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 38/51] net/bnxt: add support for internal exact match entries Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 39/51] net/bnxt: add support for conditional execution of mapper tables Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 40/51] net/bnxt: enable port MAC qcfg command for trusted VF Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 41/51] net/bnxt: enhancements for port db Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 42/51] net/bnxt: manage VF to VFR conduit Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 43/51] net/bnxt: parse reps along with other dev-args Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 44/51] net/bnxt: fill mapper parameters with default rules Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 45/51] net/bnxt: add VF-rep and stat templates Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 46/51] net/bnxt: create default flow rules for the VF-rep Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 47/51] net/bnxt: add port default rules for ingress and egress Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 48/51] net/bnxt: fill cfa action in the Tx descriptor Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 49/51] net/bnxt: add ULP Flow counter Manager Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 50/51] net/bnxt: add support for count action in flow query Ajit Khaparde
2020-07-02 23:28         ` [dpdk-dev] [PATCH v4 51/51] doc: update release notes Ajit Khaparde
2020-07-03 21:01       ` [dpdk-dev] [PATCH v5 00/51] net/bnxt: add features for host-based flow management Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 01/51] net/bnxt: add basic infrastructure for VF reps Ajit Khaparde
2020-07-06 10:07           ` Ferruh Yigit
2020-07-06 14:04             ` Somnath Kotur
2020-07-06 14:14               ` Ajit Khaparde
2020-07-06 18:35                 ` Ferruh Yigit
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 02/51] net/bnxt: add support for VF-reps data path Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 03/51] net/bnxt: get IDs for VF-Rep endpoint Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 04/51] net/bnxt: initialize parent PF information Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 05/51] net/bnxt: modify port db dev interface Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 06/51] net/bnxt: get port and function info Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 07/51] net/bnxt: add support for hwrm port phy qcaps Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 08/51] net/bnxt: modify port db to handle more info Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 09/51] net/bnxt: add support for exact match Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 10/51] net/bnxt: use HWRM direct for EM insert and delete Ajit Khaparde
2020-07-06 18:47           ` Ferruh Yigit
2020-07-06 19:11           ` Ferruh Yigit
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 11/51] net/bnxt: add multi device support Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 12/51] net/bnxt: support bulk table get and mirror Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 13/51] net/bnxt: update multi device design support Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 14/51] net/bnxt: support two-level priority for TCAMs Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 15/51] net/bnxt: add HCAPI interface support Ajit Khaparde
2020-07-07  8:03           ` Ferruh Yigit
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 16/51] net/bnxt: add core changes for EM and EEM lookups Ajit Khaparde
2020-07-07  8:08           ` Ferruh Yigit
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 17/51] net/bnxt: implement support for TCAM access Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 18/51] net/bnxt: multiple device implementation Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 19/51] net/bnxt: update identifier with remap support Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 20/51] net/bnxt: update RM with residual checker Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 21/51] net/bnxt: support two level priority for TCAMs Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 22/51] net/bnxt: use table scope for EM and TCAM lookup Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 23/51] net/bnxt: update table get to use new design Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 24/51] net/bnxt: update RM to support HCAPI only Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 25/51] net/bnxt: remove table scope from session Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 26/51] net/bnxt: add external action alloc and free Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 27/51] net/bnxt: align CFA resources with RM Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 28/51] net/bnxt: implement IF tables set and get Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 29/51] net/bnxt: add TF register and unregister Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 30/51] net/bnxt: add global config set and get APIs Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 31/51] net/bnxt: add support for EEM System memory Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 32/51] net/bnxt: integrate with the latest tf core changes Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 33/51] net/bnxt: add support for internal encap records Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 34/51] net/bnxt: add support for if table processing Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 35/51] net/bnxt: disable Tx vector mode if truflow is set Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 36/51] net/bnxt: add index opcode and operand to mapper table Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 37/51] net/bnxt: add support for global resource templates Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 38/51] net/bnxt: add support for internal exact match Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 39/51] net/bnxt: add conditional execution of mapper tables Ajit Khaparde
2020-07-03 21:01         ` [dpdk-dev] [PATCH v5 40/51] net/bnxt: allow port MAC qcfg command for trusted VF Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 41/51] net/bnxt: enhancements for port db Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 42/51] net/bnxt: manage VF to VFR conduit Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 43/51] net/bnxt: parse reps along with other dev-args Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 44/51] net/bnxt: fill mapper parameters with default rules Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 45/51] net/bnxt: add VF-rep and stat templates Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 46/51] net/bnxt: create default flow rules for the VF-rep Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 47/51] net/bnxt: add port default rules for ingress and egress Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 48/51] net/bnxt: fill cfa action in the Tx descriptor Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 49/51] net/bnxt: add ULP Flow counter Manager Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 50/51] net/bnxt: add support for count action in flow query Ajit Khaparde
2020-07-03 21:02         ` [dpdk-dev] [PATCH v5 51/51] doc: update release notes Ajit Khaparde
2020-07-06  1:47         ` [dpdk-dev] [PATCH v5 00/51] net/bnxt: add features for host-based flow management Ajit Khaparde
2020-07-06 10:10         ` Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200702041134.43198-23-ajit.khaparde@broadcom.com \
    --to=ajit.khaparde@broadcom.com \
    --cc=dev@dpdk.org \
    --cc=peter.spreadborough@broadcom.com \
    --cc=stuart.schacher@broadcom.com \
    --cc=venkatkumar.duvvuru@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).