From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
To: dev@dpdk.org
Cc: Farah Smith <farah.smith@broadcom.com>
Subject: [dpdk-dev] [PATCH 04/14] net/bnxt: add Thor SRAM mgr model
Date: Wed,  1 Sep 2021 19:54:23 +0530
Message-ID: <20210901142433.8444-5-venkatkumar.duvvuru@broadcom.com>
In-Reply-To: <20210901142433.8444-1-venkatkumar.duvvuru@broadcom.com>

From: Farah Smith <farah.smith@broadcom.com>

Add dynamic SRAM manager allocation support.
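
The SRAM manager tracks Thor (P58) action SRAM per direction and bank
in 64B blocks and hands out 8, 16, 32 or 64B slices on demand. Index
table types that live in the SRAM banks are allocated, freed, set and
read through new SRAM-managed device operations, selected via the
tf_dev_is_sram_managed device op. Session open additionally takes a
wc_num_slices parameter to select the number of WC TCAM slices per row.

Rough usage sketch of the new session parameter and the SRAM-managed
allocation path (illustrative only, not a complete or buildable
example; session resources, device type and the remaining parameter
fields are omitted, other names follow the existing tf_core.h API):

  struct tf tfp = { 0 };
  struct tf_open_session_parms oparms = { 0 };
  struct tf_alloc_tbl_entry_parms aparms = { 0 };
  int rc;

  /* Request 2 WC TCAM slices per row for this session. */
  oparms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
  rc = tf_open_session(&tfp, &oparms);

  /* On Thor, full action records are SRAM managed, so this allocation
   * is dispatched to the SRAM manager (tf_tbl_sram_alloc) rather than
   * the legacy index table path.
   */
  aparms.dir = TF_DIR_RX;
  aparms.type = TF_TBL_TYPE_FULL_ACT_RECORD;
  rc = tf_alloc_tbl_entry(&tfp, &aparms);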

Signed-off-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>
Reviewed-by: Peter Spreadborough <peter.spreadborough@broadcom.com>
---
 drivers/net/bnxt/tf_core/ll.c             |   3 +
 drivers/net/bnxt/tf_core/ll.h             |  50 +-
 drivers/net/bnxt/tf_core/meson.build      |   2 +
 drivers/net/bnxt/tf_core/tf_core.c        | 104 ++-
 drivers/net/bnxt/tf_core/tf_core.h        |  48 +-
 drivers/net/bnxt/tf_core/tf_device.c      |  40 +-
 drivers/net/bnxt/tf_core/tf_device.h      | 133 ++-
 drivers/net/bnxt/tf_core/tf_device_p4.c   |  75 +-
 drivers/net/bnxt/tf_core/tf_device_p4.h   |  50 +-
 drivers/net/bnxt/tf_core/tf_device_p58.c  | 105 ++-
 drivers/net/bnxt/tf_core/tf_device_p58.h  |  60 +-
 drivers/net/bnxt/tf_core/tf_msg.c         |   2 +-
 drivers/net/bnxt/tf_core/tf_rm.c          |  46 +-
 drivers/net/bnxt/tf_core/tf_rm.h          |  62 +-
 drivers/net/bnxt/tf_core/tf_session.c     |  56 ++
 drivers/net/bnxt/tf_core/tf_session.h     |  58 +-
 drivers/net/bnxt/tf_core/tf_sram_mgr.c    | 971 ++++++++++++++++++++++
 drivers/net/bnxt/tf_core/tf_sram_mgr.h    | 317 +++++++
 drivers/net/bnxt/tf_core/tf_tbl.c         | 186 +----
 drivers/net/bnxt/tf_core/tf_tbl.h         |  15 +-
 drivers/net/bnxt/tf_core/tf_tbl_sram.c    | 713 ++++++++++++++++
 drivers/net/bnxt/tf_core/tf_tbl_sram.h    | 154 ++++
 drivers/net/bnxt/tf_core/tf_tcam.c        |  10 +-
 drivers/net/bnxt/tf_core/tf_tcam.h        |   7 +
 drivers/net/bnxt/tf_core/tf_tcam_shared.c |  28 +-
 drivers/net/bnxt/tf_core/tf_util.c        |  10 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c        |  23 +
 meson_options.txt                         |   2 +
 28 files changed, 2978 insertions(+), 352 deletions(-)
 create mode 100644 drivers/net/bnxt/tf_core/tf_sram_mgr.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_sram_mgr.h
 create mode 100644 drivers/net/bnxt/tf_core/tf_tbl_sram.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_tbl_sram.h

diff --git a/drivers/net/bnxt/tf_core/ll.c b/drivers/net/bnxt/tf_core/ll.c
index cd168a7970..f2bdff6b9e 100644
--- a/drivers/net/bnxt/tf_core/ll.c
+++ b/drivers/net/bnxt/tf_core/ll.c
@@ -13,6 +13,7 @@ void ll_init(struct ll *ll)
 {
 	ll->head = NULL;
 	ll->tail = NULL;
+	ll->cnt = 0;
 }
 
 /* insert entry in linked list */
@@ -30,6 +31,7 @@ void ll_insert(struct ll *ll,
 		entry->next->prev = entry;
 		ll->head = entry->next->prev;
 	}
+	ll->cnt++;
 }
 
 /* delete entry from linked list */
@@ -49,4 +51,5 @@ void ll_delete(struct ll *ll,
 		entry->prev->next = entry->next;
 		entry->next->prev = entry->prev;
 	}
+	ll->cnt--;
 }
diff --git a/drivers/net/bnxt/tf_core/ll.h b/drivers/net/bnxt/tf_core/ll.h
index 239478b4f8..9cf8f64ec2 100644
--- a/drivers/net/bnxt/tf_core/ll.h
+++ b/drivers/net/bnxt/tf_core/ll.h
@@ -8,6 +8,8 @@
 #ifndef _LL_H_
 #define _LL_H_
 
+#include <stdint.h>
+
 /* linked list entry */
 struct ll_entry {
 	struct ll_entry *prev;
@@ -18,6 +20,7 @@ struct ll_entry {
 struct ll {
 	struct ll_entry *head;
 	struct ll_entry *tail;
+	uint32_t cnt;
 };
 
 /**
@@ -28,7 +31,7 @@ struct ll {
 void ll_init(struct ll *ll);
 
 /**
- * Linked list insert
+ * Linked list insert head
  *
  * [in] ll, linked list where element is inserted
  * [in] entry, entry to be added
@@ -43,4 +46,49 @@ void ll_insert(struct ll *ll, struct ll_entry *entry);
  */
 void ll_delete(struct ll *ll, struct ll_entry *entry);
 
+/**
+ * Linked list return next entry without deleting it
+ *
+ * Useful in performing search
+ *
+ * [in] Entry in the list
+ */
+static inline struct ll_entry *ll_next(struct ll_entry *entry)
+{
+	return entry->next;
+}
+
+/**
+ * Linked list return the head of the list without removing it
+ *
+ * Useful in performing search
+ *
+ * [in] ll, linked list
+ */
+static inline struct ll_entry *ll_head(struct ll *ll)
+{
+	return ll->head;
+}
+
+/**
+ * Linked list return the tail of the list without removing it
+ *
+ * Useful in performing search
+ *
+ * [in] ll, linked list
+ */
+static inline struct ll_entry *ll_tail(struct ll *ll)
+{
+	return ll->tail;
+}
+
+/**
+ * Linked list return the number of entries in the list
+ *
+ * [in] ll, linked list
+ */
+static inline uint32_t ll_cnt(struct ll *ll)
+{
+	return ll->cnt;
+}
 #endif /* _LL_H_ */
diff --git a/drivers/net/bnxt/tf_core/meson.build b/drivers/net/bnxt/tf_core/meson.build
index f28e77ec2e..b7333a431b 100644
--- a/drivers/net/bnxt/tf_core/meson.build
+++ b/drivers/net/bnxt/tf_core/meson.build
@@ -16,6 +16,8 @@ sources += files(
         'stack.c',
         'tf_rm.c',
         'tf_tbl.c',
+	'tf_tbl_sram.c',
+	'tf_sram_mgr.c',
         'tf_em_common.c',
         'tf_em_host.c',
         'tf_em_internal.c',
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 5458f76e2d..936102c804 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -1079,17 +1079,16 @@ tf_alloc_tbl_entry(struct tf *tfp,
 				    strerror(-rc));
 			return rc;
 		}
-
-	} else {
-		if (dev->ops->tf_dev_alloc_tbl == NULL) {
-			rc = -EOPNOTSUPP;
+	} else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+		rc = dev->ops->tf_dev_alloc_sram_tbl(tfp, &aparms);
+		if (rc) {
 			TFP_DRV_LOG(ERR,
-				    "%s: Operation not supported, rc:%s\n",
+				    "%s: SRAM table allocation failed, rc:%s\n",
 				    tf_dir_2_str(parms->dir),
 				    strerror(-rc));
-			return -EOPNOTSUPP;
+			return rc;
 		}
-
+	} else {
 		rc = dev->ops->tf_dev_alloc_tbl(tfp, &aparms);
 		if (rc) {
 			TFP_DRV_LOG(ERR,
@@ -1162,15 +1161,16 @@ tf_free_tbl_entry(struct tf *tfp,
 				    strerror(-rc));
 			return rc;
 		}
-	} else {
-		if (dev->ops->tf_dev_free_tbl == NULL) {
-			rc = -EOPNOTSUPP;
+	} else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+		rc = dev->ops->tf_dev_free_sram_tbl(tfp, &fparms);
+		if (rc) {
 			TFP_DRV_LOG(ERR,
-				    "%s: Operation not supported, rc:%s\n",
+				    "%s: SRAM table free failed, rc:%s\n",
 				    tf_dir_2_str(parms->dir),
 				    strerror(-rc));
-			return -EOPNOTSUPP;
+			return rc;
 		}
+	} else {
 
 		rc = dev->ops->tf_dev_free_tbl(tfp, &fparms);
 		if (rc) {
@@ -1181,7 +1181,6 @@ tf_free_tbl_entry(struct tf *tfp,
 			return rc;
 		}
 	}
-
 	return 0;
 }
 
@@ -1244,6 +1243,15 @@ tf_set_tbl_entry(struct tf *tfp,
 				    strerror(-rc));
 			return rc;
 		}
+	} else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+		rc = dev->ops->tf_dev_set_sram_tbl(tfp, &sparms);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s: SRAM table set failed, rc:%s\n",
+				    tf_dir_2_str(parms->dir),
+				    strerror(-rc));
+			return rc;
+		}
 	} else {
 		if (dev->ops->tf_dev_set_tbl == NULL) {
 			rc = -EOPNOTSUPP;
@@ -1300,28 +1308,39 @@ tf_get_tbl_entry(struct tf *tfp,
 			    strerror(-rc));
 		return rc;
 	}
-
-	if (dev->ops->tf_dev_get_tbl == NULL) {
-		rc = -EOPNOTSUPP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Operation not supported, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return -EOPNOTSUPP;
-	}
-
 	gparms.dir = parms->dir;
 	gparms.type = parms->type;
 	gparms.data = parms->data;
 	gparms.data_sz_in_bytes = parms->data_sz_in_bytes;
 	gparms.idx = parms->idx;
-	rc = dev->ops->tf_dev_get_tbl(tfp, &gparms);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Table get failed, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
+
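+	/* SRAM managed table types (Thor action SRAM) are read through
+	 * the SRAM manager; all other types use the existing table op.
+	 */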
+	if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+		rc = dev->ops->tf_dev_get_sram_tbl(tfp, &gparms);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s: SRAM table get failed, rc:%s\n",
+				    tf_dir_2_str(parms->dir),
+				    strerror(-rc));
+			return rc;
+		}
+	} else {
+		if (dev->ops->tf_dev_get_tbl == NULL) {
+			rc = -EOPNOTSUPP;
+			TFP_DRV_LOG(ERR,
+				    "%s: Operation not supported, rc:%s\n",
+				    tf_dir_2_str(parms->dir),
+				    strerror(-rc));
+			return -EOPNOTSUPP;
+		}
+
+		rc = dev->ops->tf_dev_get_tbl(tfp, &gparms);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s: Table get failed, rc:%s\n",
+				    tf_dir_2_str(parms->dir),
+				    strerror(-rc));
+			return rc;
+		}
 	}
 
 	return rc;
@@ -1361,6 +1380,13 @@ tf_bulk_get_tbl_entry(struct tf *tfp,
 		return rc;
 	}
 
+	bparms.dir = parms->dir;
+	bparms.type = parms->type;
+	bparms.starting_idx = parms->starting_idx;
+	bparms.num_entries = parms->num_entries;
+	bparms.entry_sz_in_bytes = parms->entry_sz_in_bytes;
+	bparms.physical_mem_addr = parms->physical_mem_addr;
+
 	if (parms->type == TF_TBL_TYPE_EXT) {
 		/* Not supported, yet */
 		rc = -EOPNOTSUPP;
@@ -1370,10 +1396,17 @@ tf_bulk_get_tbl_entry(struct tf *tfp,
 			    strerror(-rc));
 
 		return rc;
+	} else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+		rc = dev->ops->tf_dev_get_bulk_sram_tbl(tfp, &bparms);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s: SRAM table bulk get failed, rc:%s\n",
+				    tf_dir_2_str(parms->dir),
+				    strerror(-rc));
+		}
+		return rc;
 	}
 
-	/* Internal table type processing */
-
 	if (dev->ops->tf_dev_get_bulk_tbl == NULL) {
 		rc = -EOPNOTSUPP;
 		TFP_DRV_LOG(ERR,
@@ -1383,12 +1416,6 @@ tf_bulk_get_tbl_entry(struct tf *tfp,
 		return -EOPNOTSUPP;
 	}
 
-	bparms.dir = parms->dir;
-	bparms.type = parms->type;
-	bparms.starting_idx = parms->starting_idx;
-	bparms.num_entries = parms->num_entries;
-	bparms.entry_sz_in_bytes = parms->entry_sz_in_bytes;
-	bparms.physical_mem_addr = parms->physical_mem_addr;
 	rc = dev->ops->tf_dev_get_bulk_tbl(tfp, &bparms);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
@@ -1397,7 +1424,6 @@ tf_bulk_get_tbl_entry(struct tf *tfp,
 			    strerror(-rc));
 		return rc;
 	}
-
 	return rc;
 }
 
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index af8d13bd7e..fb02c2b161 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -65,6 +65,16 @@ enum tf_ext_mem_chan_type {
 	TF_EXT_MEM_CHAN_TYPE_MAX
 };
 
+/**
+ * Number of WC TCAM slices per row that the device supports
+ */
+enum tf_wc_num_slice {
+	TF_WC_TCAM_1_SLICE_PER_ROW = 1,
+	TF_WC_TCAM_2_SLICE_PER_ROW = 2,
+	TF_WC_TCAM_4_SLICE_PER_ROW = 4,
+	TF_WC_TCAM_8_SLICE_PER_ROW = 8,
+};
+
 /**
  * EEM record AR helper
  *
@@ -670,6 +680,13 @@ struct tf_open_session_parms {
 	 */
 	void *bp;
 
+	/**
+	 * [in]
+	 *
+	 * The number of slices per row for WC TCAM entries.
+	 */
+	enum tf_wc_num_slice wc_num_slices;
+
 	/**
 	 * [out] shared_session_creator
 	 *
@@ -734,8 +751,6 @@ int tf_open_session(struct tf *tfp,
 /**
  * General internal resource info
  *
- * TODO: remove tf_rm_new_entry structure and use this structure
- * internally.
  */
 struct tf_resource_info {
 	uint16_t start;
@@ -1656,12 +1671,7 @@ struct tf_alloc_tbl_entry_parms {
  * entry of the indicated type for this TruFlow session.
  *
  * Allocates an index table record. This function will attempt to
- * allocate an entry or search an index table for a matching entry if
- * search is enabled (only the shadow copy of the table is accessed).
- *
- * If search is not enabled, the first available free entry is
- * returned. If search is enabled and a matching entry to entry_data
- * is found hit is set to TRUE and success is returned.
+ * allocate an index table entry.
  *
  * External types:
  *
@@ -1670,8 +1680,8 @@ struct tf_alloc_tbl_entry_parms {
  * Allocates an external index table action record.
  *
  * NOTE:
- * Implementation of the internals of this function will be a stack with push
- * and pop.
+ * For external types, the allocation is implemented internally as a stack
+ * with push and pop.
  *
  * Returns success or failure code.
  */
@@ -1707,20 +1717,15 @@ struct tf_free_tbl_entry_parms {
  *
  * Internal types:
  *
- * If session has shadow_copy enabled the shadow DB is searched and if
- * found the element ref_cnt is decremented. If ref_cnt goes to
- * zero then the element is returned to the session pool.
- *
- * If the session does not have a shadow DB the element is free'ed and
- * given back to the session pool.
+ * The element is freed and given back to the session pool.
  *
  * External types:
  *
- * Free's an external index table action record.
+ * Frees an external index table action record.
  *
  * NOTE:
- * Implementation of the internals of this function will be a stack with push
- * and pop.
+ * For external types, the free is implemented internally as a stack with
+ * push and pop.
  *
  * Returns success or failure code.
  */
@@ -1764,9 +1769,8 @@ struct tf_set_tbl_entry_parms {
 /**
  * set index table entry
  *
- * Used to insert an application programmed index table entry into a
- * previous allocated table location.  A shadow copy of the table
- * is maintained (if enabled) (only for internal objects)
+ * Used to set an application programmed index table entry into a
+ * previously allocated table location.
  *
  * Returns success or failure code.
  */
diff --git a/drivers/net/bnxt/tf_core/tf_device.c b/drivers/net/bnxt/tf_core/tf_device.c
index 498e668b16..25a7166bbb 100644
--- a/drivers/net/bnxt/tf_core/tf_device.c
+++ b/drivers/net/bnxt/tf_core/tf_device.c
@@ -11,10 +11,14 @@
 #include "tf_rm.h"
 #ifdef TF_TCAM_SHARED
 #include "tf_tcam_shared.h"
+#include "tf_tbl_sram.h"
 #endif /* TF_TCAM_SHARED */
 
 struct tf;
 
+/* Number of slices per row for WC TCAM */
+uint16_t g_wc_num_slices_per_row = TF_WC_TCAM_1_SLICE_PER_ROW;
+
 /* Forward declarations */
 static int tf_dev_unbind_p4(struct tf *tfp);
 static int tf_dev_unbind_p58(struct tf *tfp);
@@ -83,7 +87,8 @@ static int
 tf_dev_bind_p4(struct tf *tfp,
 	       bool shadow_copy,
 	       struct tf_session_resources *resources,
-	       struct tf_dev_info *dev_handle)
+	       struct tf_dev_info *dev_handle,
+	       enum tf_wc_num_slice wc_num_slices)
 {
 	int rc;
 	int frc;
@@ -131,7 +136,6 @@ tf_dev_bind_p4(struct tf *tfp,
 	if (rsv_cnt) {
 		tbl_cfg.num_elements = TF_TBL_TYPE_MAX;
 		tbl_cfg.cfg = tf_tbl_p4;
-		tbl_cfg.shadow_copy = shadow_copy;
 		tbl_cfg.resources = resources;
 		rc = tf_tbl_bind(tfp, &tbl_cfg);
 		if (rc) {
@@ -151,6 +155,7 @@ tf_dev_bind_p4(struct tf *tfp,
 		tcam_cfg.cfg = tf_tcam_p4;
 		tcam_cfg.shadow_copy = shadow_copy;
 		tcam_cfg.resources = resources;
+		tcam_cfg.wc_num_slices = wc_num_slices;
 #ifdef TF_TCAM_SHARED
 		rc = tf_tcam_shared_bind(tfp, &tcam_cfg);
 #else /* !TF_TCAM_SHARED */
@@ -369,7 +374,8 @@ static int
 tf_dev_bind_p58(struct tf *tfp,
 		bool shadow_copy,
 		struct tf_session_resources *resources,
-		struct tf_dev_info *dev_handle)
+		struct tf_dev_info *dev_handle,
+		enum tf_wc_num_slice wc_num_slices)
 {
 	int rc;
 	int frc;
@@ -414,7 +420,6 @@ tf_dev_bind_p58(struct tf *tfp,
 	if (rsv_cnt) {
 		tbl_cfg.num_elements = TF_TBL_TYPE_MAX;
 		tbl_cfg.cfg = tf_tbl_p58;
-		tbl_cfg.shadow_copy = shadow_copy;
 		tbl_cfg.resources = resources;
 		rc = tf_tbl_bind(tfp, &tbl_cfg);
 		if (rc) {
@@ -423,6 +428,13 @@ tf_dev_bind_p58(struct tf *tfp,
 			goto fail;
 		}
 		no_rsv_flag = false;
+
+		rc = tf_tbl_sram_bind(tfp);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "SRAM table initialization failure\n");
+			goto fail;
+		}
 	}
 
 	rsv_cnt = tf_dev_reservation_check(TF_TCAM_TBL_TYPE_MAX,
@@ -433,6 +445,7 @@ tf_dev_bind_p58(struct tf *tfp,
 		tcam_cfg.cfg = tf_tcam_p58;
 		tcam_cfg.shadow_copy = shadow_copy;
 		tcam_cfg.resources = resources;
+		tcam_cfg.wc_num_slices = wc_num_slices;
 #ifdef TF_TCAM_SHARED
 		rc = tf_tcam_shared_bind(tfp, &tcam_cfg);
 #else /* !TF_TCAM_SHARED */
@@ -565,6 +578,18 @@ tf_dev_unbind_p58(struct tf *tfp)
 		fail = true;
 	}
 
+	/* Unbind the SRAM table manager prior to the table manager, as
+	 * the table manager owns and frees the table DB while the SRAM
+	 * table manager owns and manages its internal data structures.
+	 * The SRAM table manager relies on the table rm_db to exist.
+	 */
+	rc = tf_tbl_sram_unbind(tfp);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Device unbind failed, SRAM table\n");
+		fail = true;
+	}
+
 	rc = tf_tbl_unbind(tfp);
 	if (rc) {
 		TFP_DRV_LOG(INFO,
@@ -606,6 +631,7 @@ tf_dev_bind(struct tf *tfp __rte_unused,
 	    enum tf_device_type type,
 	    bool shadow_copy,
 	    struct tf_session_resources *resources,
+	    uint16_t wc_num_slices,
 	    struct tf_dev_info *dev_handle)
 {
 	switch (type) {
@@ -615,13 +641,15 @@ tf_dev_bind(struct tf *tfp __rte_unused,
 		return tf_dev_bind_p4(tfp,
 				      shadow_copy,
 				      resources,
-				      dev_handle);
+				      dev_handle,
+				      wc_num_slices);
 	case TF_DEVICE_TYPE_THOR:
 		dev_handle->type = type;
 		return tf_dev_bind_p58(tfp,
 				       shadow_copy,
 				       resources,
-				       dev_handle);
+				       dev_handle,
+				       wc_num_slices);
 	default:
 		TFP_DRV_LOG(ERR,
 			    "No such device\n");
diff --git a/drivers/net/bnxt/tf_core/tf_device.h b/drivers/net/bnxt/tf_core/tf_device.h
index b43cfc6925..9b0c037db0 100644
--- a/drivers/net/bnxt/tf_core/tf_device.h
+++ b/drivers/net/bnxt/tf_core/tf_device.h
@@ -57,6 +57,9 @@ struct tf_dev_info {
  * [in] resources
  *   Pointer to resource allocation information
  *
+ * [in] wc_num_slices
+ *   Number of slices per row for WC
+ *
  * [out] dev_handle
  *   Device handle
  *
@@ -69,6 +72,7 @@ int tf_dev_bind(struct tf *tfp,
 		enum tf_device_type type,
 		bool shadow_copy,
 		struct tf_session_resources *resources,
+		uint16_t wc_num_slices,
 		struct tf_dev_info *dev_handle);
 
 /**
@@ -139,6 +143,23 @@ struct tf_dev_ops {
 				       uint16_t resource_id,
 				       const char **resource_str);
 
+	/**
+	 * Set the WC TCAM slice information that the device
+	 * supports.
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] num_slices_per_row
+	 *   Number of slices per row the device supports
+	 *
+	 * Returns
+	 *   - (0) if successful.
+	 *   - (-EINVAL) on failure.
+	 */
+	int (*tf_dev_set_tcam_slice_info)(struct tf *tfp,
+					  enum tf_wc_num_slice num_slices_per_row);
+
 	/**
 	 * Retrieves the WC TCAM slice information that the device
 	 * supports.
@@ -241,6 +262,22 @@ struct tf_dev_ops {
 	int (*tf_dev_get_ident_resc_info)(struct tf *tfp,
 					  struct tf_identifier_resource_info *parms);
 
+	/**
+	 * Indicates whether the index table type is SRAM managed
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] type
+	 *   Truflow index table type, e.g. TF_TBL_TYPE_FULL_ACT_RECORD
+	 *
+	 * Returns
+	 *   - (0) if the table is not managed by the SRAM manager
+	 *   - (1) if the table is managed by the SRAM manager
+	 */
+	bool (*tf_dev_is_sram_managed)(struct tf *tfp,
+				       enum tf_tbl_type tbl_type);
+
 	/**
 	 * Get SRAM table information.
 	 *
@@ -289,6 +326,25 @@ struct tf_dev_ops {
 	int (*tf_dev_alloc_tbl)(struct tf *tfp,
 				struct tf_tbl_alloc_parms *parms);
 
+	/**
+	 * Allocation of an SRAM index table type element.
+	 *
+	 * This API allocates the specified table type element from a
+	 * device specific table type DB. The allocated element is
+	 * returned.
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to table allocation parameters
+	 *
+	 * Returns
+	 *   - (0) if successful.
+	 *   - (-EINVAL) on failure.
+	 */
+	int (*tf_dev_alloc_sram_tbl)(struct tf *tfp,
+				     struct tf_tbl_alloc_parms *parms);
 	/**
 	 * Allocation of a external table type element.
 	 *
@@ -327,7 +383,24 @@ struct tf_dev_ops {
 	 */
 	int (*tf_dev_free_tbl)(struct tf *tfp,
 			       struct tf_tbl_free_parms *parms);
-
+	/**
+	 * Free of an SRAM table type element.
+	 *
+	 * This API frees a previously allocated table type element from a
+	 * device specific table type DB.
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to table free parameters
+	 *
+	 * Returns
+	 *   - (0) if successful.
+	 *   - (-EINVAL) on failure.
+	 */
+	int (*tf_dev_free_sram_tbl)(struct tf *tfp,
+				    struct tf_tbl_free_parms *parms);
 	/**
 	 * Free of a external table type element.
 	 *
@@ -385,6 +458,25 @@ struct tf_dev_ops {
 	int (*tf_dev_set_ext_tbl)(struct tf *tfp,
 				  struct tf_tbl_set_parms *parms);
 
+	/**
+	 * Sets the specified SRAM table type element.
+	 *
+	 * This API sets the specified element data by invoking the
+	 * firmware.
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to table set parameters
+	 *
+	 * Returns
+	 *   - (0) if successful.
+	 *   - (-EINVAL) on failure.
+	 */
+	int (*tf_dev_set_sram_tbl)(struct tf *tfp,
+				   struct tf_tbl_set_parms *parms);
+
 	/**
 	 * Retrieves the specified table type element.
 	 *
@@ -404,6 +496,25 @@ struct tf_dev_ops {
 	int (*tf_dev_get_tbl)(struct tf *tfp,
 			      struct tf_tbl_get_parms *parms);
 
+	/**
+	 * Retrieves the specified SRAM table type element.
+	 *
+	 * This API retrieves the specified element data by invoking the
+	 * firmware.
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to table get parameters
+	 *
+	 * Returns
+	 *   - (0) if successful.
+	 *   - (-EINVAL) on failure.
+	 */
+	int (*tf_dev_get_sram_tbl)(struct tf *tfp,
+				   struct tf_tbl_get_parms *parms);
+
 	/**
 	 * Retrieves the specified table type element using 'bulk'
 	 * mechanism.
@@ -424,6 +535,26 @@ struct tf_dev_ops {
 	int (*tf_dev_get_bulk_tbl)(struct tf *tfp,
 				   struct tf_tbl_get_bulk_parms *parms);
 
+	/**
+	 * Retrieves the specified SRAM table type element using 'bulk'
+	 * mechanism.
+	 *
+	 * This API retrieves the specified element data by invoking the
+	 * firmware.
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to table get bulk parameters
+	 *
+	 * Returns
+	 *   - (0) if successful.
+	 *   - (-EINVAL) on failure.
+	 */
+	int (*tf_dev_get_bulk_sram_tbl)(struct tf *tfp,
+					struct tf_tbl_get_bulk_parms *parms);
+
 	/**
 	 * Gets the increment value to add to the shared session resource
 	 * start offset by for each count in the "stride"
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index 2e7ccec123..826cd0cdbc 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -118,14 +118,48 @@ tf_dev_p4_get_resource_str(struct tf *tfp __rte_unused,
 }
 
 /**
- * Device specific function that retrieves the WC TCAM slices the
+ * Device specific function that sets the WC TCAM slices the
  * device supports.
  *
  * [in] tfp
  *   Pointer to TF handle
  *
- * [out] slice_size
- *   Pointer to the WC TCAM slice size
+ * [in] num_slices_per_row
+ *   The WC TCAM row slice configuration
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+static int
+tf_dev_p4_set_tcam_slice_info(struct tf *tfp __rte_unused,
+			      enum tf_wc_num_slice num_slices_per_row)
+{
+	switch (num_slices_per_row) {
+	case TF_WC_TCAM_1_SLICE_PER_ROW:
+	case TF_WC_TCAM_2_SLICE_PER_ROW:
+	case TF_WC_TCAM_4_SLICE_PER_ROW:
+		g_wc_num_slices_per_row = num_slices_per_row;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * Device specific function that retrieves the TCAM slices the
+ * device supports.
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] type
+ *   TF TCAM type
+ *
+ * [in] key_sz
+ *   The key size
  *
  * [out] num_slices_per_row
  *   Pointer to the WC TCAM row slice configuration
@@ -141,11 +175,10 @@ tf_dev_p4_get_tcam_slice_info(struct tf *tfp __rte_unused,
 			      uint16_t *num_slices_per_row)
 {
 /* Single slice support */
-#define CFA_P4_WC_TCAM_SLICES_PER_ROW 1
 #define CFA_P4_WC_TCAM_SLICE_SIZE     12
 
 	if (type == TF_TCAM_TBL_TYPE_WC_TCAM) {
-		*num_slices_per_row = CFA_P4_WC_TCAM_SLICES_PER_ROW;
+		*num_slices_per_row = g_wc_num_slices_per_row;
 		if (key_sz > *num_slices_per_row * CFA_P4_WC_TCAM_SLICE_SIZE)
 			return -ENOTSUP;
 	} else { /* for other type of tcam */
@@ -220,26 +253,51 @@ static int tf_dev_p4_word_align(uint16_t size)
 	return ((((size) + 31) >> 5) * 4);
 }
 
+/**
+ * Indicates whether the index table type is SRAM managed
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] type
+ *   Truflow index table type, e.g. TF_TBL_TYPE_FULL_ACT_RECORD
+ *
+ * Returns
+ *   - (0) if the table is not managed by the SRAM manager
+ *   - (1) if the table is managed by the SRAM manager
+ */
+static bool tf_dev_p4_is_sram_managed(struct tf *tfp __rte_unused,
+				      enum tf_tbl_type type __rte_unused)
+{
+	return false;
+}
 /**
  * Truflow P4 device specific functions
  */
 const struct tf_dev_ops tf_dev_ops_p4_init = {
 	.tf_dev_get_max_types = tf_dev_p4_get_max_types,
 	.tf_dev_get_resource_str = tf_dev_p4_get_resource_str,
+	.tf_dev_set_tcam_slice_info = tf_dev_p4_set_tcam_slice_info,
 	.tf_dev_get_tcam_slice_info = tf_dev_p4_get_tcam_slice_info,
 	.tf_dev_alloc_ident = NULL,
 	.tf_dev_free_ident = NULL,
 	.tf_dev_search_ident = NULL,
 	.tf_dev_get_ident_resc_info = NULL,
 	.tf_dev_get_tbl_info = NULL,
+	.tf_dev_is_sram_managed = tf_dev_p4_is_sram_managed,
 	.tf_dev_alloc_ext_tbl = NULL,
 	.tf_dev_alloc_tbl = NULL,
+	.tf_dev_alloc_sram_tbl = NULL,
 	.tf_dev_free_ext_tbl = NULL,
 	.tf_dev_free_tbl = NULL,
+	.tf_dev_free_sram_tbl = NULL,
 	.tf_dev_set_tbl = NULL,
 	.tf_dev_set_ext_tbl = NULL,
+	.tf_dev_set_sram_tbl = NULL,
 	.tf_dev_get_tbl = NULL,
+	.tf_dev_get_sram_tbl = NULL,
 	.tf_dev_get_bulk_tbl = NULL,
+	.tf_dev_get_bulk_sram_tbl = NULL,
 	.tf_dev_get_shared_tbl_increment = tf_dev_p4_get_shared_tbl_increment,
 	.tf_dev_get_tbl_resc_info = NULL,
 	.tf_dev_alloc_tcam = NULL,
@@ -271,20 +329,27 @@ const struct tf_dev_ops tf_dev_ops_p4_init = {
 const struct tf_dev_ops tf_dev_ops_p4 = {
 	.tf_dev_get_max_types = tf_dev_p4_get_max_types,
 	.tf_dev_get_resource_str = tf_dev_p4_get_resource_str,
+	.tf_dev_set_tcam_slice_info = tf_dev_p4_set_tcam_slice_info,
 	.tf_dev_get_tcam_slice_info = tf_dev_p4_get_tcam_slice_info,
 	.tf_dev_alloc_ident = tf_ident_alloc,
 	.tf_dev_free_ident = tf_ident_free,
 	.tf_dev_search_ident = tf_ident_search,
 	.tf_dev_get_ident_resc_info = tf_ident_get_resc_info,
 	.tf_dev_get_tbl_info = NULL,
+	.tf_dev_is_sram_managed = tf_dev_p4_is_sram_managed,
 	.tf_dev_alloc_tbl = tf_tbl_alloc,
 	.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,
+	.tf_dev_alloc_sram_tbl = tf_tbl_alloc,
 	.tf_dev_free_tbl = tf_tbl_free,
 	.tf_dev_free_ext_tbl = tf_tbl_ext_free,
+	.tf_dev_free_sram_tbl = tf_tbl_free,
 	.tf_dev_set_tbl = tf_tbl_set,
 	.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,
+	.tf_dev_set_sram_tbl = NULL,
 	.tf_dev_get_tbl = tf_tbl_get,
+	.tf_dev_get_sram_tbl = NULL,
 	.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,
+	.tf_dev_get_bulk_sram_tbl = NULL,
 	.tf_dev_get_shared_tbl_increment = tf_dev_p4_get_shared_tbl_increment,
 	.tf_dev_get_tbl_resc_info = tf_tbl_get_resc_info,
 #ifdef TF_TCAM_SHARED
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.h b/drivers/net/bnxt/tf_core/tf_device_p4.h
index a73ba3cd70..c1357913f1 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.h
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.h
@@ -15,101 +15,101 @@
 struct tf_rm_element_cfg tf_ident_p4[TF_IDENT_TYPE_MAX] = {
 	[TF_IDENT_TYPE_L2_CTXT_HIGH] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH,
-		0, 0, 0
+		0, 0
 	},
 	[TF_IDENT_TYPE_L2_CTXT_LOW] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW,
-		0, 0, 0
+		0, 0
 	},
 	[TF_IDENT_TYPE_PROF_FUNC] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_FUNC,
-		0, 0, 0
+		0, 0
 	},
 	[TF_IDENT_TYPE_WC_PROF] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID,
-		0, 0, 0
+		0, 0
 	},
 	[TF_IDENT_TYPE_EM_PROF] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_EM_PROF_ID,
-		0, 0, 0
+		0, 0
 	},
 };
 
 struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = {
 	[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TCAM_TBL_TYPE_PROF_TCAM] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_TCAM,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TCAM_TBL_TYPE_WC_TCAM] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TCAM_TBL_TYPE_SP_TCAM] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_TCAM,
-		0, 0, 0
+		0, 0
 	},
 };
 
 struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
 	[TF_TBL_TYPE_FULL_ACT_RECORD] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_MCAST_GROUPS] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_ACT_ENCAP_8B] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_ACT_ENCAP_16B] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_ACT_ENCAP_64B] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_ACT_SP_SMAC] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_ACT_STATS_64] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_ACT_MODIFY_IPV4] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_IPV4,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_METER_PROF] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_METER_INST] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_MIRROR_CONFIG] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR,
-		0, 0, 0
+		0, 0
 	},
 
 };
@@ -117,14 +117,14 @@ struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
 struct tf_rm_element_cfg tf_em_ext_p4[TF_EM_TBL_TYPE_MAX] = {
 	[TF_EM_TBL_TYPE_TBL_SCOPE] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_TBL_SCOPE,
-		0, 0, 0
+		0, 0
 	},
 };
 
 struct tf_rm_element_cfg tf_em_int_p4[TF_EM_TBL_TYPE_MAX] = {
 	[TF_EM_TBL_TYPE_EM_RECORD] = {
 		TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC,
-		0, 0, 0
+		0, 0
 	},
 };
 
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.c b/drivers/net/bnxt/tf_core/tf_device_p58.c
index a492c62bff..47d7836a58 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.c
@@ -17,6 +17,7 @@
 #include "tf_if_tbl.h"
 #include "tfp.h"
 #include "tf_msg_common.h"
+#include "tf_tbl_sram.h"
 
 #define TF_DEV_P58_PARIF_MAX 16
 #define TF_DEV_P58_PF_MASK 0xfUL
@@ -105,14 +106,48 @@ tf_dev_p58_get_resource_str(struct tf *tfp __rte_unused,
 }
 
 /**
- * Device specific function that retrieves the WC TCAM slices the
+ * Device specific function that sets the WC TCAM slices the
  * device supports.
  *
  * [in] tfp
  *   Pointer to TF handle
  *
- * [out] slice_size
- *   Pointer to the WC TCAM slice size
+ * [in] num_slices_per_row
+ *   The WC TCAM row slice configuration
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+static int
+tf_dev_p58_set_tcam_slice_info(struct tf *tfp __rte_unused,
+			       enum tf_wc_num_slice num_slices_per_row)
+{
+	switch (num_slices_per_row) {
+	case TF_WC_TCAM_1_SLICE_PER_ROW:
+	case TF_WC_TCAM_2_SLICE_PER_ROW:
+	case TF_WC_TCAM_4_SLICE_PER_ROW:
+		g_wc_num_slices_per_row = num_slices_per_row;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * Device specific function that retrieves the TCAM slices the
+ * device supports.
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] type
+ *   TF TCAM type
+ *
+ * [in] key_sz
+ *   The key size
  *
  * [out] num_slices_per_row
  *   Pointer to the WC TCAM row slice configuration
@@ -123,16 +158,13 @@ tf_dev_p58_get_resource_str(struct tf *tfp __rte_unused,
  */
 static int
 tf_dev_p58_get_tcam_slice_info(struct tf *tfp __rte_unused,
-			      enum tf_tcam_tbl_type type,
-			      uint16_t key_sz,
-			      uint16_t *num_slices_per_row)
+			       enum tf_tcam_tbl_type type,
+			       uint16_t key_sz,
+			       uint16_t *num_slices_per_row)
 {
-#define CFA_P58_WC_TCAM_SLICES_PER_ROW 1
 #define CFA_P58_WC_TCAM_SLICE_SIZE     24
-
 	if (type == TF_TCAM_TBL_TYPE_WC_TCAM) {
-		/* only support single slice key size now */
-		*num_slices_per_row = CFA_P58_WC_TCAM_SLICES_PER_ROW;
+		*num_slices_per_row = g_wc_num_slices_per_row;
 		if (key_sz > *num_slices_per_row * CFA_P58_WC_TCAM_SLICE_SIZE)
 			return -ENOTSUP;
 	} else { /* for other type of tcam */
@@ -194,6 +226,44 @@ static int tf_dev_p58_get_shared_tbl_increment(struct tf *tfp __rte_unused,
 	return 0;
 }
 
+/**
+ * Indicates whether the index table type is SRAM managed
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] type
+ *   Truflow index table type, e.g. TF_TBL_TYPE_FULL_ACT_RECORD
+ *
+ * Returns
+ *   - (0) if the table is not managed by the SRAM manager
+ *   - (1) if the table is managed by the SRAM manager
+ */
+static bool tf_dev_p58_is_sram_managed(struct tf *tfp __rte_unused,
+				       enum tf_tbl_type type)
+{
+	switch (type) {
+	case TF_TBL_TYPE_FULL_ACT_RECORD:
+	case TF_TBL_TYPE_COMPACT_ACT_RECORD:
+	case TF_TBL_TYPE_ACT_ENCAP_8B:
+	case TF_TBL_TYPE_ACT_ENCAP_16B:
+	case TF_TBL_TYPE_ACT_ENCAP_32B:
+	case TF_TBL_TYPE_ACT_ENCAP_64B:
+	case TF_TBL_TYPE_ACT_SP_SMAC:
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+	case TF_TBL_TYPE_ACT_STATS_64:
+	case TF_TBL_TYPE_ACT_MODIFY_IPV4:
+	case TF_TBL_TYPE_ACT_MODIFY_8B:
+	case TF_TBL_TYPE_ACT_MODIFY_16B:
+	case TF_TBL_TYPE_ACT_MODIFY_32B:
+	case TF_TBL_TYPE_ACT_MODIFY_64B:
+		return true;
+	default:
+		return false;
+	}
+}
+
 #define TF_DEV_P58_BANK_SZ_64B 2048
 /**
  * Get SRAM table information.
@@ -265,26 +335,34 @@ static int tf_dev_p58_get_sram_tbl_info(struct tf *tfp __rte_unused,
 	}
 	return 0;
 }
+
 /**
  * Truflow P58 device specific functions
  */
 const struct tf_dev_ops tf_dev_ops_p58_init = {
 	.tf_dev_get_max_types = tf_dev_p58_get_max_types,
 	.tf_dev_get_resource_str = tf_dev_p58_get_resource_str,
+	.tf_dev_set_tcam_slice_info = tf_dev_p58_set_tcam_slice_info,
 	.tf_dev_get_tcam_slice_info = tf_dev_p58_get_tcam_slice_info,
 	.tf_dev_alloc_ident = NULL,
 	.tf_dev_free_ident = NULL,
 	.tf_dev_search_ident = NULL,
 	.tf_dev_get_ident_resc_info = NULL,
 	.tf_dev_get_tbl_info = NULL,
+	.tf_dev_is_sram_managed = tf_dev_p58_is_sram_managed,
 	.tf_dev_alloc_ext_tbl = NULL,
 	.tf_dev_alloc_tbl = NULL,
+	.tf_dev_alloc_sram_tbl = NULL,
 	.tf_dev_free_ext_tbl = NULL,
 	.tf_dev_free_tbl = NULL,
+	.tf_dev_free_sram_tbl = NULL,
 	.tf_dev_set_tbl = NULL,
 	.tf_dev_set_ext_tbl = NULL,
+	.tf_dev_set_sram_tbl = NULL,
 	.tf_dev_get_tbl = NULL,
+	.tf_dev_get_sram_tbl = NULL,
 	.tf_dev_get_bulk_tbl = NULL,
+	.tf_dev_get_bulk_sram_tbl = NULL,
 	.tf_dev_get_shared_tbl_increment = tf_dev_p58_get_shared_tbl_increment,
 	.tf_dev_get_tbl_resc_info = NULL,
 	.tf_dev_alloc_tcam = NULL,
@@ -316,20 +394,27 @@ const struct tf_dev_ops tf_dev_ops_p58_init = {
 const struct tf_dev_ops tf_dev_ops_p58 = {
 	.tf_dev_get_max_types = tf_dev_p58_get_max_types,
 	.tf_dev_get_resource_str = tf_dev_p58_get_resource_str,
+	.tf_dev_set_tcam_slice_info = tf_dev_p58_set_tcam_slice_info,
 	.tf_dev_get_tcam_slice_info = tf_dev_p58_get_tcam_slice_info,
 	.tf_dev_alloc_ident = tf_ident_alloc,
 	.tf_dev_free_ident = tf_ident_free,
 	.tf_dev_search_ident = tf_ident_search,
 	.tf_dev_get_ident_resc_info = tf_ident_get_resc_info,
+	.tf_dev_is_sram_managed = tf_dev_p58_is_sram_managed,
 	.tf_dev_get_tbl_info = tf_dev_p58_get_sram_tbl_info,
 	.tf_dev_alloc_tbl = tf_tbl_alloc,
+	.tf_dev_alloc_sram_tbl = tf_tbl_sram_alloc,
 	.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,
 	.tf_dev_free_tbl = tf_tbl_free,
 	.tf_dev_free_ext_tbl = tf_tbl_ext_free,
+	.tf_dev_free_sram_tbl = tf_tbl_sram_free,
 	.tf_dev_set_tbl = tf_tbl_set,
 	.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,
+	.tf_dev_set_sram_tbl = tf_tbl_sram_set,
 	.tf_dev_get_tbl = tf_tbl_get,
+	.tf_dev_get_sram_tbl = tf_tbl_sram_get,
 	.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,
+	.tf_dev_get_bulk_sram_tbl = tf_tbl_sram_bulk_get,
 	.tf_dev_get_shared_tbl_increment = tf_dev_p58_get_shared_tbl_increment,
 	.tf_dev_get_tbl_resc_info = tf_tbl_get_resc_info,
 #ifdef TF_TCAM_SHARED
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.h b/drivers/net/bnxt/tf_core/tf_device_p58.h
index 8c2e07aa34..3e8759f2df 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.h
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.h
@@ -15,107 +15,107 @@
 struct tf_rm_element_cfg tf_ident_p58[TF_IDENT_TYPE_MAX] = {
 	[TF_IDENT_TYPE_L2_CTXT_HIGH] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH,
-		0, 0, 0
+		0, 0
 	},
 	[TF_IDENT_TYPE_L2_CTXT_LOW] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW,
-		0, 0, 0
+		0, 0
 	},
 	[TF_IDENT_TYPE_PROF_FUNC] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_FUNC,
-		0, 0, 0
+		0, 0
 	},
 	[TF_IDENT_TYPE_WC_PROF] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID,
-		0, 0, 0
+		0, 0
 	},
 	[TF_IDENT_TYPE_EM_PROF] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_PROF_ID,
-		0, 0, 0
+		0, 0
 	},
 };
 
 struct tf_rm_element_cfg tf_tcam_p58[TF_TCAM_TBL_TYPE_MAX] = {
 	[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TCAM_TBL_TYPE_PROF_TCAM] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_TCAM,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TCAM_TBL_TYPE_WC_TCAM] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TCAM_TBL_TYPE_VEB_TCAM] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_VEB_TCAM,
-		0, 0, 0
+		0, 0
 	},
 };
 
 struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {
 	[TF_TBL_TYPE_EM_FKB] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_FKB,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_WC_FKB] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_FKB,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_METER_PROF] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_PROF,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_METER_INST] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_METER_DROP_CNT] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_DROP_CNT,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_MIRROR_CONFIG] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR,
-		0, 0, 0
+		0, 0
 	},
 	[TF_TBL_TYPE_METADATA] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METADATA,
-		0, 0, 0
+		0, 0
 	},
 	/* Policy - ARs in bank 1 */
 	[TF_TBL_TYPE_FULL_ACT_RECORD] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,
-		.slices          = 1,
+		.slices          = 4,
 	},
 	[TF_TBL_TYPE_COMPACT_ACT_RECORD] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
 		.parent_subtype  = TF_TBL_TYPE_FULL_ACT_RECORD,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,
-		.slices          = 1,
+		.slices          = 8,
 	},
 	/* Policy - Encaps in bank 2 */
 	[TF_TBL_TYPE_ACT_ENCAP_8B] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
-		.slices          = 1,
+		.slices          = 8,
 	},
 	[TF_TBL_TYPE_ACT_ENCAP_16B] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
 		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
-		.slices          = 1,
+		.slices          = 4,
 	},
 	[TF_TBL_TYPE_ACT_ENCAP_32B] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
 		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
-		.slices          = 1,
+		.slices          = 2,
 	},
 	[TF_TBL_TYPE_ACT_ENCAP_64B] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
@@ -128,19 +128,19 @@ struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
 		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
-		.slices          = 1,
+		.slices          = 8,
 	},
 	[TF_TBL_TYPE_ACT_MODIFY_16B] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
 		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
-		.slices          = 1,
+		.slices          = 4,
 	},
 	[TF_TBL_TYPE_ACT_MODIFY_32B] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
 		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
-		.slices          = 1,
+		.slices          = 2,
 	},
 	[TF_TBL_TYPE_ACT_MODIFY_64B] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
@@ -152,32 +152,32 @@ struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {
 	[TF_TBL_TYPE_ACT_SP_SMAC] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
-		.slices          = 1,
+		.slices          = 8,
 	},
 	[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
 		.parent_subtype  = TF_TBL_TYPE_ACT_SP_SMAC,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
-		.slices          = 1,
+		.slices          = 4,
 	},
 	[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
 		.parent_subtype  = TF_TBL_TYPE_ACT_SP_SMAC,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
-		.slices          = 1,
+		.slices          = 2,
 	},
 	/* Policy - Stats in bank 3 */
 	[TF_TBL_TYPE_ACT_STATS_64] = {
 		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
 		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_3,
-		.slices          = 1,
+		.slices          = 8,
 	},
 };
 
 struct tf_rm_element_cfg tf_em_int_p58[TF_EM_TBL_TYPE_MAX] = {
 	[TF_EM_TBL_TYPE_EM_RECORD] = {
 		TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P58_EM_REC,
-		0, 0, 0
+		0, 0
 	},
 };
 
diff --git a/drivers/net/bnxt/tf_core/tf_msg.c b/drivers/net/bnxt/tf_core/tf_msg.c
index e07d9168be..0fbb2fe837 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.c
+++ b/drivers/net/bnxt/tf_core/tf_msg.c
@@ -2231,7 +2231,7 @@ tf_msg_get_if_tbl_entry(struct tf *tfp,
 	if (rc != 0)
 		return rc;
 
-	tfp_memcpy(params->data, resp.data, req.size);
+	tfp_memcpy(&params->data[0], resp.data, req.size);
 
 	return 0;
 }
diff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c
index 0a46e2a343..03c958a7d6 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.c
+++ b/drivers/net/bnxt/tf_core/tf_rm.c
@@ -34,6 +34,12 @@ struct tf_rm_element {
 	 */
 	uint16_t hcapi_type;
 
+	/**
+	 * Resource slices.  How many slices will fit in the
+	 * resource pool chunk size.
+	 */
+	uint8_t slices;
+
 	/**
 	 * HCAPI RM allocated range information for the element.
 	 */
@@ -356,12 +362,15 @@ tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
  *     -          - Failure if negative
  */
 static int
-tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
+tf_rm_update_parent_reservations(struct tf *tfp,
+				 struct tf_dev_info *dev,
+				 struct tf_rm_element_cfg *cfg,
 				 uint16_t *alloc_cnt,
 				 uint16_t num_elements,
 				 uint16_t *req_cnt)
 {
 	int parent, child;
+	const char *type_str;
 
 	/* Search through all the elements */
 	for (parent = 0; parent < num_elements; parent++) {
@@ -377,15 +386,25 @@ tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
 			if (alloc_cnt[parent] % cfg[parent].slices)
 				combined_cnt++;
 
+			if (alloc_cnt[parent]) {
+				dev->ops->tf_dev_get_resource_str(tfp,
+							 cfg[parent].hcapi_type,
+							 &type_str);
+			}
+
 			/* Search again through all the elements */
 			for (child = 0; child < num_elements; child++) {
 				/* If this is one of my children */
 				if (cfg[child].cfg_type ==
 				    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
-				    cfg[child].parent_subtype == parent) {
+				    cfg[child].parent_subtype == parent &&
+				    alloc_cnt[child]) {
 					uint16_t cnt = 0;
 					RTE_ASSERT(cfg[child].slices);
 
+					dev->ops->tf_dev_get_resource_str(tfp,
+							  cfg[child].hcapi_type,
+							   &type_str);
 					/* Increment the parents combined count
 					 * with each child's count adjusted for
 					 * number of slices per RM allocated item.
@@ -479,7 +498,7 @@ tf_rm_create_db(struct tf *tfp,
 
 	/* Update the req_cnt based upon the element configuration
 	 */
-	tf_rm_update_parent_reservations(parms->cfg,
+	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
 					 parms->alloc_cnt,
 					 parms->num_elements,
 					 req_cnt);
@@ -594,6 +613,7 @@ tf_rm_create_db(struct tf *tfp,
 
 		db[i].cfg_type = cfg->cfg_type;
 		db[i].hcapi_type = cfg->hcapi_type;
+		db[i].slices = cfg->slices;
 
 		/* Save the parent subtype for later use to find the pool
 		 */
@@ -1271,6 +1291,26 @@ tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
 
 	return 0;
 }
+int
+tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)
+{
+	struct tf_rm_new_db *rm_db;
+	enum tf_rm_elem_cfg_type cfg_type;
+
+	TF_CHECK_PARMS2(parms, parms->rm_db);
+	rm_db = (struct tf_rm_new_db *)parms->rm_db;
+	TF_CHECK_PARMS1(rm_db->db);
+
+	cfg_type = rm_db->db[parms->subtype].cfg_type;
+
+	/* Bail out if not controlled by HCAPI */
+	if (cfg_type == TF_RM_ELEM_CFG_NULL)
+		return -ENOTSUP;
+
+	*parms->slices = rm_db->db[parms->subtype].slices;
+
+	return 0;
+}
 
 int
 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
diff --git a/drivers/net/bnxt/tf_core/tf_rm.h b/drivers/net/bnxt/tf_core/tf_rm.h
index 8b984112e8..da7d0c7211 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.h
+++ b/drivers/net/bnxt/tf_core/tf_rm.h
@@ -43,16 +43,6 @@ struct tf;
  * support module, not called directly.
  */
 
-/**
- * Resource reservation single entry result. Used when accessing HCAPI
- * RM on the firmware.
- */
-struct tf_rm_new_entry {
-	/** Starting index of the allocated resource */
-	uint16_t start;
-	/** Number of allocated elements */
-	uint16_t stride;
-};
 
 /**
  * RM Element configuration enumeration. Used by the Device to
@@ -114,10 +104,6 @@ struct tf_rm_element_cfg {
 	 */
 	enum tf_rm_elem_cfg_type cfg_type;
 
-	/* If a HCAPI to TF type conversion is required then TF type
-	 * can be added here.
-	 */
-
 	/**
 	 * HCAPI RM Type for the element. Used for TF to HCAPI type
 	 * conversion.
@@ -125,28 +111,19 @@ struct tf_rm_element_cfg {
 	uint16_t hcapi_type;
 
 	/**
-	 * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD
+	 * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD/PARENT
 	 *
 	 * Parent Truflow module subtype associated with this resource type.
 	 */
 	uint16_t parent_subtype;
 
 	/**
-	 * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD
+	 * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD/PARENT
 	 *
 	 * Resource slices.  How many slices will fit in the
 	 * resource pool chunk size.
 	 */
 	uint8_t slices;
-
-	/**
-	 * Pool element divider count
-	 * If 0 or 1, there is 1:1 correspondence between the RM
-	 * BA pool resource element and the HCAPI RM firmware
-	 * resource.  If > 1, the RM BA pool element has a 1:n
-	 * correspondence to the HCAPI RM firmware resource.
-	 */
-	uint8_t divider;
 };
 
 /**
@@ -160,7 +137,7 @@ struct tf_rm_alloc_info {
 	 * In case of dynamic allocation support this would have
 	 * to be changed to linked list of tf_rm_entry instead.
 	 */
-	struct tf_rm_new_entry entry;
+	struct tf_resource_info entry;
 };
 
 /**
@@ -331,6 +308,25 @@ struct tf_rm_get_hcapi_parms {
 	 */
 	uint16_t *hcapi_type;
 };
+/**
+ * Get Slices parameters for a single element
+ */
+struct tf_rm_get_slices_parms {
+	/**
+	 * [in] RM DB Handle
+	 */
+	void *rm_db;
+	/**
+	 * [in] TF subtype indicates which DB entry to perform the
+	 * action on. (e.g. TF_TBL_TYPE_FULL_ACTION subtype of module
+	 * TF_MODULE_TYPE_TABLE)
+	 */
+	uint16_t subtype;
+	/**
+	 * [in/out] Pointer to number of slices for the given type
+	 */
+	uint16_t *slices;
+};
 
 /**
  * Get InUse count parameters for single element
@@ -394,6 +390,8 @@ struct tf_rm_check_indexes_in_range_parms {
  * @ref tf_rm_get_hcapi_type
  *
  * @ref tf_rm_get_inuse_count
+ *
+ * @ref tf_rm_get_slices
  */
 
 /**
@@ -571,5 +569,17 @@ int tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms);
 int
 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms);
 
+/**
+ * Get the number of slices per resource bit allocator for the resource type
+ *
+ * [in] parms
+ *   Pointer to get slices parameters
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int
+tf_rm_get_slices(struct tf_rm_get_slices_parms *parms);
 
 #endif /* TF_RM_NEW_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_session.c b/drivers/net/bnxt/tf_core/tf_session.c
index 90b65c59e6..3e6664e9f2 100644
--- a/drivers/net/bnxt/tf_core/tf_session.c
+++ b/drivers/net/bnxt/tf_core/tf_session.c
@@ -202,6 +202,7 @@ tf_session_create(struct tf *tfp,
 			 parms->open_cfg->device_type,
 			 session->shadow_copy,
 			 &parms->open_cfg->resources,
+			 parms->open_cfg->wc_num_slices,
 			 &session->dev);
 
 	/* Logging handled by dev_bind */
@@ -705,6 +706,22 @@ tf_session_get_session(struct tf *tfp,
 	return rc;
 }
 
+int tf_session_get(struct tf *tfp,
+		   struct tf_session **tfs,
+		   struct tf_dev_info **tfd)
+{
+	int rc;
+	rc = tf_session_get_session_internal(tfp, tfs);
+
+	/* Logging done by tf_session_get_session_internal */
+	if (rc)
+		return rc;
+
+	rc = tf_session_get_device(*tfs, tfd);
+
+	return rc;
+}
+
 struct tf_session_client *
 tf_session_get_session_client(struct tf_session *tfs,
 			      union tf_session_client_id session_client_id)
@@ -1012,4 +1029,43 @@ tf_session_set_tcam_shared_db(struct tf *tfp,
 	tfs->tcam_shared_db_handle = tcam_shared_db_handle;
 	return rc;
 }
+
+int
+tf_session_get_sram_db(struct tf *tfp,
+		       void **sram_handle)
+{
+	struct tf_session *tfs = NULL;
+	int rc = 0;
+
+	*sram_handle = NULL;
+
+	if (tfp == NULL)
+		return (-EINVAL);
+
+	rc = tf_session_get_session_internal(tfp, &tfs);
+	if (rc)
+		return rc;
+
+	*sram_handle = tfs->sram_handle;
+	return rc;
+}
+
+int
+tf_session_set_sram_db(struct tf *tfp,
+		       void *sram_handle)
+{
+	struct tf_session *tfs = NULL;
+	int rc = 0;
+
+	if (tfp == NULL)
+		return (-EINVAL);
+
+	rc = tf_session_get_session_internal(tfp, &tfs);
+	if (rc)
+		return rc;
+
+	tfs->sram_handle = sram_handle;
+	return rc;
+}
+
 #endif /* TF_TCAM_SHARED */
diff --git a/drivers/net/bnxt/tf_core/tf_session.h b/drivers/net/bnxt/tf_core/tf_session.h
index d68421cd13..c1d7f70060 100644
--- a/drivers/net/bnxt/tf_core/tf_session.h
+++ b/drivers/net/bnxt/tf_core/tf_session.h
@@ -166,6 +166,10 @@ struct tf_session {
 	 */
 	void *tcam_shared_db_handle;
 #endif /* TF_TCAM_SHARED */
+	/**
+	 * SRAM db reference for the session
+	 */
+	void *sram_handle;
 };
 
 /**
@@ -278,6 +282,10 @@ struct tf_session_close_session_parms {
  *
  * @ref tf_session_set_tcam_shared_db
  * #endif
+ *
+ * @ref tf_session_get_sram_db
+ *
+ * @ref tf_session_set_sram_db
  */
 
 /**
@@ -435,11 +443,11 @@ tf_session_find_session_client_by_fid(struct tf_session *tfs,
 /**
  * Looks up the device information from the TF Session.
  *
- * [in] tfp
- *   Pointer to TF handle
+ * [in] tfs
+ *   Pointer to session handle
  *
  * [out] tfd
- *   Pointer pointer to the device
+ *   Pointer to the device
  *
  * Returns
  *   - (0) if successful.
@@ -448,6 +456,26 @@ tf_session_find_session_client_by_fid(struct tf_session *tfs,
 int tf_session_get_device(struct tf_session *tfs,
 			  struct tf_dev_info **tfd);
 
+/**
+ * Returns the session and the device from the tfp.
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [out] tfs
+ *   Pointer to the session
+ *
+ * [out] tfd
+ *   Pointer to the device
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int tf_session_get(struct tf *tfp,
+		   struct tf_session **tfs,
+		   struct tf_dev_info **tfd);
+
 /**
  * Looks up the FW Session id the requested TF handle.
  *
@@ -614,4 +642,28 @@ int
 tf_session_get_tcam_shared_db(struct tf *tfp,
 			      void **tcam_shared_db_handle);
 
+/**
+ * Set the pointer to the SRAM database
+ *
+ * [in] tfp, pointer to the TF handle
+ *
+ * [in] sram_handle, pointer to the SRAM database handle to store
+ *
+ * Returns:
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int
+tf_session_set_sram_db(struct tf *tfp,
+		       void *sram_handle);
+
+/**
+ * Get the pointer to the SRAM database
+ *
+ * [in] tfp, pointer to the TF handle
+ *
+ * [out] sram_handle, pointer to where the SRAM database handle is returned
+ *
+ * Returns:
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int
+tf_session_get_sram_db(struct tf *tfp,
+		       void **sram_handle);
+
 #endif /* _TF_SESSION_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_sram_mgr.c b/drivers/net/bnxt/tf_core/tf_sram_mgr.c
new file mode 100644
index 0000000000..f633a78b25
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_sram_mgr.c
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include "tf_sram_mgr.h"
+#include "tf_core.h"
+#include "tf_rm.h"
+#include "tf_common.h"
+#include "assert.h"
+#include "tf_util.h"
+#include "tfp.h"
+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)
+#include "tf_msg.h"
+#endif
+/***************************
+ * Internal Data Structures
+ ***************************/
+
+/**
+ * TF SRAM block info
+ *
+ * Contains all the information about a particular 64B SRAM
+ * block and the slices within it.
+ */
+struct tf_sram_block {
+	/* Previous block
+	 */
+	struct tf_sram_block *prev;
+	/* Next block
+	 */
+	struct tf_sram_block *next;
+
+	/** Bitmap indicating which slices are in use
+	 *  If a bit is set, it indicates the slice
+	 *  in the row is in use.
+	 */
+	uint8_t in_use_mask;
+
+	/** Block id - this is a 64B offset
+	 */
+	uint16_t block_id;
+};
+
+/**
+ * TF SRAM block list
+ *
+ * List of 64B SRAM blocks used for fixed size slices (8, 16, 32, 64B)
+ */
+struct tf_sram_slice_list {
+	/** Pointer to head of linked list of blocks.
+	 */
+	struct tf_sram_block *head;
+
+	/** Pointer to tail of linked list of blocks.
+	 */
+	struct tf_sram_block *tail;
+
+	/** Total count of blocks
+	 */
+	uint32_t cnt;
+
+	/** First non-full block in the list
+	 */
+	struct tf_sram_block *first_not_full_block;
+
+	/** Entry slice size for this list
+	 */
+	enum tf_sram_slice_size size;
+};
+
+
+/**
+ * TF SRAM bank info consists of lists of different slice sizes per bank
+ */
+struct tf_sram_bank_info {
+	struct tf_sram_slice_list slice[TF_SRAM_SLICE_SIZE_MAX];
+};
+
+/**
+ * SRAM banks consist of SRAM bank information
+ */
+struct tf_sram_bank {
+	struct tf_sram_bank_info bank[TF_SRAM_BANK_ID_MAX];
+};
+
+/**
+ * SRAM consists of the per-direction SRAM bank information
+ */
+struct tf_sram {
+	struct tf_sram_bank dir[TF_DIR_MAX];
+};
+
+/**********************
+ * Internal functions
+ **********************/
+
+/**
+ * Get slice size in string format
+ */
+const char
+*tf_sram_slice_2_str(enum tf_sram_slice_size slice_size)
+{
+	switch (slice_size) {
+	case TF_SRAM_SLICE_SIZE_8B:
+		return "8B slice";
+	case TF_SRAM_SLICE_SIZE_16B:
+		return "16B slice";
+	case TF_SRAM_SLICE_SIZE_32B:
+		return "32B slice";
+	case TF_SRAM_SLICE_SIZE_64B:
+		return "64B slice";
+	default:
+		return "Invalid slice size";
+	}
+}
+
+/**
+ * Get bank in string format
+ */
+const char
+*tf_sram_bank_2_str(enum tf_sram_bank_id bank_id)
+{
+	switch (bank_id) {
+	case TF_SRAM_BANK_ID_0:
+		return "bank_0";
+	case TF_SRAM_BANK_ID_1:
+		return "bank_1";
+	case TF_SRAM_BANK_ID_2:
+		return "bank_2";
+	case TF_SRAM_BANK_ID_3:
+		return "bank_3";
+	default:
+		return "Invalid bank_id";
+	}
+}
+
+/**
+ * TF SRAM get slice list
+ */
+static int
+tf_sram_get_slice_list(struct tf_sram *sram,
+		       struct tf_sram_slice_list **slice_list,
+		       enum tf_sram_slice_size slice_size,
+		       enum tf_dir dir,
+		       enum tf_sram_bank_id bank_id)
+{
+	int rc = 0;
+
+	TF_CHECK_PARMS2(sram, slice_list);
+
+	*slice_list = &sram->dir[dir].bank[bank_id].slice[slice_size];
+
+	return rc;
+}
+
+uint16_t tf_sram_bank_2_base_offset[TF_SRAM_BANK_ID_MAX] = {
+	0,
+	2048,
+	4096,
+	6144
+};
+
+/**
+ * Translate a block id and bank_id to an 8B offset
+ */
+static void
+tf_sram_block_id_2_offset(enum tf_sram_bank_id bank_id, uint16_t block_id,
+			  uint16_t *offset)
+{
+	*offset = (block_id + tf_sram_bank_2_base_offset[bank_id]) << 3;
+}
+
+/**
+ * Translates an 8B offset and bank_id to a block_id
+ */
+static void
+tf_sram_offset_2_block_id(enum tf_sram_bank_id bank_id, uint16_t offset,
+			  uint16_t *block_id, uint16_t *slice_offset)
+{
+	*slice_offset = offset & 0x7;
+	*block_id = ((offset & ~0x7) >> 3) -
+		    tf_sram_bank_2_base_offset[bank_id];
+}
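+
+/*
+ * Worked example (illustrative only): for bank_id 1 the base offset is
+ * 2048 blocks, so block_id 5 maps to an 8B offset of (5 + 2048) << 3 =
+ * 16424.  Adding a slice_offset of 2 gives 16426; the reverse translation
+ * yields block_id ((16426 & ~0x7) >> 3) - 2048 = 5 and slice_offset
+ * 16426 & 0x7 = 2.
+ */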
+
+/**
+ * Find a matching block_id within the slice list
+ */
+static struct tf_sram_block
+*tf_sram_find_block(uint16_t block_id, struct tf_sram_slice_list *slice_list)
+{
+	uint32_t cnt;
+	struct tf_sram_block *block;
+
+	cnt = slice_list->cnt;
+	block = slice_list->head;
+
+	while (cnt > 0 && block) {
+		if (block->block_id == block_id)
+			return block;
+		block = block->next;
+		cnt--;
+	}
+	return NULL;
+}
+
+/**
+ * Given the current block get the next block within the slice list
+ *
+ * List is not changed.
+ */
+static struct tf_sram_block
+*tf_sram_get_next_block(struct tf_sram_block *block)
+{
+	struct tf_sram_block *nblock;
+
+	if (block != NULL)
+		nblock = block->next;
+	else
+		nblock = NULL;
+	return nblock;
+}
+
+/**
+ * Free an allocated slice from a block and if the block is empty,
+ * return an indication so that the block can be freed.
+ */
+static int
+tf_sram_free_slice(enum tf_sram_slice_size slice_size,
+		   uint16_t slice_offset, struct tf_sram_block *block,
+		   bool *block_is_empty)
+{
+	int rc = 0;
+	uint8_t shift;
+	uint8_t slice_mask = 0;
+
+	TF_CHECK_PARMS2(block, block_is_empty);
+
+	switch (slice_size) {
+	case TF_SRAM_SLICE_SIZE_8B:
+		shift = slice_offset >> 0;
+		assert(shift < 8);
+		slice_mask = 1 << shift;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_16B:
+		shift = slice_offset >> 1;
+		assert(shift < 4);
+		slice_mask = 1 << shift;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_32B:
+		shift = slice_offset >> 2;
+		assert(shift < 2);
+		slice_mask = 1 << shift;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_64B:
+	default:
+		shift = slice_offset >> 0;
+		assert(shift < 1);
+		slice_mask = 1 << shift;
+		break;
+	}
+
+	if ((block->in_use_mask & slice_mask) == 0) {
+		rc = -EINVAL;
+		TFP_DRV_LOG(ERR, "block_id(0x%x) slice(%d) was not allocated\n",
+			    block->block_id, slice_offset);
+		return rc;
+	}
+
+	block->in_use_mask &= ~slice_mask;
+
+	if (block->in_use_mask == 0)
+		*block_is_empty = true;
+	else
+		*block_is_empty = false;
+
+	return rc;
+}
+
+/**
+ * TF SRAM get next slice
+ *
+ * Gets the next slice_offset available in the block
+ * and updates the in_use_mask.
+ */
+static int
+tf_sram_get_next_slice_in_block(struct tf_sram_block *block,
+				enum tf_sram_slice_size slice_size,
+				uint16_t *slice_offset,
+				bool *block_is_full)
+{
+	int rc, free_id = -1;
+	uint8_t shift, max_slices, mask, i, full_mask;
+
+	TF_CHECK_PARMS3(block, slice_offset, block_is_full);
+
+	switch (slice_size) {
+	case TF_SRAM_SLICE_SIZE_8B:
+		shift      = 0;
+		max_slices = 8;
+		full_mask  = 0xff;
+		break;
+	case TF_SRAM_SLICE_SIZE_16B:
+		shift      = 1;
+		max_slices = 4;
+		full_mask  = 0xf;
+		break;
+	case TF_SRAM_SLICE_SIZE_32B:
+		shift      = 2;
+		max_slices = 2;
+		full_mask  = 0x3;
+		break;
+	case TF_SRAM_SLICE_SIZE_64B:
+	default:
+		shift      = 0;
+		max_slices = 1;
+		full_mask  = 1;
+		break;
+	}
+
+	mask = block->in_use_mask;
+
+	for (i = 0; i < max_slices; i++) {
+		if ((mask & 1) == 0) {
+			free_id = i;
+			block->in_use_mask |= 1 << free_id;
+			break;
+		}
+		mask = mask >> 1;
+	}
+
+	if (block->in_use_mask == full_mask)
+		*block_is_full = true;
+	else
+		*block_is_full = false;
+
+	if (free_id >= 0) {
+		*slice_offset = free_id << shift;
+		rc = 0;
+	} else {
+		*slice_offset = 0;
+		rc = -ENOMEM;
+	}
+
+	return rc;
+}
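+
+/*
+ * Illustrative example: for 16B slices (max_slices = 4, full_mask = 0xf)
+ * and a starting in_use_mask of 0x5, the first clear bit is bit 1, so
+ * free_id = 1, the returned slice_offset is 1 << 1 = 2 (in 8B units) and
+ * the mask becomes 0x7; block_is_full remains false until the mask
+ * reaches 0xf.
+ */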
+
+/**
+ * TF SRAM get indication as to whether the slice offset is
+ * allocated in the block.
+ *
+ */
+static int
+tf_sram_is_slice_allocated_in_block(struct tf_sram_block *block,
+				    enum tf_sram_slice_size slice_size,
+				    uint16_t slice_offset,
+				    bool *is_allocated)
+{
+	int rc = 0;
+	uint8_t shift;
+	uint8_t slice_mask = 0;
+
+	TF_CHECK_PARMS2(block, is_allocated);
+
+	*is_allocated = false;
+
+	switch (slice_size) {
+	case TF_SRAM_SLICE_SIZE_8B:
+		shift = slice_offset >> 0;
+		assert(shift < 8);
+		slice_mask = 1 << shift;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_16B:
+		shift = slice_offset >> 1;
+		assert(shift < 4);
+		slice_mask = 1 << shift;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_32B:
+		shift = slice_offset >> 2;
+		assert(shift < 2);
+		slice_mask = 1 << shift;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_64B:
+	default:
+		shift = slice_offset >> 0;
+		assert(shift < 1);
+		slice_mask = 1 << shift;
+		break;
+	}
+
+	if ((block->in_use_mask & slice_mask) == 0) {
+		TFP_DRV_LOG(ERR, "block_id(0x%x) slice(%d) was not allocated\n",
+			    block->block_id, slice_offset);
+		*is_allocated = false;
+	} else {
+		*is_allocated = true;
+	}
+
+	return rc;
+}
+
+/**
+ * Initialize slice list
+ */
+static void
+tf_sram_init_slice_list(struct tf_sram_slice_list *slice_list,
+			enum tf_sram_slice_size slice_size)
+{
+	slice_list->head = NULL;
+	slice_list->tail = NULL;
+	slice_list->cnt = 0;
+	slice_list->size = slice_size;
+}
+
+/**
+ * Get the block count
+ */
+static uint32_t
+tf_sram_get_block_cnt(struct tf_sram_slice_list *slice_list)
+{
+	return slice_list->cnt;
+}
+
+
+/**
+ * Free a block data structure - does not free to the RM
+ */
+static void
+tf_sram_free_block(struct tf_sram_slice_list *slice_list,
+		   struct tf_sram_block *block)
+{
+	if (slice_list->head == block && slice_list->tail == block) {
+		slice_list->head = NULL;
+		slice_list->tail = NULL;
+	} else if (slice_list->head == block) {
+		slice_list->head = block->next;
+		slice_list->head->prev = NULL;
+	} else if (slice_list->tail == block) {
+		slice_list->tail = block->prev;
+		slice_list->tail->next = NULL;
+	} else {
+		block->prev->next = block->next;
+		block->next->prev = block->prev;
+	}
+	tfp_free(block);
+	slice_list->cnt--;
+}
+
+/**
+ * Free the entire slice_list
+ */
+static void
+tf_sram_free_slice_list(struct tf_sram_slice_list *slice_list)
+{
+	uint32_t i, block_cnt;
+	struct tf_sram_block *nblock, *block;
+
+	block_cnt = tf_sram_get_block_cnt(slice_list);
+	block = slice_list->head;
+
+	for (i = 0; i < block_cnt; i++) {
+		nblock = block->next;
+		tf_sram_free_block(slice_list, block);
+		block = nblock;
+	}
+}
+
+/**
+ * Allocate a single SRAM block from memory and add it to the slice list
+ */
+static struct tf_sram_block
+*tf_sram_alloc_block(struct tf_sram_slice_list *slice_list,
+		     uint16_t block_id)
+{
+	struct tf_sram_block *block;
+	struct tfp_calloc_parms cparms;
+	int rc;
+
+	cparms.nitems = 1;
+	cparms.size = sizeof(struct tf_sram_block);
+	cparms.alignment = 0;
+	rc = tfp_calloc(&cparms);
+	if (rc) {
+		/* Log error */
+		TFP_DRV_LOG(ERR,
+			    "Failed to allocate block, rc:%s\n",
+			    strerror(-rc));
+		return NULL;
+	}
+	block = (struct tf_sram_block *)cparms.mem_va;
+	block->block_id = block_id;
+
+	if (slice_list->head == NULL) {
+		slice_list->head = block;
+		slice_list->tail = block;
+		block->next = NULL;
+		block->prev = NULL;
+	} else {
+		block->next = slice_list->head;
+		block->prev = NULL;
+		block->next->prev = block;
+		slice_list->head = block->next->prev;
+	}
+	slice_list->cnt++;
+	return block;
+}
+
+/**
+ * Find the first not full block in the slice list
+ */
+static void
+tf_sram_find_first_not_full_block(struct tf_sram_slice_list *slice_list,
+				  enum tf_sram_slice_size slice_size,
+				  struct tf_sram_block **first_not_full_block)
+{
+	struct tf_sram_block *block = slice_list->head;
+	uint8_t slice_mask, mask;
+
+	switch (slice_size) {
+	case TF_SRAM_SLICE_SIZE_8B:
+		slice_mask = 0xff;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_16B:
+		slice_mask = 0xf;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_32B:
+		slice_mask = 0x3;
+		break;
+
+	case TF_SRAM_SLICE_SIZE_64B:
+	default:
+		slice_mask = 0x1;
+		break;
+	}
+
+	*first_not_full_block = NULL;
+
+	while (block) {
+		mask = block->in_use_mask & slice_mask;
+		if (mask != slice_mask) {
+			*first_not_full_block = block;
+			break;
+		}
+		block = block->next;
+	}
+}
+
+static void
+tf_sram_dump_block(struct tf_sram_block *block)
+{
+	TFP_DRV_LOG(INFO, "block_id(0x%x) in_use_mask(0x%02x)\n",
+		    block->block_id,
+		    block->in_use_mask);
+}
+
+/**********************
+ * External functions
+ **********************/
+int
+tf_sram_mgr_bind(void **sram_handle)
+{
+	int rc = 0;
+	enum tf_sram_bank_id bank_id;
+	enum tf_sram_slice_size slice_size;
+	struct tf_sram *sram;
+	struct tf_sram_slice_list *slice_list;
+	enum tf_dir dir;
+	struct tfp_calloc_parms cparms;
+
+	TF_CHECK_PARMS1(sram_handle);
+
+	cparms.nitems = 1;
+	cparms.size = sizeof(struct tf_sram);
+	cparms.alignment = 0;
+	rc = tfp_calloc(&cparms);
+	if (rc) {
+		/* Log error */
+		TFP_DRV_LOG(ERR,
+			    "Failed to allocate SRAM mgmt data, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+	sram = (struct tf_sram *)cparms.mem_va;
+
+	/* For each direction
+	 */
+	for (dir = 0; dir < TF_DIR_MAX; dir++) {
+		/* For each bank
+		 */
+		for (bank_id = TF_SRAM_BANK_ID_0;
+		     bank_id < TF_SRAM_BANK_ID_MAX;
+		     bank_id++) {
+			/* Create each sized slice empty list
+			 */
+			for (slice_size = TF_SRAM_SLICE_SIZE_8B;
+			     slice_size < TF_SRAM_SLICE_SIZE_MAX;
+			     slice_size++) {
+				rc = tf_sram_get_slice_list(sram, &slice_list,
+							    slice_size, dir,
+							    bank_id);
+				if (rc) {
+					/* Log error */
+					TFP_DRV_LOG(ERR,
+						  "No SRAM slice list, rc:%s\n",
+						  strerror(-rc));
+					return rc;
+				}
+				tf_sram_init_slice_list(slice_list, slice_size);
+			}
+		}
+	}
+
+	*sram_handle = sram;
+
+	return rc;
+}
+
+int
+tf_sram_mgr_unbind(void *sram_handle)
+{
+	int rc = 0;
+	struct tf_sram *sram;
+	enum tf_sram_bank_id bank_id;
+	enum tf_sram_slice_size slice_size;
+	enum tf_dir dir;
+	struct tf_sram_slice_list *slice_list;
+
+	TF_CHECK_PARMS1(sram_handle);
+
+	sram = (struct tf_sram *)sram_handle;
+
+	for (dir = 0; dir < TF_DIR_MAX; dir++) {
+		/* For each bank
+		 */
+		for (bank_id = TF_SRAM_BANK_ID_0;
+		     bank_id < TF_SRAM_BANK_ID_MAX;
+		     bank_id++) {
+			/* For each slice size
+			 */
+			for (slice_size = TF_SRAM_SLICE_SIZE_8B;
+			     slice_size < TF_SRAM_SLICE_SIZE_MAX;
+			     slice_size++) {
+				rc = tf_sram_get_slice_list(sram, &slice_list,
+							    slice_size, dir,
+							    bank_id);
+				if (rc) {
+					/* Log error */
+					TFP_DRV_LOG(ERR,
+						  "No SRAM slice list, rc:%s\n",
+						  strerror(-rc));
+					return rc;
+				}
+				if (tf_sram_get_block_cnt(slice_list))
+					tf_sram_free_slice_list(slice_list);
+			}
+		}
+	}
+
+	tfp_free(sram);
+	sram_handle = NULL;
+
+	/* Freeing of the RM resources is handled by the table manager */
+	return rc;
+}
+
+int tf_sram_mgr_alloc(void *sram_handle,
+		      struct tf_sram_mgr_alloc_parms *parms)
+{
+	int rc = 0;
+	struct tf_sram *sram;
+	struct tf_sram_slice_list *slice_list;
+	uint16_t block_id, slice_offset = 0;
+	uint32_t index;
+	struct tf_sram_block *block;
+	struct tf_rm_allocate_parms aparms = { 0 };
+	bool block_is_full;
+	uint16_t block_offset;
+
+	TF_CHECK_PARMS3(sram_handle, parms, parms->sram_offset);
+
+	sram = (struct tf_sram *)sram_handle;
+
+	/* Check the current slice list
+	 */
+	rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,
+				    parms->dir, parms->bank_id);
+	if (rc) {
+		/* Log error */
+		TFP_DRV_LOG(ERR,
+			    "No SRAM slice list, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* If the list is empty or all entries are full allocate a new block
+	 */
+	if (!slice_list->first_not_full_block) {
+		/* Allocate and insert a new block
+		 */
+		aparms.index = &index;
+		aparms.subtype = parms->tbl_type;
+		aparms.rm_db = parms->rm_db;
+		rc = tf_rm_allocate(&aparms);
+		if (rc)
+			return rc;
+
+		block_id = index;
+		block = tf_sram_alloc_block(slice_list, block_id);
+		if (block == NULL)
+			return -ENOMEM;
+	} else {
+		/* Block exists
+		 */
+		block =
+		 (struct tf_sram_block *)(slice_list->first_not_full_block);
+	}
+	rc = tf_sram_get_next_slice_in_block(block,
+					     parms->slice_size,
+					     &slice_offset,
+					     &block_is_full);
+
+	/* Find the new first non-full block in the list
+	 */
+	tf_sram_find_first_not_full_block(slice_list,
+					  parms->slice_size,
+					  &slice_list->first_not_full_block);
+
+	tf_sram_block_id_2_offset(parms->bank_id, block->block_id,
+				  &block_offset);
+
+	*parms->sram_offset = block_offset + slice_offset;
+	return rc;
+}
+
+int
+tf_sram_mgr_free(void *sram_handle,
+		 struct tf_sram_mgr_free_parms *parms)
+{
+	int rc = 0;
+	struct tf_sram *sram;
+	struct tf_sram_slice_list *slice_list;
+	uint16_t block_id, slice_offset;
+	struct tf_sram_block *block;
+	bool block_is_empty;
+	struct tf_rm_free_parms fparms = { 0 };
+
+	TF_CHECK_PARMS2(sram_handle, parms);
+
+	sram = (struct tf_sram *)sram_handle;
+
+	/* Check the current slice list
+	 */
+	rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,
+				    parms->dir, parms->bank_id);
+	if (rc) {
+		/* Log error */
+		TFP_DRV_LOG(ERR,
+			    "No SRAM slice list, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Determine the block id and slice offset from the SRAM offset
+	 */
+	tf_sram_offset_2_block_id(parms->bank_id, parms->sram_offset, &block_id,
+				  &slice_offset);
+
+	/* Search the list of blocks for the matching block id
+	 */
+	block = tf_sram_find_block(block_id, slice_list);
+	if (block == NULL) {
+		TFP_DRV_LOG(ERR, "block not found 0x%x\n", block_id);
+		return -EINVAL;
+	}
+
+	/* If found, search for the matching SRAM slice in use.
+	 */
+	rc = tf_sram_free_slice(parms->slice_size, slice_offset,
+				block, &block_is_empty);
+	if (rc) {
+		TFP_DRV_LOG(ERR, "Error freeing slice (%s)\n", strerror(-rc));
+		return rc;
+	}
+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)
+	/* If this is a counter table type, clear the entry on free.  In the
+	 * future this should switch to using the special access registers on
+	 * Thor to clear automatically on read.
+	 */
+	if (parms->tbl_type == TF_TBL_TYPE_ACT_STATS_64) {
+		uint8_t data[8] = { 0 };
+		uint16_t hcapi_type = 0;
+		struct tf_rm_get_hcapi_parms hparms = { 0 };
+
+		/* Get the hcapi type */
+		hparms.rm_db = parms->rm_db;
+		hparms.subtype = parms->tbl_type;
+		hparms.hcapi_type = &hcapi_type;
+		rc = tf_rm_get_hcapi_type(&hparms);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s, Failed type lookup, type:%s, rc:%s\n",
+				    tf_dir_2_str(parms->dir),
+				    tf_tbl_type_2_str(parms->tbl_type),
+				    strerror(-rc));
+			return rc;
+		}
+		/* Clear the counter
+		 */
+		rc = tf_msg_set_tbl_entry(parms->tfp,
+					  parms->dir,
+					  hcapi_type,
+					  sizeof(data),
+					  data,
+					  parms->sram_offset);
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s, Set failed, type:%s, rc:%s\n",
+				    tf_dir_2_str(parms->dir),
+				    tf_tbl_type_2_str(parms->tbl_type),
+				    strerror(-rc));
+			return rc;
+		}
+	}
+#endif
+	/* If the block is empty, free the block to the RM
+	 */
+	if (block_is_empty) {
+		fparms.rm_db = parms->rm_db;
+		fparms.subtype = parms->tbl_type;
+		fparms.index = block_id;
+		rc = tf_rm_free(&fparms);
+
+		if (rc) {
+			TFP_DRV_LOG(ERR, "Free block_id(%d) failed error(%s)\n",
+				    block_id, strerror(-rc));
+		}
+		/* Free local entry regardless
+		 */
+		tf_sram_free_block(slice_list, block);
+
+		/* Find the next non-full block in the list
+		 */
+		tf_sram_find_first_not_full_block(slice_list,
+					     parms->slice_size,
+					     &slice_list->first_not_full_block);
+	}
+
+	return rc;
+}
+
+int
+tf_sram_mgr_dump(void *sram_handle,
+		 struct tf_sram_mgr_dump_parms *parms)
+{
+	int rc = 0;
+	struct tf_sram *sram;
+	struct tf_sram_slice_list *slice_list;
+	uint32_t block_cnt, i;
+	struct tf_sram_block *block;
+
+	TF_CHECK_PARMS2(sram_handle, parms);
+
+	sram = (struct tf_sram *)sram_handle;
+
+	rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,
+				    parms->dir, parms->bank_id);
+	if (rc)
+		return rc;
+
+	if (slice_list->cnt || slice_list->first_not_full_block) {
+		TFP_DRV_LOG(INFO, "\n********** %s: %s: %s ***********\n",
+			    tf_sram_bank_2_str(parms->bank_id),
+			    tf_dir_2_str(parms->dir),
+			    tf_sram_slice_2_str(parms->slice_size));
+
+		block_cnt = tf_sram_get_block_cnt(slice_list);
+		TFP_DRV_LOG(INFO, "block_cnt(%d)\n", block_cnt);
+		if (slice_list->first_not_full_block)
+			TFP_DRV_LOG(INFO, "first_not_full_block(0x%x)\n",
+			    slice_list->first_not_full_block->block_id);
+		block = slice_list->head;
+		for (i = 0; i < block_cnt; i++) {
+			tf_sram_dump_block(block);
+			block = tf_sram_get_next_block(block);
+		}
+		TFP_DRV_LOG(INFO, "*********************************\n");
+	}
+	return rc;
+}
+
+/**
+ * Validate an SRAM Slice is allocated
+ *
+ * Validate whether the SRAM slice is allocated
+ *
+ * [in] sram_handle
+ *   Pointer to SRAM handle
+ *
+ * [in] parms
+ *   Pointer to the SRAM is-allocated parameters
+ *
+ * Returns
+ *   - (0) if successful
+ *   - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_is_allocated(void *sram_handle,
+			     struct tf_sram_mgr_is_allocated_parms *parms)
+{
+	int rc = 0;
+	struct tf_sram *sram;
+	struct tf_sram_slice_list *slice_list;
+	uint16_t block_id, slice_offset;
+	struct tf_sram_block *block;
+
+	TF_CHECK_PARMS3(sram_handle, parms, parms->is_allocated);
+
+	sram = (struct tf_sram *)sram_handle;
+
+	/* Check the current slice list
+	 */
+	rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,
+				    parms->dir, parms->bank_id);
+	if (rc) {
+		/* Log error */
+		TFP_DRV_LOG(ERR,
+			    "No SRAM slice list, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* If the list is empty, then it cannot be allocated
+	 */
+	if (!slice_list->cnt) {
+		TFP_DRV_LOG(ERR, "List is empty for %s:%s:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_sram_slice_2_str(parms->slice_size),
+			    tf_sram_bank_2_str(parms->bank_id));
+
+		parms->is_allocated = false;
+		goto done;
+	}
+
+	/* Determine the block id and slice offset from the SRAM offset
+	 */
+	tf_sram_offset_2_block_id(parms->bank_id, parms->sram_offset, &block_id,
+				  &slice_offset);
+
+	/* Search the list of blocks for the matching block id
+	 */
+	block = tf_sram_find_block(block_id, slice_list);
+	if (block == NULL) {
+		TFP_DRV_LOG(ERR, "block not found for offset 0x%x\n",
+			    parms->sram_offset);
+		parms->is_allocated = false;
+		goto done;
+	}
+
+	rc = tf_sram_is_slice_allocated_in_block(block,
+						 parms->slice_size,
+						 slice_offset,
+						 parms->is_allocated);
+done:
+	return rc;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_sram_mgr.h b/drivers/net/bnxt/tf_core/tf_sram_mgr.h
new file mode 100644
index 0000000000..4abe3fb468
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_sram_mgr.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TF_SRAM_MGR_H_
+#define _TF_SRAM_MGR_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include "tf_core.h"
+#include "tf_rm.h"
+
+/* When special access registers are used to access the SRAM, stats can be
+ * automatically cleared on read by the hardware.  This requires additional
+ * support to be added in the firmware to use these registers for statistics.
+ * The support entails using the special access registers to read the stats.
+ * These are stored in bank 3 currently but may move depending upon the
+ * policy defined in tf_device_p58.h
+ */
+#define STATS_CLEAR_ON_READ_SUPPORT 0
+
+#define TF_SRAM_MGR_BLOCK_SZ_BYTES 64
+#define TF_SRAM_MGR_MIN_SLICE_BYTES 8
+/**
+ * Bank identifier
+ */
+enum tf_sram_bank_id {
+	TF_SRAM_BANK_ID_0,		/**< SRAM Bank 0 id */
+	TF_SRAM_BANK_ID_1,		/**< SRAM Bank 1 id */
+	TF_SRAM_BANK_ID_2,		/**< SRAM Bank 2 id */
+	TF_SRAM_BANK_ID_3,		/**< SRAM Bank 3 id */
+	TF_SRAM_BANK_ID_MAX		/**< SRAM Bank index limit */
+};
+
+/**
+ * TF slice size.
+ *
+ * A slice is part of a 64B row
+ *
+ * Each slice is a multiple of 8B
+ */
+enum tf_sram_slice_size {
+	TF_SRAM_SLICE_SIZE_8B,	/**< 8 byte SRAM slice */
+	TF_SRAM_SLICE_SIZE_16B,	/**< 16 byte SRAM slice */
+	TF_SRAM_SLICE_SIZE_32B,	/**< 32 byte SRAM slice */
+	TF_SRAM_SLICE_SIZE_64B,	/**< 64 byte SRAM slice */
+	TF_SRAM_SLICE_SIZE_MAX  /**< slice limit */
+};
+
+
+/** Initialize the SRAM slice manager
+ *
+ *  The SRAM slice manager manages slices within 64B rows. Slices are of size
+ *  tf_sram_slice_size.  This function provides a handle to the SRAM manager
+ *  data.
+ *
+ *  SRAM manager data may dynamically allocate data upon initialization if
+ *  running on the host.
+ *
+ * [in/out] sram_handle
+ *   Pointer to SRAM handle
+ *
+ * Returns
+ *   - (0) if successful
+ *   - (-EINVAL) on failure
+ *
+ * Returns the handle for the SRAM slice manager
+ */
+int tf_sram_mgr_bind(void **sram_handle);
+
+/** Uninitialize the SRAM slice manager
+ *
+ * Frees any dynamically allocated data structures for SRAM slice management.
+ *
+ * [in] sram_handle
+ *   Pointer to SRAM handle
+ *
+ * Returns
+ *   - (0) if successful
+ *   - (-EINVAL) on failure
+ */
+int tf_sram_mgr_unbind(void *sram_handle);
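+
+/* Minimal lifecycle sketch (illustrative, not part of this patch's call
+ * flow): the opaque handle returned by tf_sram_mgr_bind() is passed to all
+ * other SRAM manager APIs and released with tf_sram_mgr_unbind().
+ *
+ *   void *sram_handle = NULL;
+ *   int rc;
+ *
+ *   rc = tf_sram_mgr_bind(&sram_handle);
+ *   if (rc)
+ *       return rc;
+ *   ...tf_sram_mgr_alloc()/tf_sram_mgr_free() calls...
+ *   rc = tf_sram_mgr_unbind(sram_handle);
+ */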
+
+/**
+ * tf_sram_mgr_alloc_parms parameter definition
+ */
+struct tf_sram_mgr_alloc_parms {
+	/**
+	 * [in] dir
+	 */
+	enum tf_dir dir;
+	/**
+	 * [in] bank
+	 *
+	 *  the SRAM bank to allocate from
+	 */
+	enum tf_sram_bank_id bank_id;
+	/**
+	 * [in] slice_size
+	 *
+	 *  the slice size to allocate
+	 */
+	enum tf_sram_slice_size slice_size;
+	/**
+	 * [in/out] sram_offset
+	 *
+	 *  A pointer to be filled with the allocated 8B SRAM slice offset
+	 */
+	uint16_t *sram_offset;
+	/**
+	 * [in] RM DB Handle required for RM allocation
+	 */
+	void *rm_db;
+	/**
+	 * [in] tf table type
+	 */
+	enum tf_tbl_type tbl_type;
+};
+
+/**
+ * Allocate an SRAM Slice
+ *
+ * Allocate an SRAM slice from the indicated bank.  If successful an 8B SRAM
+ * offset will be returned.  Slices are variable sized.  This may result in
+ * a row being allocated from the RM SRAM bank pool if required.
+ *
+ * [in] sram_handle
+ *   Pointer to SRAM handle
+ *
+ * [in] parms
+ *   Pointer to the SRAM alloc parameters
+ *
+ * Returns
+ *   - (0) if successful
+ *   - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_alloc(void *sram_handle,
+		      struct tf_sram_mgr_alloc_parms *parms);
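+
+/* Illustrative allocation sketch.  The rm_db value is assumed to be the
+ * table module RM DB for the chosen direction, as used by the SRAM table
+ * code in tf_tbl_sram.c:
+ *
+ *   struct tf_sram_mgr_alloc_parms ap = { 0 };
+ *   uint16_t sram_offset;
+ *
+ *   ap.dir = TF_DIR_RX;
+ *   ap.bank_id = TF_SRAM_BANK_ID_3;
+ *   ap.slice_size = TF_SRAM_SLICE_SIZE_8B;
+ *   ap.tbl_type = TF_TBL_TYPE_ACT_STATS_64;
+ *   ap.rm_db = tbl_rm_db;
+ *   ap.sram_offset = &sram_offset;
+ *   rc = tf_sram_mgr_alloc(sram_handle, &ap);
+ */
+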
+/**
+ * tf_sram_mgr_free_parms parameter definition
+ */
+struct tf_sram_mgr_free_parms {
+	/**
+	 * [in] dir
+	 */
+	enum tf_dir dir;
+	/**
+	 * [in] bank
+	 *
+	 *  the SRAM bank to free to
+	 */
+	enum tf_sram_bank_id bank_id;
+	/**
+	 * [in] slice_size
+	 *
+	 *  the slice size to be returned
+	 */
+	enum tf_sram_slice_size slice_size;
+	/**
+	 * [in] sram_offset
+	 *
+	 *  the SRAM slice offset (8B) to be returned
+	 */
+	uint16_t sram_offset;
+	/**
+	 * [in] RM DB Handle required for RM free
+	 */
+	void *rm_db;
+	/**
+	 * [in] tf table type
+	 */
+	enum tf_tbl_type tbl_type;
+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)
+	/**
+	 * [in] tfp
+	 *
+	 * A pointer to the tf handle
+	 */
+	void *tfp;
+#endif
+};
+
+/**
+ * Free an SRAM Slice
+ *
+ * Free an SRAM slice to the indicated bank.  This may result in a 64B row
+ * being returned to the RM SRAM bank pool.
+ *
+ * [in] sram_handle
+ *   Pointer to SRAM handle
+ *
+ * [in] parms
+ *   Pointer to the SRAM free parameters
+ *
+ * Returns
+ *   - (0) if successful
+ *   - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_free(void *sram_handle,
+		     struct tf_sram_mgr_free_parms *parms);
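+
+/* Illustrative free sketch, mirroring the allocation sketch above.  The
+ * same direction, bank, slice size and RM DB used at allocation time must
+ * be supplied so the slice can be located and the 64B row returned to RM
+ * when the block empties (tfp is only present when
+ * STATS_CLEAR_ON_READ_SUPPORT is 0):
+ *
+ *   struct tf_sram_mgr_free_parms fp = { 0 };
+ *
+ *   fp.dir = TF_DIR_RX;
+ *   fp.bank_id = TF_SRAM_BANK_ID_3;
+ *   fp.slice_size = TF_SRAM_SLICE_SIZE_8B;
+ *   fp.tbl_type = TF_TBL_TYPE_ACT_STATS_64;
+ *   fp.rm_db = tbl_rm_db;
+ *   fp.sram_offset = sram_offset;
+ *   fp.tfp = tfp;
+ *   rc = tf_sram_mgr_free(sram_handle, &fp);
+ */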
+
+/**
+ * tf_sram_mgr_dump_parms parameter definition
+ */
+struct tf_sram_mgr_dump_parms {
+	/**
+	 * [in] dir
+	 */
+	enum tf_dir dir;
+	/**
+	 * [in] bank
+	 *
+	 *  the SRAM bank to dump
+	 */
+	enum tf_sram_bank_id bank_id;
+	/**
+	 * [in] slice_size
+	 *
+	 *  the slice size list to be dumped
+	 */
+	enum tf_sram_slice_size slice_size;
+};
+
+/**
+ * Dump a slice list
+ *
+ * Dump the slice list given the SRAM bank and the slice size
+ *
+ * [in] sram_handle
+ *   Pointer to SRAM handle
+ *
+ * [in] parms
+ *   Pointer to the SRAM dump parameters
+ *
+ * Returns
+ *   - (0) if successful
+ *   - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_dump(void *sram_handle,
+		     struct tf_sram_mgr_dump_parms *parms);
+
+/**
+ * tf_sram_mgr_is_allocated_parms parameter definition
+ */
+struct tf_sram_mgr_is_allocated_parms {
+	/**
+	 * [in] dir
+	 */
+	enum tf_dir dir;
+	/**
+	 * [in] bank
+	 *
+	 *  the SRAM bank to allocate from
+	 */
+	enum tf_sram_bank_id bank_id;
+	/**
+	 * [in] slice_size
+	 *
+	 *  the slice size which was allocated
+	 */
+	enum tf_sram_slice_size slice_size;
+	/**
+	 * [in] sram_offset
+	 *
+	 *  The sram slice offset to validate
+	 */
+	uint16_t sram_offset;
+	/**
+	 * [in/out] is_allocated
+	 *
+	 *  Pointer passed in to be filled with indication of allocation
+	 */
+	bool *is_allocated;
+};
+
+/**
+ * Validate an SRAM Slice is allocated
+ *
+ * Validate whether the SRAM slice is allocated
+ *
+ * [in] sram_handle
+ *   Pointer to SRAM handle
+ *
+ * [in] parms
+ *   Pointer to the SRAM is-allocated parameters
+ *
+ * Returns
+ *   - (0) if successful
+ *   - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_is_allocated(void *sram_handle,
+			     struct tf_sram_mgr_is_allocated_parms *parms);
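+
+/* Illustrative check sketch, reusing the offset allocated in the sketch
+ * above; the SRAM table code performs the same validation before a set,
+ * get or free:
+ *
+ *   struct tf_sram_mgr_is_allocated_parms ip = { 0 };
+ *   bool in_use = false;
+ *
+ *   ip.dir = TF_DIR_RX;
+ *   ip.bank_id = TF_SRAM_BANK_ID_3;
+ *   ip.slice_size = TF_SRAM_SLICE_SIZE_8B;
+ *   ip.sram_offset = sram_offset;
+ *   ip.is_allocated = &in_use;
+ *   rc = tf_sram_mgr_is_allocated(sram_handle, &ip);
+ */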
+
+/**
+ * Given the slice size, return a char string
+ */
+const char
+*tf_sram_slice_2_str(enum tf_sram_slice_size slice_size);
+
+/**
+ * Given the bank_id, return a char string
+ */
+const char
+*tf_sram_bank_2_str(enum tf_sram_bank_id bank_id);
+
+#endif /* _TF_SRAM_MGR_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c
index 7011edcd78..0a8720e7b6 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl.c
@@ -16,20 +16,11 @@
 #include "tf_session.h"
 #include "tf_device.h"
 
-#define TF_TBL_RM_TO_PTR(new_idx, idx, base, shift) {		\
-		*(new_idx) = (((idx) + (base)) << (shift));	\
-}
-
-#define TF_TBL_PTR_TO_RM(new_idx, idx, base, shift) {		\
-		*(new_idx) = (((idx) >> (shift)) - (base));	\
-}
-
 struct tf;
 
-/**
- * Shadow init flag, set on bind and cleared on unbind
- */
-static uint8_t shadow_init;
+#define TF_TBL_RM_TO_PTR(new_idx, idx, base, shift) {          \
+		*(new_idx) = (((idx) + (base)) << (shift));    \
+}
 
 int
 tf_tbl_bind(struct tf *tfp,
@@ -121,8 +112,6 @@ tf_tbl_unbind(struct tf *tfp)
 		tbl_db->tbl_db[i] = NULL;
 	}
 
-	shadow_init = 0;
-
 	return 0;
 }
 
@@ -135,7 +124,6 @@ tf_tbl_alloc(struct tf *tfp __rte_unused,
 	struct tf_rm_allocate_parms aparms = { 0 };
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
-	uint16_t base = 0, shift = 0;
 	struct tbl_rm_db *tbl_db;
 	void *tbl_db_ptr = NULL;
 
@@ -154,28 +142,12 @@ tf_tbl_alloc(struct tf *tfp __rte_unused,
 	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "Failed to get em_ext_db from session, rc:%s\n",
+			    "Failed to get tbl_db from session, rc:%s\n",
 			    strerror(-rc));
 		return rc;
 	}
 	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
 
-	/* Only get table info if required for the device */
-	if (dev->ops->tf_dev_get_tbl_info) {
-		rc = dev->ops->tf_dev_get_tbl_info(tfp,
-						   tbl_db->tbl_db[parms->dir],
-						   parms->type,
-						   &base,
-						   &shift);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "%s: Failed to get table info:%d\n",
-				    tf_dir_2_str(parms->dir),
-				    parms->type);
-			return rc;
-		}
-	}
-
 	/* Allocate requested element */
 	aparms.rm_db = tbl_db->tbl_db[parms->dir];
 	aparms.subtype = parms->type;
@@ -183,13 +155,12 @@ tf_tbl_alloc(struct tf *tfp __rte_unused,
 	rc = tf_rm_allocate(&aparms);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "%s: Failed allocate, type:%d\n",
+			    "%s: Failed allocate, type:%s\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type);
+			    tf_tbl_type_2_str(parms->type));
 		return rc;
 	}
 
-	TF_TBL_RM_TO_PTR(&idx, idx, base, shift);
 	*parms->idx = idx;
 
 	return 0;
@@ -205,7 +176,6 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 	int allocated = 0;
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
-	uint16_t base = 0, shift = 0;
 	struct tbl_rm_db *tbl_db;
 	void *tbl_db_ptr = NULL;
 
@@ -230,28 +200,10 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 	}
 	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
 
-	/* Only get table info if required for the device */
-	if (dev->ops->tf_dev_get_tbl_info) {
-		rc = dev->ops->tf_dev_get_tbl_info(tfp,
-						   tbl_db->tbl_db[parms->dir],
-						   parms->type,
-						   &base,
-						   &shift);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "%s: Failed to get table info:%d\n",
-				    tf_dir_2_str(parms->dir),
-				    parms->type);
-			return rc;
-		}
-	}
-
 	/* Check if element is in use */
 	aparms.rm_db = tbl_db->tbl_db[parms->dir];
 	aparms.subtype = parms->type;
-
-	TF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);
-
+	aparms.index = parms->idx;
 	aparms.allocated = &allocated;
 	rc = tf_rm_is_allocated(&aparms);
 	if (rc)
@@ -259,9 +211,9 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 
 	if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {
 		TFP_DRV_LOG(ERR,
-			    "%s: Entry already free, type:%d, index:%d\n",
+			    "%s: Entry already free, type:%s, index:%d\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type,
+			    tf_tbl_type_2_str(parms->type),
 			    parms->idx);
 		return -EINVAL;
 	}
@@ -279,9 +231,9 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 		rc = tf_rm_get_hcapi_type(&hparms);
 		if (rc) {
 			TFP_DRV_LOG(ERR,
-				    "%s, Failed type lookup, type:%d, rc:%s\n",
+				    "%s, Failed type lookup, type:%s, rc:%s\n",
 				    tf_dir_2_str(parms->dir),
-				    parms->type,
+				    tf_tbl_type_2_str(parms->type),
 				    strerror(-rc));
 			return rc;
 		}
@@ -295,9 +247,9 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 					  parms->idx);
 		if (rc) {
 			TFP_DRV_LOG(ERR,
-				    "%s, Set failed, type:%d, rc:%s\n",
+				    "%s, Set failed, type:%s, rc:%s\n",
 				    tf_dir_2_str(parms->dir),
-				    parms->type,
+				    tf_tbl_type_2_str(parms->type),
 				    strerror(-rc));
 			return rc;
 		}
@@ -306,15 +258,13 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 	/* Free requested element */
 	fparms.rm_db = tbl_db->tbl_db[parms->dir];
 	fparms.subtype = parms->type;
-
-	TF_TBL_PTR_TO_RM(&fparms.index, parms->idx, base, shift);
-
+	fparms.index = parms->idx;
 	rc = tf_rm_free(&fparms);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "%s: Free failed, type:%d, index:%d\n",
+			    "%s: Free failed, type:%s, index:%d\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type,
+			    tf_tbl_type_2_str(parms->type),
 			    parms->idx);
 		return rc;
 	}
@@ -333,7 +283,6 @@ tf_tbl_set(struct tf *tfp,
 	struct tf_rm_get_hcapi_parms hparms = { 0 };
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
-	uint16_t base = 0, shift = 0;
 	struct tbl_rm_db *tbl_db;
 	void *tbl_db_ptr = NULL;
 
@@ -358,21 +307,6 @@ tf_tbl_set(struct tf *tfp,
 	}
 	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
 
-	/* Only get table info if required for the device */
-	if (dev->ops->tf_dev_get_tbl_info) {
-		rc = dev->ops->tf_dev_get_tbl_info(tfp,
-						   tbl_db->tbl_db[parms->dir],
-						   parms->type,
-						   &base,
-						   &shift);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "%s: Failed to get table info:%d\n",
-				    tf_dir_2_str(parms->dir),
-				    parms->type);
-			return rc;
-		}
-	}
 
 	/* Do not check meter drop counter because it is not allocated
 	 * resources
@@ -381,19 +315,18 @@ tf_tbl_set(struct tf *tfp,
 		/* Verify that the entry has been previously allocated */
 		aparms.rm_db = tbl_db->tbl_db[parms->dir];
 		aparms.subtype = parms->type;
-		TF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);
-
 		aparms.allocated = &allocated;
+		aparms.index = parms->idx;
 		rc = tf_rm_is_allocated(&aparms);
 		if (rc)
 			return rc;
 
 		if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {
 			TFP_DRV_LOG(ERR,
-			   "%s, Invalid or not allocated index, type:%d, idx:%d\n",
-			   tf_dir_2_str(parms->dir),
-			   parms->type,
-			   parms->idx);
+			      "%s, Invalid or not allocated, type:%s, idx:%d\n",
+			      tf_dir_2_str(parms->dir),
+			      tf_tbl_type_2_str(parms->type),
+			      parms->idx);
 			return -EINVAL;
 		}
 	}
@@ -405,9 +338,9 @@ tf_tbl_set(struct tf *tfp,
 	rc = tf_rm_get_hcapi_type(&hparms);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "%s, Failed type lookup, type:%d, rc:%s\n",
+			    "%s, Failed type lookup, type:%s, rc:%s\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type,
+			    tf_tbl_type_2_str(parms->type),
 			    strerror(-rc));
 		return rc;
 	}
@@ -420,9 +353,9 @@ tf_tbl_set(struct tf *tfp,
 				  parms->idx);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "%s, Set failed, type:%d, rc:%s\n",
+			    "%s, Set failed, type:%s, rc:%s\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type,
+			    tf_tbl_type_2_str(parms->type),
 			    strerror(-rc));
 		return rc;
 	}
@@ -441,7 +374,6 @@ tf_tbl_get(struct tf *tfp,
 	struct tf_rm_get_hcapi_parms hparms = { 0 };
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
-	uint16_t base = 0, shift = 0;
 	struct tbl_rm_db *tbl_db;
 	void *tbl_db_ptr = NULL;
 
@@ -466,22 +398,6 @@ tf_tbl_get(struct tf *tfp,
 	}
 	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
 
-	/* Only get table info if required for the device */
-	if (dev->ops->tf_dev_get_tbl_info) {
-		rc = dev->ops->tf_dev_get_tbl_info(tfp,
-						   tbl_db->tbl_db[parms->dir],
-						   parms->type,
-						   &base,
-						   &shift);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "%s: Failed to get table info:%d\n",
-				    tf_dir_2_str(parms->dir),
-				    parms->type);
-			return rc;
-		}
-	}
-
 	/* Do not check meter drop counter because it is not allocated
 	 * resources.
 	 */
@@ -489,8 +405,7 @@ tf_tbl_get(struct tf *tfp,
 		/* Verify that the entry has been previously allocated */
 		aparms.rm_db = tbl_db->tbl_db[parms->dir];
 		aparms.subtype = parms->type;
-		TF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);
-
+		aparms.index = parms->idx;
 		aparms.allocated = &allocated;
 		rc = tf_rm_is_allocated(&aparms);
 		if (rc)
@@ -498,9 +413,9 @@ tf_tbl_get(struct tf *tfp,
 
 		if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {
 			TFP_DRV_LOG(ERR,
-			   "%s, Invalid or not allocated index, type:%d, idx:%d\n",
+			   "%s, Invalid or not allocated index, type:%s, idx:%d\n",
 			   tf_dir_2_str(parms->dir),
-			   parms->type,
+			   tf_tbl_type_2_str(parms->type),
 			   parms->idx);
 			return -EINVAL;
 		}
@@ -513,9 +428,9 @@ tf_tbl_get(struct tf *tfp,
 	rc = tf_rm_get_hcapi_type(&hparms);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "%s, Failed type lookup, type:%d, rc:%s\n",
+			    "%s, Failed type lookup, type:%s, rc:%s\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type,
+			    tf_tbl_type_2_str(parms->type),
 			    strerror(-rc));
 		return rc;
 	}
@@ -529,9 +444,9 @@ tf_tbl_get(struct tf *tfp,
 				  parms->idx);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "%s, Get failed, type:%d, rc:%s\n",
+			    "%s, Get failed, type:%s, rc:%s\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type,
+			    tf_tbl_type_2_str(parms->type),
 			    strerror(-rc));
 		return rc;
 	}
@@ -549,7 +464,6 @@ tf_tbl_bulk_get(struct tf *tfp,
 	struct tf_rm_check_indexes_in_range_parms cparms = { 0 };
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
-	uint16_t base = 0, shift = 0;
 	struct tbl_rm_db *tbl_db;
 	void *tbl_db_ptr = NULL;
 
@@ -574,40 +488,21 @@ tf_tbl_bulk_get(struct tf *tfp,
 	}
 	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
 
-	/* Only get table info if required for the device */
-	if (dev->ops->tf_dev_get_tbl_info) {
-		rc = dev->ops->tf_dev_get_tbl_info(tfp,
-						   tbl_db->tbl_db[parms->dir],
-						   parms->type,
-						   &base,
-						   &shift);
-		if (rc) {
-			TFP_DRV_LOG(ERR,
-				    "%s: Failed to get table info:%d\n",
-				    tf_dir_2_str(parms->dir),
-				    parms->type);
-			return rc;
-		}
-	}
-
 	/* Verify that the entries are in the range of reserved resources. */
 	cparms.rm_db = tbl_db->tbl_db[parms->dir];
 	cparms.subtype = parms->type;
-
-	TF_TBL_PTR_TO_RM(&cparms.starting_index, parms->starting_idx,
-			 base, shift);
-
 	cparms.num_entries = parms->num_entries;
+	cparms.starting_index = parms->starting_idx;
 
 	rc = tf_rm_check_indexes_in_range(&cparms);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
 			    "%s, Invalid or %d index starting from %d"
-			    " not in range, type:%d",
+			    " not in range, type:%s",
 			    tf_dir_2_str(parms->dir),
 			    parms->starting_idx,
 			    parms->num_entries,
-			    parms->type);
+			    tf_tbl_type_2_str(parms->type));
 		return rc;
 	}
 
@@ -617,9 +512,9 @@ tf_tbl_bulk_get(struct tf *tfp,
 	rc = tf_rm_get_hcapi_type(&hparms);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "%s, Failed type lookup, type:%d, rc:%s\n",
+			    "%s, Failed type lookup, type:%s, rc:%s\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type,
+			    tf_tbl_type_2_str(parms->type),
 			    strerror(-rc));
 		return rc;
 	}
@@ -634,9 +529,9 @@ tf_tbl_bulk_get(struct tf *tfp,
 				       parms->physical_mem_addr);
 	if (rc) {
 		TFP_DRV_LOG(ERR,
-			    "%s, Bulk get failed, type:%d, rc:%s\n",
+			    "%s, Bulk get failed, type:%s, rc:%s\n",
 			    tf_dir_2_str(parms->dir),
-			    parms->type,
+			    tf_tbl_type_2_str(parms->type),
 			    strerror(-rc));
 	}
 
@@ -653,9 +548,9 @@ tf_tbl_get_resc_info(struct tf *tfp,
 	struct tf_rm_get_alloc_info_parms ainfo;
 	void *tbl_db_ptr = NULL;
 	struct tbl_rm_db *tbl_db;
-	uint16_t base = 0, shift = 0;
 	struct tf_dev_info *dev;
 	struct tf_session *tfs;
+	uint16_t base = 0, shift = 0;
 
 	TF_CHECK_PARMS2(tfp, tbl);
 
@@ -677,7 +572,6 @@ tf_tbl_get_resc_info(struct tf *tfp,
 
 	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
 
-	/* check if reserved resource for WC is multiple of num_slices */
 	for (d = 0; d < TF_DIR_MAX; d++) {
 		ainfo.rm_db = tbl_db->tbl_db[d];
 		dinfo = tbl[d].info;
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h
index 7e1107ffe7..2483718e5d 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_tbl.h
@@ -28,14 +28,6 @@ struct tf_tbl_cfg_parms {
 	 * Table Type element configuration array
 	 */
 	struct tf_rm_element_cfg *cfg;
-	/**
-	 * Shadow table type configuration array
-	 */
-	struct tf_shadow_tbl_cfg *shadow_cfg;
-	/**
-	 * Boolean controlling the request shadow copy.
-	 */
-	bool shadow_copy;
 	/**
 	 * Session resource allocations
 	 */
@@ -197,8 +189,6 @@ struct tbl_rm_db {
  *
  * @ref tf_tbl_free
  *
- * @ref tf_tbl_alloc_search
- *
  * @ref tf_tbl_set
  *
  * @ref tf_tbl_get
@@ -255,10 +245,7 @@ int tf_tbl_alloc(struct tf *tfp,
 		 struct tf_tbl_alloc_parms *parms);
 
 /**
- * Free's the requested table type and returns it to the DB. If shadow
- * DB is enabled its searched first and if found the element refcount
- * is decremented. If refcount goes to 0 then its returned to the
- * table type DB.
+ * Frees the requested table type and returns it to the DB.
  *
  * [in] tfp
  *   Pointer to TF handle, used for HCAPI communication
diff --git a/drivers/net/bnxt/tf_core/tf_tbl_sram.c b/drivers/net/bnxt/tf_core/tf_tbl_sram.c
new file mode 100644
index 0000000000..ea10afecb6
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_tbl_sram.c
@@ -0,0 +1,713 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+/* Truflow Table APIs and supporting code */
+
+#include <rte_common.h>
+
+#include "tf_tbl.h"
+#include "tf_tbl_sram.h"
+#include "tf_sram_mgr.h"
+#include "tf_common.h"
+#include "tf_rm.h"
+#include "tf_util.h"
+#include "tf_msg.h"
+#include "tfp.h"
+#include "tf_session.h"
+#include "tf_device.h"
+#include "cfa_resource_types.h"
+
+#define DBG_SRAM 0
+
+/**
+ * tf_tbl_sram_get_info_parms parameter definition
+ */
+struct tf_tbl_sram_get_info_parms {
+	/**
+	 * [in] table RM database
+	 */
+	void *rm_db;
+	/**
+	 * [in] Receive or transmit direction
+	 */
+	enum tf_dir dir;
+	/**
+	 * [in] table_type
+	 *
+	 *  the TF index table type
+	 */
+	enum tf_tbl_type tbl_type;
+	/**
+	 * [out] bank
+	 *
+	 *  The SRAM bank associated with the type
+	 */
+	enum tf_sram_bank_id bank_id;
+	/**
+	 * [out] slice_size
+	 *
+	 *  the slice size for the indicated table type
+	 */
+	enum tf_sram_slice_size slice_size;
+};
+
+/**
+ * Translate HCAPI type to SRAM Manager bank
+ */
+const uint16_t tf_tbl_sram_hcapi_2_bank[CFA_RESOURCE_TYPE_P58_LAST] = {
+	[CFA_RESOURCE_TYPE_P58_SRAM_BANK_0] = TF_SRAM_BANK_ID_0,
+	[CFA_RESOURCE_TYPE_P58_SRAM_BANK_1] = TF_SRAM_BANK_ID_1,
+	[CFA_RESOURCE_TYPE_P58_SRAM_BANK_2] = TF_SRAM_BANK_ID_2,
+	[CFA_RESOURCE_TYPE_P58_SRAM_BANK_3] = TF_SRAM_BANK_ID_3
+};
+
+#define TF_TBL_SRAM_SLICES_MAX  \
+	(TF_SRAM_MGR_BLOCK_SZ_BYTES / TF_SRAM_MGR_MIN_SLICE_BYTES)
+/**
+ * Translate the number of slices per 64B block to an SRAM Manager slice size
+ */
+const uint8_t tf_tbl_sram_slices_2_size[TF_TBL_SRAM_SLICES_MAX + 1] = {
+	[0] = TF_SRAM_SLICE_SIZE_64B, /* if 0 slices assume 1 64B block */
+	[1] = TF_SRAM_SLICE_SIZE_64B, /* 1 slice  per 64B block */
+	[2] = TF_SRAM_SLICE_SIZE_32B, /* 2 slices per 64B block */
+	[4] = TF_SRAM_SLICE_SIZE_16B, /* 4 slices per 64B block */
+	[8] = TF_SRAM_SLICE_SIZE_8B   /* 8 slices per 64B block */
+};
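+
+/*
+ * Illustrative lookup: a table type whose entries occupy 16B has 4 slices
+ * per 64B block, so tf_tbl_sram_slices_2_size[4] yields
+ * TF_SRAM_SLICE_SIZE_16B.  The slice count itself is obtained from
+ * tf_rm_get_slices() in tf_tbl_sram_get_info() below.
+ */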
+
+/**
+ * Get SRAM Table Information for a given index table type
+ *
+ * [in] parms
+ *   Pointer to the SRAM get info parameters
+ *
+ * Returns
+ *   - (0) if successful
+ *   - (-EINVAL) on failure
+ *
+ */
+static int tf_tbl_sram_get_info(struct tf_tbl_sram_get_info_parms *parms)
+{
+	int rc = 0;
+	uint16_t hcapi_type;
+	uint16_t slices;
+	struct tf_rm_get_hcapi_parms hparms;
+	struct tf_rm_get_slices_parms sparms;
+
+	hparms.rm_db = parms->rm_db;
+	hparms.subtype = parms->tbl_type;
+	hparms.hcapi_type = &hcapi_type;
+
+	rc = tf_rm_get_hcapi_type(&hparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to get hcapi_type %s, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->tbl_type),
+			    strerror(-rc));
+		return rc;
+	}
+	parms->bank_id = tf_tbl_sram_hcapi_2_bank[hcapi_type];
+
+	sparms.rm_db = parms->rm_db;
+	sparms.subtype = parms->tbl_type;
+	sparms.slices = &slices;
+
+	rc = tf_rm_get_slices(&sparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to get slice cnt %s, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->tbl_type),
+			    strerror(-rc));
+		return rc;
+	}
+	if (slices)
+		parms->slice_size = tf_tbl_sram_slices_2_size[slices];
+
+	TFP_DRV_LOG(INFO,
+		    "(%s) bank(%s) slice_size(%s)\n",
+		    tf_tbl_type_2_str(parms->tbl_type),
+		    tf_sram_bank_2_str(parms->bank_id),
+		    tf_sram_slice_2_str(parms->slice_size));
+	return rc;
+}
+
+int
+tf_tbl_sram_bind(struct tf *tfp __rte_unused)
+{
+	int rc = 0;
+	void *sram_handle = NULL;
+
+	TF_CHECK_PARMS1(tfp);
+
+	rc = tf_sram_mgr_bind(&sram_handle);
+
+	tf_session_set_sram_db(tfp, sram_handle);
+
+	TFP_DRV_LOG(INFO,
+		    "SRAM Table - initialized\n");
+
+	return rc;
+}
+
+int
+tf_tbl_sram_unbind(struct tf *tfp __rte_unused)
+{
+	int rc = 0;
+	void *sram_handle = NULL;
+
+	TF_CHECK_PARMS1(tfp);
+
+	rc = tf_session_get_sram_db(tfp, &sram_handle);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get sram_handle from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+	if (sram_handle)
+		rc = tf_sram_mgr_unbind(sram_handle);
+
+	TFP_DRV_LOG(INFO,
+		    "SRAM Table - deinitialized\n");
+	return rc;
+}
+
+int
+tf_tbl_sram_alloc(struct tf *tfp,
+		  struct tf_tbl_alloc_parms *parms)
+{
+	int rc;
+	uint16_t idx;
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+	struct tf_tbl_sram_get_info_parms iparms = { 0 };
+	struct tf_sram_mgr_alloc_parms aparms = { 0 };
+	struct tbl_rm_db *tbl_db;
+	void *tbl_db_ptr = NULL;
+	void *sram_handle = NULL;
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+	/* Retrieve the session information */
+	rc = tf_session_get(tfp, &tfs, &dev);
+	if (rc)
+		return rc;
+
+	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get tbl_db from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+	rc = tf_session_get_sram_db(tfp, &sram_handle);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get sram_handle from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	iparms.rm_db = tbl_db->tbl_db[parms->dir];
+	iparms.dir = parms->dir;
+	iparms.tbl_type = parms->type;
+
+	rc = tf_tbl_sram_get_info(&iparms);
+
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to get SRAM info %s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type));
+		return rc;
+	}
+
+	aparms.dir = parms->dir;
+	aparms.bank_id = iparms.bank_id;
+	aparms.slice_size = iparms.slice_size;
+	aparms.sram_offset = &idx;
+	aparms.tbl_type = parms->type;
+	aparms.rm_db = tbl_db->tbl_db[parms->dir];
+
+	rc = tf_sram_mgr_alloc(sram_handle, &aparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to allocate SRAM table:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type));
+		return rc;
+	}
+	*parms->idx = idx;
+
+#if (DBG_SRAM == 1)
+	{
+		struct tf_sram_mgr_dump_parms dparms;
+
+		dparms.dir = parms->dir;
+		dparms.bank_id = iparms.bank_id;
+		dparms.slice_size = iparms.slice_size;
+
+		rc = tf_sram_mgr_dump(sram_handle, &dparms);
+	}
+#endif
+
+	return rc;
+}
+
+int
+tf_tbl_sram_free(struct tf *tfp __rte_unused,
+		 struct tf_tbl_free_parms *parms)
+{
+	int rc;
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+	struct tbl_rm_db *tbl_db;
+	void *tbl_db_ptr = NULL;
+	struct tf_tbl_sram_get_info_parms iparms = { 0 };
+	struct tf_sram_mgr_free_parms fparms = { 0 };
+	struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+	bool allocated = false;
+	void *sram_handle = NULL;
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+	/* Retrieve the session information */
+	rc = tf_session_get(tfp, &tfs, &dev);
+	if (rc)
+		return rc;
+
+	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get tbl_db from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+	rc = tf_session_get_sram_db(tfp, &sram_handle);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get sram_handle from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	iparms.rm_db = tbl_db->tbl_db[parms->dir];
+	iparms.dir = parms->dir;
+	iparms.tbl_type = parms->type;
+
+	rc = tf_tbl_sram_get_info(&iparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to get table info:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type));
+		return rc;
+	}
+
+#if (DBG_SRAM == 1)
+	{
+		struct tf_sram_mgr_dump_parms dparms;
+
+		printf("%s: %s: %s\n", tf_dir_2_str(parms->dir),
+		       tf_sram_slice_2_str(iparms.slice_size),
+		       tf_sram_bank_2_str(iparms.bank_id));
+
+		dparms.dir = parms->dir;
+		dparms.bank_id = iparms.bank_id;
+		dparms.slice_size = iparms.slice_size;
+
+		rc = tf_sram_mgr_dump(sram_handle, &dparms);
+	}
+#endif
+
+	aparms.sram_offset = parms->idx;
+	aparms.slice_size = iparms.slice_size;
+	aparms.bank_id = iparms.bank_id;
+	aparms.dir = parms->dir;
+	aparms.is_allocated = &allocated;
+
+	rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+	if (rc || !allocated) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Free of invalid entry:%s idx(%d):(%s)\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    parms->idx,
+			    strerror(-rc));
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	fparms.rm_db = tbl_db->tbl_db[parms->dir];
+	fparms.tbl_type = parms->type;
+	fparms.sram_offset = parms->idx;
+	fparms.slice_size = iparms.slice_size;
+	fparms.bank_id = iparms.bank_id;
+	fparms.dir = parms->dir;
+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)
+	fparms.tfp = tfp;
+#endif
+	rc = tf_sram_mgr_free(sram_handle, &fparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to free entry:%s idx(%d)\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    parms->idx);
+		return rc;
+	}
+
+#if (DBG_SRAM == 1)
+	{
+		struct tf_sram_mgr_dump_parms dparms;
+
+		printf("%s: %s: %s\n", tf_dir_2_str(parms->dir),
+		       tf_sram_slice_2_str(iparms.slice_size),
+		       tf_sram_bank_2_str(iparms.bank_id));
+
+		dparms.dir = parms->dir;
+		dparms.bank_id = iparms.bank_id;
+		dparms.slice_size = iparms.slice_size;
+
+		rc = tf_sram_mgr_dump(sram_handle, &dparms);
+	}
+#endif
+	return rc;
+}
+
+int
+tf_tbl_sram_set(struct tf *tfp,
+		struct tf_tbl_set_parms *parms)
+{
+	int rc;
+	bool allocated = false;
+	uint16_t hcapi_type;
+	struct tf_rm_get_hcapi_parms hparms = { 0 };
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+	struct tbl_rm_db *tbl_db;
+	void *tbl_db_ptr = NULL;
+	struct tf_tbl_sram_get_info_parms iparms = { 0 };
+	struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+	void *sram_handle = NULL;
+
+	TF_CHECK_PARMS3(tfp, parms, parms->data);
+
+	/* Retrieve the session information */
+	rc = tf_session_get(tfp, &tfs, &dev);
+	if (rc)
+		return rc;
+
+	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get tbl_db from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+	rc = tf_session_get_sram_db(tfp, &sram_handle);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get sram_handle from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	iparms.rm_db = tbl_db->tbl_db[parms->dir];
+	iparms.dir = parms->dir;
+	iparms.tbl_type = parms->type;
+
+	rc = tf_tbl_sram_get_info(&iparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to get table info:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type));
+		return rc;
+	}
+
+	aparms.sram_offset = parms->idx;
+	aparms.slice_size = iparms.slice_size;
+	aparms.bank_id = iparms.bank_id;
+	aparms.dir = parms->dir;
+	aparms.is_allocated = &allocated;
+	rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+	if (rc || !allocated) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Entry not allocated:%s idx(%d):(%s)\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    parms->idx,
+			    strerror(-rc));
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	/* Set the entry */
+	hparms.rm_db = tbl_db->tbl_db[parms->dir];
+	hparms.subtype = parms->type;
+	hparms.hcapi_type = &hcapi_type;
+	rc = tf_rm_get_hcapi_type(&hparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, Failed type lookup, type:%s, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    strerror(-rc));
+		return rc;
+	}
+
+	rc = tf_msg_set_tbl_entry(tfp,
+				  parms->dir,
+				  hcapi_type,
+				  parms->data_sz_in_bytes,
+				  parms->data,
+				  parms->idx);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, Set failed, type:%s, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    strerror(-rc));
+		return rc;
+	}
+	return rc;
+}
+
+int
+tf_tbl_sram_get(struct tf *tfp,
+		struct tf_tbl_get_parms *parms)
+{
+	int rc;
+	uint16_t hcapi_type;
+	bool allocated = false;
+	struct tf_rm_get_hcapi_parms hparms = { 0 };
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+	struct tbl_rm_db *tbl_db;
+	void *tbl_db_ptr = NULL;
+	struct tf_tbl_sram_get_info_parms iparms = { 0 };
+	struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+	void *sram_handle = NULL;
+
+	TF_CHECK_PARMS3(tfp, parms, parms->data);
+
+	/* Retrieve the session information */
+	rc = tf_session_get(tfp, &tfs, &dev);
+	if (rc)
+		return rc;
+
+	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get tbl_db from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+	rc = tf_session_get_sram_db(tfp, &sram_handle);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get sram_handle from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	iparms.rm_db = tbl_db->tbl_db[parms->dir];
+	iparms.dir = parms->dir;
+	iparms.tbl_type = parms->type;
+
+	rc = tf_tbl_sram_get_info(&iparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to get table info:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type));
+		return rc;
+	}
+
+	aparms.sram_offset = parms->idx;
+	aparms.slice_size = iparms.slice_size;
+	aparms.bank_id = iparms.bank_id;
+	aparms.dir = parms->dir;
+	aparms.is_allocated = &allocated;
+
+	rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+	if (rc || !allocated) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Entry not allocated:%s idx(%d):(%s)\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    parms->idx,
+			    strerror(-rc));
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	/* Get the entry */
+	hparms.rm_db = tbl_db->tbl_db[parms->dir];
+	hparms.subtype = parms->type;
+	hparms.hcapi_type = &hcapi_type;
+	rc = tf_rm_get_hcapi_type(&hparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, Failed type lookup, type:%s, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Get the entry */
+	rc = tf_msg_get_tbl_entry(tfp,
+				  parms->dir,
+				  hcapi_type,
+				  parms->data_sz_in_bytes,
+				  parms->data,
+				  parms->idx);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, Get failed, type:%s, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    strerror(-rc));
+		return rc;
+	}
+	return rc;
+}
+
+int
+tf_tbl_sram_bulk_get(struct tf *tfp,
+		     struct tf_tbl_get_bulk_parms *parms)
+{
+	int rc;
+	uint16_t hcapi_type;
+	struct tf_rm_get_hcapi_parms hparms = { 0 };
+	struct tf_tbl_sram_get_info_parms iparms = { 0 };
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+	struct tbl_rm_db *tbl_db;
+	void *tbl_db_ptr = NULL;
+	uint16_t idx;
+	struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+	bool allocated = false;
+	void *sram_handle = NULL;
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+	/* Retrieve the session information */
+	rc = tf_session_get(tfp, &tfs, &dev);
+	if (rc)
+		return rc;
+
+	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get tbl_db from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+	rc = tf_session_get_sram_db(tfp, &sram_handle);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Failed to get sram_handle from session, rc:%s\n",
+			    strerror(-rc));
+		return rc;
+	}
+
+	iparms.rm_db = tbl_db->tbl_db[parms->dir];
+	iparms.dir = parms->dir;
+	iparms.tbl_type = parms->type;
+
+	rc = tf_tbl_sram_get_info(&iparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to get table info:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type));
+		return rc;
+	}
+
+	/* Validate that both the starting and the ending offsets are
+	 * allocated.  This API is only used for statistics, which are
+	 * allocated as 8B entries, so per-entry allocation can be verified.
+	 */
+	aparms.sram_offset = parms->starting_idx;
+	aparms.slice_size = iparms.slice_size;
+	aparms.bank_id = iparms.bank_id;
+	aparms.dir = parms->dir;
+	aparms.is_allocated = &allocated;
+	rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+	if (rc || !allocated) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Entry not allocated:%s starting_idx(%d):(%s)\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    parms->starting_idx,
+			    strerror(-rc));
+		rc = -ENOMEM;
+		return rc;
+	}
+	idx = parms->starting_idx + parms->num_entries - 1;
+	aparms.sram_offset = idx;
+	rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+	if (rc || !allocated) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Entry not allocated:%s last_idx(%d):(%s)\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    idx,
+			    strerror(-rc));
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	hparms.rm_db = tbl_db->tbl_db[parms->dir];
+	hparms.subtype = parms->type;
+	hparms.hcapi_type = &hcapi_type;
+	rc = tf_rm_get_hcapi_type(&hparms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, Failed type lookup, type:%s, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Get the entries */
+	rc = tf_msg_bulk_get_tbl_entry(tfp,
+				       parms->dir,
+				       hcapi_type,
+				       parms->starting_idx,
+				       parms->num_entries,
+				       parms->entry_sz_in_bytes,
+				       parms->physical_mem_addr);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, Bulk get failed, type:%s, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_tbl_type_2_str(parms->type),
+			    strerror(-rc));
+	}
+	return rc;
+}
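
For context, a minimal caller sketch for the bulk path above (illustrative only,
not part of the patch): the end-of-range probe uses starting_idx + num_entries - 1,
so a read of eight 64b counters starting at index 100 verifies that offsets 100
and 107 are both allocated before the firmware request is issued. TF_DIR_RX and
TF_TBL_TYPE_ACT_STATS_64 are assumed from tf_core.h, and physical_mem_addr is
assumed to carry the IOVA of a DMA-able destination buffer.

  static int
  bulk_read_stats(struct tf *tfp, uint64_t buf_iova)
  {
  	struct tf_tbl_get_bulk_parms bparms = { 0 };

  	bparms.dir = TF_DIR_RX;
  	bparms.type = TF_TBL_TYPE_ACT_STATS_64;
  	bparms.starting_idx = 100;		/* assumes 100..107 were allocated */
  	bparms.num_entries = 8;			/* last index probed: 107 */
  	bparms.entry_sz_in_bytes = 8;		/* 64b counters */
  	bparms.physical_mem_addr = buf_iova;	/* DMA-able destination buffer */

  	return tf_tbl_sram_bulk_get(tfp, &bparms);
  }
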
diff --git a/drivers/net/bnxt/tf_core/tf_tbl_sram.h b/drivers/net/bnxt/tf_core/tf_tbl_sram.h
new file mode 100644
index 0000000000..32001e34a9
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_tbl_sram.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef TF_TBL_SRAM_H_
+#define TF_TBL_SRAM_H_
+
+#include "tf_core.h"
+#include "stack.h"
+
+/**
+ * The SRAM Table module provides processing of managed SRAM types.
+ */
+
+/**
+ * @page  tblsram SRAM Table
+ *
+ * @ref tf_tbl_sram_bind
+ *
+ * @ref tf_tbl_sram_unbind
+ *
+ * @ref tf_tbl_sram_alloc
+ *
+ * @ref tf_tbl_sram_free
+ *
+ * @ref tf_tbl_sram_set
+ *
+ * @ref tf_tbl_sram_get
+ *
+ * @ref tf_tbl_sram_bulk_get
+ */
+
+/**
+ * Initializes the SRAM Table module. Must be invoked before any of
+ * the other access functions.
+ *
+ * [in] tfp
+ *   Pointer to TF handle, used for HCAPI communication
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_bind(struct tf *tfp);
+
+/**
+ * Cleans up the private DBs and releases all the data.
+ *
+ * [in] tfp
+ *   Pointer to TF handle, used for HCAPI communication
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_unbind(struct tf *tfp);
+
+/**
+ * Allocates the requested table type from the internal RM DB.
+ *
+ * [in] tfp
+ *   Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ *   Pointer to Table allocation parameters
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_alloc(struct tf *tfp,
+		      struct tf_tbl_alloc_parms *parms);
+
+/**
+ * Frees the requested table type and returns it to the DB. If the
+ * shadow DB is enabled, it is searched first; if the element is found,
+ * its refcount is decremented. If the refcount reaches 0, the element
+ * is returned to the table type DB.
+ *
+ * [in] tfp
+ *   Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ *   Pointer to Table free parameters
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_free(struct tf *tfp,
+		     struct tf_tbl_free_parms *parms);
+
+/**
+ * Configures the requested element by sending a firmware request which
+ * then installs it into the device internal structures.
+ *
+ * [in] tfp
+ *   Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ *   Pointer to Table set parameters
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_set(struct tf *tfp,
+		    struct tf_tbl_set_parms *parms);
+
+/**
+ * Retrieves the requested element by sending a firmware request to get
+ * the element.
+ *
+ * [in] tfp
+ *   Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ *   Pointer to Table get parameters
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_get(struct tf *tfp,
+		    struct tf_tbl_get_parms *parms);
+
+/**
+ * Retrieves bulk block of elements by sending a firmware request to
+ * get the elements.
+ *
+ * [in] tfp
+ *   Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ *   Pointer to Table get bulk parameters
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_bulk_get(struct tf *tfp,
+			 struct tf_tbl_get_bulk_parms *parms);
+
+#endif /* TF_TBL_SRAM_H_ */
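
To make the intended call ordering of the API above concrete, here is a minimal
read sketch (illustrative only; the tf_tbl_get_parms field names mirror their use
in tf_tbl_sram_get(), the enum values are assumed from tf_core.h, and in the
driver the bind/unbind calls are issued from the device open/close path rather
than per read as shown here).

  static int
  sram_tbl_read_one(struct tf *tfp, uint32_t idx, uint64_t *val)
  {
  	struct tf_tbl_get_parms gparms = { 0 };
  	int rc;

  	/* Normally done once when the session/device is opened */
  	rc = tf_tbl_sram_bind(tfp);
  	if (rc)
  		return rc;

  	gparms.dir = TF_DIR_RX;
  	gparms.type = TF_TBL_TYPE_ACT_STATS_64;
  	gparms.idx = idx;			/* previously allocated index */
  	gparms.data = (uint8_t *)val;
  	gparms.data_sz_in_bytes = sizeof(*val);
  	rc = tf_tbl_sram_get(tfp, &gparms);

  	/* Normally done once when the session/device is closed */
  	tf_tbl_sram_unbind(tfp);
  	return rc;
  }
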
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c
index 45206c5992..806af3070a 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam.c
@@ -43,7 +43,7 @@ tf_tcam_bind(struct tf *tfp,
 	struct tf_shadow_tcam_free_db_parms fshadow;
 	struct tf_shadow_tcam_cfg_parms shadow_cfg;
 	struct tf_shadow_tcam_create_db_parms shadow_cdb;
-	uint16_t num_slices = 1;
+	uint16_t num_slices = parms->wc_num_slices;
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
 	struct tcam_rm_db *tcam_db;
@@ -61,7 +61,7 @@ tf_tcam_bind(struct tf *tfp,
 	if (rc)
 		return rc;
 
-	if (dev->ops->tf_dev_get_tcam_slice_info == NULL) {
+	if (dev->ops->tf_dev_set_tcam_slice_info == NULL) {
 		rc = -EOPNOTSUPP;
 		TFP_DRV_LOG(ERR,
 			    "Operation not supported, rc:%s\n",
@@ -69,10 +69,8 @@ tf_tcam_bind(struct tf *tfp,
 		return rc;
 	}
 
-	rc = dev->ops->tf_dev_get_tcam_slice_info(tfp,
-						  TF_TCAM_TBL_TYPE_WC_TCAM,
-						  0,
-						  &num_slices);
+	rc = dev->ops->tf_dev_set_tcam_slice_info(tfp,
+						  num_slices);
 	if (rc)
 		return rc;
 
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index bed17af6ae..b1e7a92b0b 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -12,6 +12,9 @@
  * The TCAM module provides processing of Internal TCAM types.
  */
 
+/* Number of slices per row for WC TCAM */
+extern uint16_t g_wc_num_slices_per_row;
+
 /**
  * TCAM configuration parameters
  */
@@ -36,6 +39,10 @@ struct tf_tcam_cfg_parms {
 	 * Session resource allocations
 	 */
 	struct tf_session_resources *resources;
+	/**
+	 * WC number of slices per row.
+	 */
+	enum tf_wc_num_slice wc_num_slices;
 };
 
 /**
diff --git a/drivers/net/bnxt/tf_core/tf_tcam_shared.c b/drivers/net/bnxt/tf_core/tf_tcam_shared.c
index 83b6fbd5fb..c120c6f577 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam_shared.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam_shared.c
@@ -279,18 +279,6 @@ tf_tcam_shared_bind(struct tf *tfp,
 		if (rc)
 			return rc;
 
-		rc = tf_tcam_shared_get_slices(tfp,
-					       dev,
-					       &num_slices);
-		if (rc)
-			return rc;
-
-		if (num_slices > 1) {
-			TFP_DRV_LOG(ERR,
-				    "Only single slice supported\n");
-			return -EOPNOTSUPP;
-		}
-
 		tf_tcam_shared_create_db(&tcam_shared_wc);
 
 
@@ -330,6 +318,18 @@ tf_tcam_shared_bind(struct tf *tfp,
 
 			tf_session_set_tcam_shared_db(tfp, (void *)tcam_shared_wc);
 		}
+
+		rc = tf_tcam_shared_get_slices(tfp,
+					       dev,
+					       &num_slices);
+		if (rc)
+			return rc;
+
+		if (num_slices > 1) {
+			TFP_DRV_LOG(ERR,
+				    "Only single slice supported\n");
+			return -EOPNOTSUPP;
+		}
 	}
 done:
 	return rc;
@@ -972,9 +972,9 @@ tf_tcam_shared_move_entry(struct tf *tfp,
 	sparms.idx = dphy_idx;
 	sparms.key = gparms.key;
 	sparms.mask = gparms.mask;
-	sparms.key_size = gparms.key_size;
+	sparms.key_size = key_sz_bytes;
 	sparms.result = gparms.result;
-	sparms.result_size = gparms.result_size;
+	sparms.result_size = remap_sz_bytes;
 
 	rc = tf_msg_tcam_entry_set(tfp, dev, &sparms);
 	if (rc) {
diff --git a/drivers/net/bnxt/tf_core/tf_util.c b/drivers/net/bnxt/tf_core/tf_util.c
index d100399d0a..c1b9be0755 100644
--- a/drivers/net/bnxt/tf_core/tf_util.c
+++ b/drivers/net/bnxt/tf_core/tf_util.c
@@ -76,6 +76,8 @@ tf_tbl_type_2_str(enum tf_tbl_type tbl_type)
 	switch (tbl_type) {
 	case TF_TBL_TYPE_FULL_ACT_RECORD:
 		return "Full Action record";
+	case TF_TBL_TYPE_COMPACT_ACT_RECORD:
+		return "Compact Action record";
 	case TF_TBL_TYPE_MCAST_GROUPS:
 		return "Multicast Groups";
 	case TF_TBL_TYPE_ACT_ENCAP_8B:
@@ -96,6 +98,14 @@ tf_tbl_type_2_str(enum tf_tbl_type tbl_type)
 		return "Stats 64B";
 	case TF_TBL_TYPE_ACT_MODIFY_IPV4:
 		return "Modify IPv4";
+	case TF_TBL_TYPE_ACT_MODIFY_8B:
+		return "Modify 8B";
+	case TF_TBL_TYPE_ACT_MODIFY_16B:
+		return "Modify 16B";
+	case TF_TBL_TYPE_ACT_MODIFY_32B:
+		return "Modify 32B";
+	case TF_TBL_TYPE_ACT_MODIFY_64B:
+		return "Modify 64B";
 	case TF_TBL_TYPE_METER_PROF:
 		return "Meter Profile";
 	case TF_TBL_TYPE_METER_INST:
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index dbf85e4eda..183bae66c5 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -384,6 +384,7 @@ ulp_ctx_shared_session_open(struct bnxt *bp,
 	size_t copy_nbytes;
 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
 	int32_t	rc = 0;
+	uint8_t app_id;
 
 	/* only perform this if shared session is enabled. */
 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
@@ -422,6 +423,12 @@ ulp_ctx_shared_session_open(struct bnxt *bp,
 	if (rc)
 		return rc;
 
+	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
+	if (rc) {
+		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+		return -EINVAL;
+	}
+
 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
 	if (rc) {
 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
@@ -445,6 +452,10 @@ ulp_ctx_shared_session_open(struct bnxt *bp,
 
 	parms.shadow_copy = true;
 	parms.bp = bp;
+	if (app_id == 0 || app_id == 3)
+		parms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
+	else
+		parms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
 
 	/*
 	 * Open the session here, but collect the resources during the
@@ -516,6 +527,7 @@ ulp_ctx_session_open(struct bnxt *bp,
 	struct tf_open_session_parms	params;
 	struct tf_session_resources	*resources;
 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
+	uint8_t app_id;
 
 	memset(&params, 0, sizeof(params));
 
@@ -529,6 +541,12 @@ ulp_ctx_session_open(struct bnxt *bp,
 
 	params.shadow_copy = true;
 
+	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
+	if (rc) {
+		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+		return -EINVAL;
+	}
+
 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
 	if (rc) {
 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
@@ -556,6 +574,11 @@ ulp_ctx_session_open(struct bnxt *bp,
 		return rc;
 
 	params.bp = bp;
+	if (app_id == 0 || app_id == 3)
+		params.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
+	else
+		params.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
+
 	rc = tf_open_session(&bp->tfp, &params);
 	if (rc) {
 		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
diff --git a/meson_options.txt b/meson_options.txt
index 0e92734c49..f686e6d92a 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -46,3 +46,5 @@ option('tests', type: 'boolean', value: true, description:
        'build unit tests')
 option('use_hpet', type: 'boolean', value: false, description:
        'use HPET timer in EAL')
+option('bnxt_tf_wc_slices', type: 'integer', min: 1, max: 4, value: 2,
+	description: 'Number of slices per WC TCAM entry')
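
The new option can be selected at configure time in the usual meson way, for
example with "meson configure -Dbnxt_tf_wc_slices=2" in an existing build
directory, or by passing the same -D flag to the initial meson setup; values
outside the declared 1-4 range are rejected by meson.
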
-- 
2.17.1



Thread overview: 83+ messages
2021-09-01 14:24 [dpdk-dev] [PATCH 00/14] enhancements to host based flow table management Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 01/14] net/bnxt: tf core index table updates Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 02/14] net/bnxt: enable dpool allocator Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 03/14] net/bnxt: add flow meter drop counter support Venkat Duvvuru
2021-09-01 14:24 ` Venkat Duvvuru [this message]
2021-09-01 14:24 ` [dpdk-dev] [PATCH 05/14] net/bnxt: add flow templates support for Thor Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 06/14] net/bnxt: add support for tunnel offloads Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 07/14] net/bnxt: add support for dynamic encap action Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 08/14] net/bnxt: add wild card TCAM byte order for Thor Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 09/14] net/bnxt: add flow templates " Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 10/14] net/bnxt: tf core SRAM Manager Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 11/14] net/bnxt: dynamically allocate space for EM defrag function Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 12/14] net/bnxt: sram manager shared session Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 13/14] net/bnxt: add enhancements to TF ULP Venkat Duvvuru
2021-09-01 14:24 ` [dpdk-dev] [PATCH 14/14] net/bnxt: add support for testpmd co-existence Venkat Duvvuru
2021-09-08  5:06 ` [dpdk-dev] [PATCH v2 00/13] enhancements to host based flow table management Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 01/13] net/bnxt: tf core index table updates Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 02/13] net/bnxt: enable dpool allocator Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 03/13] net/bnxt: add flow meter drop counter support Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 04/13] net/bnxt: add Thor SRAM mgr model Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 05/13] net/bnxt: add flow templates support for Thor Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 06/13] net/bnxt: add support for tunnel offloads Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 07/13] net/bnxt: add support for dynamic encap action Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 08/13] net/bnxt: add wild card TCAM byte order for Thor Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 09/13] net/bnxt: add flow templates " Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 10/13] net/bnxt: tf core SRAM Manager Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 11/13] net/bnxt: dynamically allocate space for EM defrag function Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 12/13] net/bnxt: sram manager shared session Venkat Duvvuru
2021-09-08  5:06   ` [dpdk-dev] [PATCH v2 13/13] net/bnxt: add enhancements to TF ULP Venkat Duvvuru
2021-09-11 15:30   ` [dpdk-dev] [PATCH v3 00/13] enhancements to host based flow table management Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 01/13] net/bnxt: tf core index table updates Venkat Duvvuru
2021-09-16 13:47       ` Ferruh Yigit
2021-09-16 15:51         ` Ajit Khaparde
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 02/13] net/bnxt: enable dpool allocator Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 03/13] net/bnxt: add flow meter drop counter support Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 04/13] net/bnxt: add Thor SRAM mgr model Venkat Duvvuru
2021-09-16 13:49       ` Ferruh Yigit
2021-09-16 14:01         ` Bruce Richardson
2021-09-16 14:04           ` Thomas Monjalon
2021-09-16 16:29         ` Venkat Duvvuru
2021-09-16 16:30           ` Ferruh Yigit
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 05/13] net/bnxt: add flow templates support for Thor Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 06/13] net/bnxt: add support for tunnel offloads Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 07/13] net/bnxt: add support for dynamic encap action Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 08/13] net/bnxt: add wild card TCAM byte order for Thor Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 09/13] net/bnxt: add flow templates " Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 10/13] net/bnxt: tf core SRAM Manager Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 11/13] net/bnxt: dynamically allocate space for EM defrag function Venkat Duvvuru
2021-09-16 13:53       ` Ferruh Yigit
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 12/13] net/bnxt: sram manager shared session Venkat Duvvuru
2021-09-11 15:30     ` [dpdk-dev] [PATCH v3 13/13] net/bnxt: add enhancements to TF ULP Venkat Duvvuru
2021-09-16 14:06       ` Ferruh Yigit
2021-09-16  3:25     ` [dpdk-dev] [PATCH v3 00/13] enhancements to host based flow table management Ajit Khaparde
2021-09-16 13:26     ` Ferruh Yigit
2021-09-16 14:17       ` Brandon Lo
2021-09-16 16:18       ` Ajit Khaparde
2021-09-20  7:42   ` [dpdk-dev] [PATCH v4 " Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 01/13] net/bnxt: updates to TF core index table Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 02/13] net/bnxt: enable dpool allocator Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 03/13] net/bnxt: add flow meter drop counter support Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 04/13] net/bnxt: add SRAM manager model Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 05/13] net/bnxt: add flow template support for Thor Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 06/13] net/bnxt: add support for tunnel offload API Venkat Duvvuru
2021-09-28 12:43       ` Ferruh Yigit
2021-09-28 15:46         ` Thomas Monjalon
2021-09-28 15:57           ` Ferruh Yigit
2021-09-28 21:32         ` Ajit Khaparde
2021-09-29  8:20           ` Thomas Monjalon
2021-09-29  9:44             ` Ferruh Yigit
2021-09-29 16:44               ` Ajit Khaparde
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 07/13] net/bnxt: add support for dynamic encap action Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 08/13] net/bnxt: add wild card TCAM byte order for Thor Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 09/13] net/bnxt: add flow templates " Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 10/13] net/bnxt: change log level to debug Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 11/13] net/bnxt: dynamically allocate space for EM defrag function Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 12/13] net/bnxt: add SRAM manager shared session Venkat Duvvuru
2021-09-20  7:42     ` [dpdk-dev] [PATCH v4 13/13] net/bnxt: add enhancements to TF ULP Venkat Duvvuru
2021-09-21  4:50     ` [dpdk-dev] [PATCH v4 00/13] enhancements to host based flow table management Ajit Khaparde
2021-09-22 17:36       ` Ferruh Yigit
2021-09-22 20:21         ` Ajit Khaparde
2021-09-23  7:19           ` Ferruh Yigit
2021-09-25 14:24             ` [dpdk-dev] [PATCH] net/bnxt: remove code to initialize SRAM slice node Ajit Khaparde
2021-09-27 10:25               ` Ferruh Yigit
