From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
To: dev@dpdk.org
Cc: Michael Wildt <michael.wildt@broadcom.com>
Subject: [dpdk-dev] [PATCH v3 08/34] net/bnxt: add resource manager functionality
Date: Tue, 14 Apr 2020 13:43:05 +0530	[thread overview]
Message-ID: <1586852011-37536-9-git-send-email-venkatkumar.duvvuru@broadcom.com> (raw)
In-Reply-To: <1586852011-37536-1-git-send-email-venkatkumar.duvvuru@broadcom.com>

From: Michael Wildt <michael.wildt@broadcom.com>

- Add TruFlow Resource Manager (RM) functionality for resource
  handling.
- Update the RM with resource support functions for debugging as well
  as resource cleanup.
- Add support for internal and external pools.
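
At the heart of the RM changes is reserved-range marking: for each HW
resource type, firmware grants the session a contiguous range
[start, start + stride - 1] within a pool of max entries, and every
index outside that range is marked as allocated up front so the
session can never hand out an entry it does not own. A minimal sketch
of the idea (the helper name is hypothetical; it assumes the
ba_init()/ba_alloc_index() semantics from bitalloc.h and is not the
literal driver code; tf_rm_reserve_range() in tf_rm.c adds further
edge-case handling):

    /* Illustrative only. Mark as allocated every index outside
     * [start, start + stride - 1]. pool is a bitalloc pool of max
     * entries; a granted stride of 0 means the session owns nothing,
     * so the whole pool gets marked.
     */
    static void
    mark_unowned_range(struct bitalloc *pool, uint32_t max,
                       uint32_t start, uint32_t stride)
    {
            uint32_t i;

            ba_init(pool, max);
            for (i = 0; i < start; i++)
                    ba_alloc_index(pool, i);
            for (i = start + stride; i < max; i++)
                    ba_alloc_index(pool, i);
    }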

Signed-off-by: Michael Wildt <michael.wildt@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/tf_core/tf_core.c    |   14 +
 drivers/net/bnxt/tf_core/tf_core.h    |   26 +
 drivers/net/bnxt/tf_core/tf_rm.c      | 1718 +++++++++++++++++++++++++++++++--
 drivers/net/bnxt/tf_core/tf_session.h |   10 +
 drivers/net/bnxt/tf_core/tf_tbl.h     |   43 +
 5 files changed, 1735 insertions(+), 76 deletions(-)
 create mode 100644 drivers/net/bnxt/tf_core/tf_tbl.h

diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 7d76efa..bb6d38b 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -149,6 +149,20 @@ tf_open_session(struct tf                    *tfp,
 		goto cleanup_close;
 	}
 
+	/* Shadow DB configuration */
+	if (parms->shadow_copy) {
+		/* Ignore shadow_copy setting */
+		session->shadow_copy = 0; /* parms->shadow_copy; */
+#if (TF_SHADOW == 1)
+		rc = tf_rm_shadow_db_init(tfs);
+		if (rc)
+			PMD_DRV_LOG(ERR,
+				    "Shadow DB Initialization failed, rc:%d\n",
+				    rc);
+		/* Add additional processing */
+#endif /* TF_SHADOW */
+	}
+
 	/* Adjust the Session with what firmware allowed us to get */
 	rc = tf_rm_allocate_validate(tfp);
 	if (rc) {
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 3455d8f..16c8251 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -30,6 +30,32 @@ enum tf_dir {
 	TF_DIR_MAX
 };
 
+/**
+ * External pool size
+ *
+ * Defines the size, in bytes, of a single entry in the pool of
+ * external action records. Currently an entry is an index.
+ */
+#define TF_EXT_POOL_ENTRY_SZ_BYTES 1
+
+/**
+ *  External pool entry count
+ *
+ *  Defines the number of entries in the external action pool.
+ */
+#define TF_EXT_POOL_ENTRY_CNT (1 * 1024)
+
+/**
+ * Number of external pools
+ */
+#define TF_EXT_POOL_CNT_MAX 1
+
+/**
+ * External pool Id
+ */
+#define TF_EXT_POOL_0      0 /**< matches TF_TBL_TYPE_EXT   */
+#define TF_EXT_POOL_1      1 /**< matches TF_TBL_TYPE_EXT_0 */
+
 /********** BEGIN API FUNCTION PROTOTYPES/PARAMETERS **********/
 
 /**
diff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c
index 56767e7..a5e96f29 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.c
+++ b/drivers/net/bnxt/tf_core/tf_rm.c
@@ -104,9 +104,82 @@ const char
 	case TF_IDENT_TYPE_L2_FUNC:
 		return "l2_func";
 	default:
-		break;
+		return "Invalid identifier";
+	}
+}
+
+const char
+*tf_tcam_tbl_2_str(enum tf_tcam_tbl_type tcam_type)
+{
+	switch (tcam_type) {
+	case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
+		return "l2_ctxt_tcam";
+	case TF_TCAM_TBL_TYPE_PROF_TCAM:
+		return "prof_tcam";
+	case TF_TCAM_TBL_TYPE_WC_TCAM:
+		return "wc_tcam";
+	case TF_TCAM_TBL_TYPE_VEB_TCAM:
+		return "veb_tcam";
+	case TF_TCAM_TBL_TYPE_SP_TCAM:
+		return "sp_tcam";
+	case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
+		return "ct_rule_tcam";
+	default:
+		return "Invalid tcam table type";
+	}
+}
+
+const char
+*tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type)
+{
+	switch (hw_type) {
+	case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
+		return "L2 ctxt tcam";
+	case TF_RESC_TYPE_HW_PROF_FUNC:
+		return "Profile Func";
+	case TF_RESC_TYPE_HW_PROF_TCAM:
+		return "Profile tcam";
+	case TF_RESC_TYPE_HW_EM_PROF_ID:
+		return "EM profile id";
+	case TF_RESC_TYPE_HW_EM_REC:
+		return "EM record";
+	case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
+		return "WC tcam profile id";
+	case TF_RESC_TYPE_HW_WC_TCAM:
+		return "WC tcam";
+	case TF_RESC_TYPE_HW_METER_PROF:
+		return "Meter profile";
+	case TF_RESC_TYPE_HW_METER_INST:
+		return "Meter instance";
+	case TF_RESC_TYPE_HW_MIRROR:
+		return "Mirror";
+	case TF_RESC_TYPE_HW_UPAR:
+		return "UPAR";
+	case TF_RESC_TYPE_HW_SP_TCAM:
+		return "Source properties tcam";
+	case TF_RESC_TYPE_HW_L2_FUNC:
+		return "L2 Function";
+	case TF_RESC_TYPE_HW_FKB:
+		return "FKB";
+	case TF_RESC_TYPE_HW_TBL_SCOPE:
+		return "Table scope";
+	case TF_RESC_TYPE_HW_EPOCH0:
+		return "EPOCH0";
+	case TF_RESC_TYPE_HW_EPOCH1:
+		return "EPOCH1";
+	case TF_RESC_TYPE_HW_METADATA:
+		return "Metadata";
+	case TF_RESC_TYPE_HW_CT_STATE:
+		return "Connection tracking state";
+	case TF_RESC_TYPE_HW_RANGE_PROF:
+		return "Range profile";
+	case TF_RESC_TYPE_HW_RANGE_ENTRY:
+		return "Range entry";
+	case TF_RESC_TYPE_HW_LAG_ENTRY:
+		return "LAG";
+	default:
+		return "Invalid hw type";
 	}
-	return "Invalid identifier";
 }
 
 const char
@@ -145,6 +218,93 @@ const char
 }
 
 /**
+ * Helper function to perform a HW HCAPI resource type lookup against
+ * the reserved value of the same static type.
+ *
+ * Returns:
+ *   -EOPNOTSUPP - Reserved resource type not supported
+ *   Value       - Integer value of the reserved value for the requested type
+ */
+static int
+tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index)
+{
+	int value = -EOPNOTSUPP;
+
+	switch (index) {
+	case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
+		TF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value);
+		break;
+	case TF_RESC_TYPE_HW_PROF_FUNC:
+		TF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value);
+		break;
+	case TF_RESC_TYPE_HW_PROF_TCAM:
+		TF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value);
+		break;
+	case TF_RESC_TYPE_HW_EM_PROF_ID:
+		TF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, value);
+		break;
+	case TF_RESC_TYPE_HW_EM_REC:
+		TF_RESC_RSVD(dir, TF_RSVD_EM_REC, value);
+		break;
+	case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
+		TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value);
+		break;
+	case TF_RESC_TYPE_HW_WC_TCAM:
+		TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value);
+		break;
+	case TF_RESC_TYPE_HW_METER_PROF:
+		TF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value);
+		break;
+	case TF_RESC_TYPE_HW_METER_INST:
+		TF_RESC_RSVD(dir, TF_RSVD_METER_INST, value);
+		break;
+	case TF_RESC_TYPE_HW_MIRROR:
+		TF_RESC_RSVD(dir, TF_RSVD_MIRROR, value);
+		break;
+	case TF_RESC_TYPE_HW_UPAR:
+		TF_RESC_RSVD(dir, TF_RSVD_UPAR, value);
+		break;
+	case TF_RESC_TYPE_HW_SP_TCAM:
+		TF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value);
+		break;
+	case TF_RESC_TYPE_HW_L2_FUNC:
+		TF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value);
+		break;
+	case TF_RESC_TYPE_HW_FKB:
+		TF_RESC_RSVD(dir, TF_RSVD_FKB, value);
+		break;
+	case TF_RESC_TYPE_HW_TBL_SCOPE:
+		TF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value);
+		break;
+	case TF_RESC_TYPE_HW_EPOCH0:
+		TF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value);
+		break;
+	case TF_RESC_TYPE_HW_EPOCH1:
+		TF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value);
+		break;
+	case TF_RESC_TYPE_HW_METADATA:
+		TF_RESC_RSVD(dir, TF_RSVD_METADATA, value);
+		break;
+	case TF_RESC_TYPE_HW_CT_STATE:
+		TF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value);
+		break;
+	case TF_RESC_TYPE_HW_RANGE_PROF:
+		TF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value);
+		break;
+	case TF_RESC_TYPE_HW_RANGE_ENTRY:
+		TF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value);
+		break;
+	case TF_RESC_TYPE_HW_LAG_ENTRY:
+		TF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value);
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+/**
  * Helper function to perform a SRAM HCAPI resource type lookup
  * against the reserved value of the same static type.
  *
@@ -205,6 +365,36 @@ tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
 }
 
 /**
+ * Helper function to print all the HW resource qcaps errors reported
+ * in the error_flag.
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * [in] hw_query
+ *   Pointer to the HW query result holding the elements available
+ *
+ * [in] error_flag
+ *   Pointer to the hw error flags created at time of the query check
+ */
+static void
+tf_rm_print_hw_qcaps_error(enum tf_dir dir,
+			   struct tf_rm_hw_query *hw_query,
+			   uint32_t *error_flag)
+{
+	int i;
+
+	PMD_DRV_LOG(ERR, "HW QCAPS errors\n");
+	PMD_DRV_LOG(ERR, "  Direction: %s\n", tf_dir_2_str(dir));
+	PMD_DRV_LOG(ERR, "  Elements:\n");
+
+	for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
+		if (*error_flag & (1 << i))
+			PMD_DRV_LOG(ERR, "    %s, %d elem available, req:%d\n",
+				    tf_hcapi_hw_2_str(i),
+				    hw_query->hw_query[i].max,
+				    tf_rm_rsvd_hw_value(dir, i));
+	}
+}
+
+/**
  * Helper function to print all the SRAM resource qcaps errors
  * reported in the error_flag.
  *
@@ -264,12 +454,139 @@ tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
 			    uint32_t *error_flag)
 {
 	*error_flag = 0;
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_L2_CTXT_TCAM,
+			     TF_RSVD_L2_CTXT_TCAM,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_PROF_FUNC,
+			     TF_RSVD_PROF_FUNC,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_PROF_TCAM,
+			     TF_RSVD_PROF_TCAM,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_EM_PROF_ID,
+			     TF_RSVD_EM_PROF_ID,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_EM_REC,
+			     TF_RSVD_EM_REC,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_WC_TCAM_PROF_ID,
+			     TF_RSVD_WC_TCAM_PROF_ID,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_WC_TCAM,
+			     TF_RSVD_WC_TCAM,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_METER_PROF,
+			     TF_RSVD_METER_PROF,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_METER_INST,
+			     TF_RSVD_METER_INST,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_MIRROR,
+			     TF_RSVD_MIRROR,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_UPAR,
+			     TF_RSVD_UPAR,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_SP_TCAM,
+			     TF_RSVD_SP_TCAM,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_L2_FUNC,
+			     TF_RSVD_L2_FUNC,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_FKB,
+			     TF_RSVD_FKB,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_TBL_SCOPE,
+			     TF_RSVD_TBL_SCOPE,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_EPOCH0,
+			     TF_RSVD_EPOCH0,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_EPOCH1,
+			     TF_RSVD_EPOCH1,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_METADATA,
+			     TF_RSVD_METADATA,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_CT_STATE,
+			     TF_RSVD_CT_STATE,
+			     error_flag);
+
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_RANGE_PROF,
+			     TF_RSVD_RANGE_PROF,
+			     error_flag);
+
 	TF_RM_CHECK_HW_ALLOC(query,
 			     dir,
 			     TF_RESC_TYPE_HW_RANGE_ENTRY,
 			     TF_RSVD_RANGE_ENTRY,
 			     error_flag);
 
+	TF_RM_CHECK_HW_ALLOC(query,
+			     dir,
+			     TF_RESC_TYPE_HW_LAG_ENTRY,
+			     TF_RSVD_LAG_ENTRY,
+			     error_flag);
+
 	if (*error_flag != 0)
 		return -ENOMEM;
 
@@ -434,26 +751,584 @@ tf_rm_reserve_range(uint32_t count,
 			for (i = 0; i < rsv_begin; i++)
 				ba_alloc_index(pool, i);
 
-			/* Skip and then do the remaining */
-			if (rsv_end < max - 1) {
-				for (i = rsv_end; i < max; i++)
-					ba_alloc_index(pool, i);
-			}
-		}
-	}
+			/* Skip and then do the remaining */
+			if (rsv_end < max - 1) {
+				for (i = rsv_end; i < max; i++)
+					ba_alloc_index(pool, i);
+			}
+		}
+	}
+}
+
+/**
+ * Internal function to mark as allocated all the l2 ctxt entries
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
+	uint32_t end = 0;
+
+	/* l2 ctxt rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_L2_CTXT_TCAM,
+			    tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);
+
+	/* l2 ctxt tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_L2_CTXT_TCAM,
+			    tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the profile tcam and
+ * profile func resources that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_prof(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_PROF_FUNC;
+	uint32_t end = 0;
+
+	/* profile func rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_PROF_FUNC,
+			    tfs->TF_PROF_FUNC_POOL_NAME_RX);
+
+	/* profile func tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_PROF_FUNC,
+			    tfs->TF_PROF_FUNC_POOL_NAME_TX);
+
+	index = TF_RESC_TYPE_HW_PROF_TCAM;
+
+	/* profile tcam rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_PROF_TCAM,
+			    tfs->TF_PROF_TCAM_POOL_NAME_RX);
+
+	/* profile tcam tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_PROF_TCAM,
+			    tfs->TF_PROF_TCAM_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the em profile ids
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_em_prof(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID;
+	uint32_t end = 0;
+
+	/* em prof id rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_EM_PROF_ID,
+			    tfs->TF_EM_PROF_ID_POOL_NAME_RX);
+
+	/* em prof id tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_EM_PROF_ID,
+			    tfs->TF_EM_PROF_ID_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the wildcard tcam and
+ * profile id resources that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_wc(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID;
+	uint32_t end = 0;
+
+	/* wc profile id rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_WC_PROF_ID,
+			    tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX);
+
+	/* wc profile id tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_WC_PROF_ID,
+			    tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX);
+
+	index = TF_RESC_TYPE_HW_WC_TCAM;
+
+	/* wc tcam rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_WC_TCAM_ROW,
+			    tfs->TF_WC_TCAM_POOL_NAME_RX);
+
+	/* wc tcam tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_WC_TCAM_ROW,
+			    tfs->TF_WC_TCAM_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the meter resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_meter(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_METER_PROF;
+	uint32_t end = 0;
+
+	/* meter profiles rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_METER_PROF,
+			    tfs->TF_METER_PROF_POOL_NAME_RX);
+
+	/* meter profiles tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_METER_PROF,
+			    tfs->TF_METER_PROF_POOL_NAME_TX);
+
+	index = TF_RESC_TYPE_HW_METER_INST;
+
+	/* meter rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_METER,
+			    tfs->TF_METER_INST_POOL_NAME_RX);
+
+	/* meter tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_METER,
+			    tfs->TF_METER_INST_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the mirror resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_mirror(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_MIRROR;
+	uint32_t end = 0;
+
+	/* mirror rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_MIRROR,
+			    tfs->TF_MIRROR_POOL_NAME_RX);
+
+	/* mirror tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_MIRROR,
+			    tfs->TF_MIRROR_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the upar resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_upar(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_UPAR;
+	uint32_t end = 0;
+
+	/* upar rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_UPAR,
+			    tfs->TF_UPAR_POOL_NAME_RX);
+
+	/* upar tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_UPAR,
+			    tfs->TF_UPAR_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the sp tcam resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_sp_tcam(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_SP_TCAM;
+	uint32_t end = 0;
+
+	/* sp tcam rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_SP_TCAM,
+			    tfs->TF_SP_TCAM_POOL_NAME_RX);
+
+	/* sp tcam tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_SP_TCAM,
+			    tfs->TF_SP_TCAM_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the l2 func resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_l2_func(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
+	uint32_t end = 0;
+
+	/* l2 func rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_L2_FUNC,
+			    tfs->TF_L2_FUNC_POOL_NAME_RX);
+
+	/* l2 func tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_L2_FUNC,
+			    tfs->TF_L2_FUNC_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the fkb resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_fkb(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_FKB;
+	uint32_t end = 0;
+
+	/* fkb rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_FKB,
+			    tfs->TF_FKB_POOL_NAME_RX);
+
+	/* fkb tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_FKB,
+			    tfs->TF_FKB_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the tbl scope
+ * resources that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_tbl_scope(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE;
+	uint32_t end = 0;
+
+	/* tbl scope rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_TBL_SCOPE,
+			    tfs->TF_TBL_SCOPE_POOL_NAME_RX);
+
+	/* tbl scope tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_TBL_SCOPE,
+			    tfs->TF_TBL_SCOPE_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the epoch resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_epoch(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_EPOCH0;
+	uint32_t end = 0;
+
+	/* epoch0 rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_EPOCH0,
+			    tfs->TF_EPOCH0_POOL_NAME_RX);
+
+	/* epoch0 tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_EPOCH0,
+			    tfs->TF_EPOCH0_POOL_NAME_TX);
+
+	index = TF_RESC_TYPE_HW_EPOCH1;
+
+	/* epoch1 rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_EPOCH1,
+			    tfs->TF_EPOCH1_POOL_NAME_RX);
+
+	/* epoch1 tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_EPOCH1,
+			    tfs->TF_EPOCH1_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the metadata resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_metadata(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_METADATA;
+	uint32_t end = 0;
+
+	/* metadata rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_METADATA,
+			    tfs->TF_METADATA_POOL_NAME_RX);
+
+	/* metadata tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_METADATA,
+			    tfs->TF_METADATA_POOL_NAME_TX);
+}
+
+/**
+ * Internal function to mark as allocated all the ct state resources
+ * that Truflow does not own.
+ */
+static void
+tf_rm_rsvd_ct_state(struct tf_session *tfs)
+{
+	uint32_t index = TF_RESC_TYPE_HW_CT_STATE;
+	uint32_t end = 0;
+
+	/* ct state rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_CT_STATE,
+			    tfs->TF_CT_STATE_POOL_NAME_RX);
+
+	/* ct state tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_CT_STATE,
+			    tfs->TF_CT_STATE_POOL_NAME_TX);
 }
 
 /**
- * Internal function to mark all the l2 ctxt allocated that Truflow
- * does not own.
+ * Internal function to mark as allocated all the range resources
+ * that Truflow does not own.
  */
 static void
-tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
+tf_rm_rsvd_range(struct tf_session *tfs)
 {
-	uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
+	uint32_t index = TF_RESC_TYPE_HW_RANGE_PROF;
 	uint32_t end = 0;
 
-	/* l2 ctxt rx direction */
+	/* range profile rx direction */
 	if (tfs->resc.rx.hw_entry[index].stride > 0)
 		end = tfs->resc.rx.hw_entry[index].start +
 			tfs->resc.rx.hw_entry[index].stride - 1;
@@ -461,10 +1336,10 @@ tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
 	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
 			    tfs->resc.rx.hw_entry[index].start,
 			    end,
-			    TF_NUM_L2_CTXT_TCAM,
-			    tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);
+			    TF_NUM_RANGE_PROF,
+			    tfs->TF_RANGE_PROF_POOL_NAME_RX);
 
-	/* l2 ctxt tx direction */
+	/* range profile tx direction */
 	if (tfs->resc.tx.hw_entry[index].stride > 0)
 		end = tfs->resc.tx.hw_entry[index].start +
 			tfs->resc.tx.hw_entry[index].stride - 1;
@@ -472,21 +1347,45 @@ tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
 	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
 			    tfs->resc.tx.hw_entry[index].start,
 			    end,
-			    TF_NUM_L2_CTXT_TCAM,
-			    tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
+			    TF_NUM_RANGE_PROF,
+			    tfs->TF_RANGE_PROF_POOL_NAME_TX);
+
+	index = TF_RESC_TYPE_HW_RANGE_ENTRY;
+
+	/* range entry rx direction */
+	if (tfs->resc.rx.hw_entry[index].stride > 0)
+		end = tfs->resc.rx.hw_entry[index].start +
+			tfs->resc.rx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
+			    tfs->resc.rx.hw_entry[index].start,
+			    end,
+			    TF_NUM_RANGE_ENTRY,
+			    tfs->TF_RANGE_ENTRY_POOL_NAME_RX);
+
+	/* range entry tx direction */
+	if (tfs->resc.tx.hw_entry[index].stride > 0)
+		end = tfs->resc.tx.hw_entry[index].start +
+			tfs->resc.tx.hw_entry[index].stride - 1;
+
+	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
+			    tfs->resc.tx.hw_entry[index].start,
+			    end,
+			    TF_NUM_RANGE_ENTRY,
+			    tfs->TF_RANGE_ENTRY_POOL_NAME_TX);
 }
 
 /**
- * Internal function to mark all the l2 func resources allocated that
+ * Internal function to mark as allocated all the lag resources that
  * Truflow does not own.
  */
 static void
-tf_rm_rsvd_l2_func(struct tf_session *tfs)
+tf_rm_rsvd_lag_entry(struct tf_session *tfs)
 {
-	uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
+	uint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY;
 	uint32_t end = 0;
 
-	/* l2 func rx direction */
+	/* lag entry rx direction */
 	if (tfs->resc.rx.hw_entry[index].stride > 0)
 		end = tfs->resc.rx.hw_entry[index].start +
 			tfs->resc.rx.hw_entry[index].stride - 1;
@@ -494,10 +1393,10 @@ tf_rm_rsvd_l2_func(struct tf_session *tfs)
 	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
 			    tfs->resc.rx.hw_entry[index].start,
 			    end,
-			    TF_NUM_L2_FUNC,
-			    tfs->TF_L2_FUNC_POOL_NAME_RX);
+			    TF_NUM_LAG_ENTRY,
+			    tfs->TF_LAG_ENTRY_POOL_NAME_RX);
 
-	/* l2 func tx direction */
+	/* lag entry tx direction */
 	if (tfs->resc.tx.hw_entry[index].stride > 0)
 		end = tfs->resc.tx.hw_entry[index].start +
 			tfs->resc.tx.hw_entry[index].stride - 1;
@@ -505,8 +1404,8 @@ tf_rm_rsvd_l2_func(struct tf_session *tfs)
 	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
 			    tfs->resc.tx.hw_entry[index].start,
 			    end,
-			    TF_NUM_L2_FUNC,
-			    tfs->TF_L2_FUNC_POOL_NAME_TX);
+			    TF_NUM_LAG_ENTRY,
+			    tfs->TF_LAG_ENTRY_POOL_NAME_TX);
 }
 
 /**
@@ -909,7 +1808,21 @@ tf_rm_reserve_hw(struct tf *tfp)
 	 * used except the resources that Truflow took ownership off.
 	 */
 	tf_rm_rsvd_l2_ctxt(tfs);
+	tf_rm_rsvd_prof(tfs);
+	tf_rm_rsvd_em_prof(tfs);
+	tf_rm_rsvd_wc(tfs);
+	tf_rm_rsvd_mirror(tfs);
+	tf_rm_rsvd_meter(tfs);
+	tf_rm_rsvd_upar(tfs);
+	tf_rm_rsvd_sp_tcam(tfs);
 	tf_rm_rsvd_l2_func(tfs);
+	tf_rm_rsvd_fkb(tfs);
+	tf_rm_rsvd_tbl_scope(tfs);
+	tf_rm_rsvd_epoch(tfs);
+	tf_rm_rsvd_metadata(tfs);
+	tf_rm_rsvd_ct_state(tfs);
+	tf_rm_rsvd_range(tfs);
+	tf_rm_rsvd_lag_entry(tfs);
 }
 
 /**
@@ -972,6 +1885,7 @@ tf_rm_allocate_validate_hw(struct tf *tfp,
 			"%s, HW QCAPS validation failed, error_flag:0x%x\n",
 			tf_dir_2_str(dir),
 			error_flag);
+		tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);
 		goto cleanup;
 	}
 
@@ -1032,65 +1946,388 @@ tf_rm_allocate_validate_sram(struct tf *tfp,
 	struct tf_rm_entry *sram_entries;
 	uint32_t error_flag;
 
-	if (dir == TF_DIR_RX)
-		sram_entries = tfs->resc.rx.sram_entry;
-	else
-		sram_entries = tfs->resc.tx.sram_entry;
+	if (dir == TF_DIR_RX)
+		sram_entries = tfs->resc.rx.sram_entry;
+	else
+		sram_entries = tfs->resc.tx.sram_entry;
+
+	/* Query for Session SRAM Resources */
+	rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
+	if (rc) {
+		/* Log error */
+		PMD_DRV_LOG(ERR,
+			    "%s, SRAM qcaps message send failed\n",
+			    tf_dir_2_str(dir));
+		goto cleanup;
+	}
+
+	rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
+	if (rc) {
+		/* Log error */
+		PMD_DRV_LOG(ERR,
+			"%s, SRAM QCAPS validation failed, error_flag:%x\n",
+			tf_dir_2_str(dir),
+			error_flag);
+		tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
+		goto cleanup;
+	}
+
+	/* Post process SRAM capability */
+	for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
+		sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
+
+	/* Allocate Session SRAM Resources */
+	rc = tf_msg_session_sram_resc_alloc(tfp,
+					    dir,
+					    &sram_alloc,
+					    sram_entries);
+	if (rc) {
+		/* Log error */
+		PMD_DRV_LOG(ERR,
+			    "%s, SRAM alloc message send failed\n",
+			    tf_dir_2_str(dir));
+		goto cleanup;
+	}
+
+	/* Perform SRAM allocation validation as it's possible the
+	 * resource availability changed between qcaps and alloc.
+	 */
+	rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
+	if (rc) {
+		/* Log error */
+		PMD_DRV_LOG(ERR,
+			    "%s, SRAM Resource allocation validation failed\n",
+			    tf_dir_2_str(dir));
+		goto cleanup;
+	}
+
+	return 0;
+
+ cleanup:
+	return -1;
+}
+
+/**
+ * Helper function used to prune a HW resource array to only hold
+ * elements that need to be flushed.
+ *
+ * [in] tfs
+ *   Session handle
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * [in] hw_entries
+ *   Master HW Resource database
+ *
+ * [in/out] flush_entries
+ *   Pruned HW Resource database of entries to be flushed. This
+ *   array should be passed in as a complete copy of the master HW
+ *   Resource database. The outgoing result will be a pruned version
+ *   based on the result of the requested checks.
+ *
+ * Returns:
+ *    0 - Success, no flush required
+ *    1 - Success, flush required
+ *   -1 - Internal error
+ */
+static int
+tf_rm_hw_to_flush(struct tf_session *tfs,
+		  enum tf_dir dir,
+		  struct tf_rm_entry *hw_entries,
+		  struct tf_rm_entry *flush_entries)
+{
+	int rc;
+	int flush_rc = 0;
+	int free_cnt;
+	struct bitalloc *pool;
+
+	/* Check all the hw resource pools for leftover elements. Any
+	 * found will cause the complete pool of that type to be
+	 * invalidated.
+	 */
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_L2_CTXT_TCAM_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) {
+		flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_PROF_FUNC_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) {
+		flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_PROF_TCAM_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) {
+		flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_EM_PROF_ID_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) {
+		flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	flush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0;
+	flush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0;
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_WC_TCAM_PROF_ID_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) {
+		flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_WC_TCAM_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) {
+		flush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_METER_PROF_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) {
+		flush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_METER_INST_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) {
+		flush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_MIRROR_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) {
+		flush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_UPAR_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) {
+		flush_entries[TF_RESC_TYPE_HW_UPAR].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_SP_TCAM_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) {
+		flush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_L2_FUNC_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) {
+		flush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_FKB_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) {
+		flush_entries[TF_RESC_TYPE_HW_FKB].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_FKB].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
 
-	/* Query for Session SRAM Resources */
-	rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
-	if (rc) {
-		/* Log error */
-		PMD_DRV_LOG(ERR,
-			    "%s, SRAM qcaps message send failed\n",
-			    tf_dir_2_str(dir));
-		goto cleanup;
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_TBL_SCOPE_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) {
+		flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;
+	} else {
+		PMD_DRV_LOG(ERR, "%s: TBL_SCOPE free_cnt:%d, entries:%d\n",
+			    tf_dir_2_str(dir),
+			    free_cnt,
+			    hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);
+		flush_rc = 1;
 	}
 
-	rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
-	if (rc) {
-		/* Log error */
-		PMD_DRV_LOG(ERR,
-			"%s, SRAM QCAPS validation failed, error_flag:%x\n",
-			tf_dir_2_str(dir),
-			error_flag);
-		tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
-		goto cleanup;
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_EPOCH0_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) {
+		flush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0;
+	} else {
+		flush_rc = 1;
 	}
 
-	/* Post process SRAM capability */
-	for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
-		sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_EPOCH1_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) {
+		flush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
 
-	/* Allocate Session SRAM Resources */
-	rc = tf_msg_session_sram_resc_alloc(tfp,
-					    dir,
-					    &sram_alloc,
-					    sram_entries);
-	if (rc) {
-		/* Log error */
-		PMD_DRV_LOG(ERR,
-			    "%s, SRAM alloc message send failed\n",
-			    tf_dir_2_str(dir));
-		goto cleanup;
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_METADATA_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) {
+		flush_entries[TF_RESC_TYPE_HW_METADATA].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0;
+	} else {
+		flush_rc = 1;
 	}
 
-	/* Perform SRAM allocation validation as its possible the
-	 * resource availability changed between qcaps and alloc
-	 */
-	rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
-	if (rc) {
-		/* Log error */
-		PMD_DRV_LOG(ERR,
-			    "%s, SRAM Resource allocation validation failed\n",
-			    tf_dir_2_str(dir));
-		goto cleanup;
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_CT_STATE_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) {
+		flush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0;
+	} else {
+		flush_rc = 1;
 	}
 
-	return 0;
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_RANGE_PROF_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) {
+		flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
 
- cleanup:
-	return -1;
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_RANGE_ENTRY_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) {
+		flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	TF_RM_GET_POOLS(tfs, dir, &pool,
+			TF_LAG_ENTRY_POOL_NAME,
+			rc);
+	if (rc)
+		return rc;
+	free_cnt = ba_free_count(pool);
+	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) {
+		flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0;
+		flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0;
+	} else {
+		flush_rc = 1;
+	}
+
+	return flush_rc;
 }
 
 /**
@@ -1335,6 +2572,32 @@ tf_rm_sram_to_flush(struct tf_session *tfs,
 }
 
 /**
+ * Helper function used to generate an error log for the HW types that
+ * need to be flushed. The types should have been cleaned up ahead of
+ * invoking tf_close_session.
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * [in] hw_entries
+ *   HW Resource database holding elements to be flushed
+ */
+static void
+tf_rm_log_hw_flush(enum tf_dir dir,
+		   struct tf_rm_entry *hw_entries)
+{
+	int i;
+
+	/* Walk the hw flush array and log the types that weren't
+	 * cleaned up.
+	 */
+	for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
+		if (hw_entries[i].stride != 0)
+			PMD_DRV_LOG(ERR,
+				    "%s: %s was not cleaned up\n",
+				    tf_dir_2_str(dir),
+				    tf_hcapi_hw_2_str(i));
+	}
+}
+
+/**
  * Helper function used to generate an error log for the SRAM types
  * that needs to be flushed. The types should have been cleaned up
  * ahead of invoking tf_close_session.
@@ -1386,6 +2649,53 @@ tf_rm_init(struct tf *tfp __rte_unused)
 	/* Initialization of HW Resource Pools */
 	ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
 	ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
+	ba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);
+	ba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);
+	ba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);
+	ba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);
+	ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);
+	ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);
+
+	/* TBD, how do we want to handle EM records? */
+	/* EM Records should not be controlled by way of a pool */
+
+	ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);
+	ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);
+	ba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);
+	ba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);
+	ba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);
+	ba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);
+	ba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);
+	ba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);
+	ba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);
+	ba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);
+	ba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);
+	ba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);
+
+	ba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);
+	ba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);
+
+	ba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB);
+	ba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB);
+
+	ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);
+	ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);
+	ba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC);
+	ba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);
+	ba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);
+	ba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);
+	ba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);
+	ba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);
+	ba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);
+	ba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);
+	ba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);
+	ba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);
+	ba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);
+	ba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF);
+	ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY);
+	ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY);
+	ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY);
+	ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY);
 
 	/* Initialization of SRAM Resource Pools
 	 * These pools are set to the TFLIB defined MAX sizes not
@@ -1476,6 +2786,7 @@ tf_rm_close(struct tf *tfp)
 	int rc_close = 0;
 	int i;
 	struct tf_rm_entry *hw_entries;
+	struct tf_rm_entry *hw_flush_entries;
 	struct tf_rm_entry *sram_entries;
 	struct tf_rm_entry *sram_flush_entries;
 	struct tf_session *tfs __rte_unused =
@@ -1501,14 +2812,41 @@ tf_rm_close(struct tf *tfp)
 	for (i = 0; i < TF_DIR_MAX; i++) {
 		if (i == TF_DIR_RX) {
 			hw_entries = tfs->resc.rx.hw_entry;
+			hw_flush_entries = flush_resc.rx.hw_entry;
 			sram_entries = tfs->resc.rx.sram_entry;
 			sram_flush_entries = flush_resc.rx.sram_entry;
 		} else {
 			hw_entries = tfs->resc.tx.hw_entry;
+			hw_flush_entries = flush_resc.tx.hw_entry;
 			sram_entries = tfs->resc.tx.sram_entry;
 			sram_flush_entries = flush_resc.tx.sram_entry;
 		}
 
+		/* Check for any not previously freed HW resources and
+		 * flush if required.
+		 */
+		rc = tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries);
+		if (rc) {
+			rc_close = -ENOTEMPTY;
+			/* Log error */
+			PMD_DRV_LOG(ERR,
+				    "%s, lingering HW resources\n",
+				    tf_dir_2_str(i));
+
+			/* Log the entries to be flushed */
+			tf_rm_log_hw_flush(i, hw_flush_entries);
+			rc = tf_msg_session_hw_resc_flush(tfp,
+							  i,
+							  hw_flush_entries);
+			if (rc) {
+				rc_close = rc;
+				/* Log error */
+				PMD_DRV_LOG(ERR,
+					    "%s, HW flush failed\n",
+					    tf_dir_2_str(i));
+			}
+		}
+
 		/* Check for any not previously freed SRAM resources
 		 * and flush if required.
 		 */
@@ -1560,6 +2898,234 @@ tf_rm_close(struct tf *tfp)
 	return rc_close;
 }
 
+#if (TF_SHADOW == 1)
+int
+tf_rm_shadow_db_init(struct tf_session *tfs __rte_unused)
+{
+	int rc = 1;
+
+	return rc;
+}
+#endif /* TF_SHADOW */
+
+int
+tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,
+			    enum tf_dir dir,
+			    enum tf_tcam_tbl_type type,
+			    struct bitalloc **pool)
+{
+	int rc = -EOPNOTSUPP;
+
+	*pool = NULL;
+
+	switch (type) {
+	case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_L2_CTXT_TCAM_POOL_NAME,
+				rc);
+		break;
+	case TF_TCAM_TBL_TYPE_PROF_TCAM:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_PROF_TCAM_POOL_NAME,
+				rc);
+		break;
+	case TF_TCAM_TBL_TYPE_WC_TCAM:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_WC_TCAM_POOL_NAME,
+				rc);
+		break;
+	case TF_TCAM_TBL_TYPE_VEB_TCAM:
+	case TF_TCAM_TBL_TYPE_SP_TCAM:
+	case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
+	default:
+		break;
+	}
+
+	if (rc == -EOPNOTSUPP) {
+		PMD_DRV_LOG(ERR,
+			    "dir:%d, Tcam type not supported, type:%d\n",
+			    dir,
+			    type);
+		return rc;
+	} else if (rc == -1) {
+		PMD_DRV_LOG(ERR,
+			    "%s, Tcam type lookup failed, type:%d\n",
+			    tf_dir_2_str(dir),
+			    type);
+		return rc;
+	}
+
+	return 0;
+}
+
+int
+tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,
+			   enum tf_dir dir,
+			   enum tf_tbl_type type,
+			   struct bitalloc **pool)
+{
+	int rc = -EOPNOTSUPP;
+
+	*pool = NULL;
+
+	switch (type) {
+	case TF_TBL_TYPE_FULL_ACT_RECORD:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_FULL_ACTION_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_MCAST_GROUPS:
+		/* No pools for TX direction, so bail out */
+		if (dir == TF_DIR_TX)
+			break;
+		TF_RM_GET_POOLS_RX(tfs, pool,
+				   TF_SRAM_MCG_POOL_NAME);
+		rc = 0;
+		break;
+	case TF_TBL_TYPE_ACT_ENCAP_8B:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_ENCAP_8B_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_ENCAP_16B:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_ENCAP_16B_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_ENCAP_64B:
+		/* No pools for RX direction, so bail out */
+		if (dir == TF_DIR_RX)
+			break;
+		TF_RM_GET_POOLS_TX(tfs, pool,
+				   TF_SRAM_ENCAP_64B_POOL_NAME);
+		rc = 0;
+		break;
+	case TF_TBL_TYPE_ACT_SP_SMAC:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_SP_SMAC_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+		/* No pools for RX direction, so bail out */
+		if (dir == TF_DIR_RX)
+			break;
+		TF_RM_GET_POOLS_TX(tfs, pool,
+				   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
+		rc = 0;
+		break;
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+		/* No pools for RX direction, so bail out */
+		if (dir == TF_DIR_RX)
+			break;
+		TF_RM_GET_POOLS_TX(tfs, pool,
+				   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
+		rc = 0;
+		break;
+	case TF_TBL_TYPE_ACT_STATS_64:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_STATS_64B_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_MODIFY_SPORT:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_NAT_SPORT_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_NAT_S_IPV4_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_NAT_D_IPV4_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_METER_PROF:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_METER_PROF_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_METER_INST:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_METER_INST_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_MIRROR_CONFIG:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_MIRROR_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_UPAR:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_UPAR_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_EPOCH0:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_EPOCH0_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_EPOCH1:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_EPOCH1_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_METADATA:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_METADATA_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_CT_STATE:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_CT_STATE_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_RANGE_PROF:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_RANGE_PROF_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_RANGE_ENTRY:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_RANGE_ENTRY_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_LAG:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_LAG_ENTRY_POOL_NAME,
+				rc);
+		break;
+	/* Not yet supported */
+	case TF_TBL_TYPE_ACT_ENCAP_32B:
+	case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
+	case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
+	case TF_TBL_TYPE_VNIC_SVIF:
+		break;
+	/* No bitalloc pools for these types */
+	case TF_TBL_TYPE_EXT:
+	case TF_TBL_TYPE_EXT_0:
+	default:
+		break;
+	}
+
+	if (rc == -EOPNOTSUPP) {
+		PMD_DRV_LOG(ERR,
+			    "dir:%d, Table type not supported, type:%d\n",
+			    dir,
+			    type);
+		return rc;
+	} else if (rc == -1) {
+		PMD_DRV_LOG(ERR,
+			    "dir:%d, Table type lookup failed, type:%d\n",
+			    dir,
+			    type);
+		return rc;
+	}
+
+	return 0;
+}
+
 int
 tf_rm_convert_tbl_type(enum tf_tbl_type type,
 		       uint32_t *hcapi_type)
diff --git a/drivers/net/bnxt/tf_core/tf_session.h b/drivers/net/bnxt/tf_core/tf_session.h
index 34b6c41..fed34f1 100644
--- a/drivers/net/bnxt/tf_core/tf_session.h
+++ b/drivers/net/bnxt/tf_core/tf_session.h
@@ -12,6 +12,7 @@
 #include "bitalloc.h"
 #include "tf_core.h"
 #include "tf_rm.h"
+#include "tf_tbl.h"
 
 /** Session defines
  */
@@ -285,6 +286,15 @@ struct tf_session {
 
 	/** Lookup3 init values */
 	uint32_t lkup_lkup3_init_cfg[TF_DIR_MAX];
+
+	/** Table scope array */
+	struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
+
+	/** Each external pool is associated with a single table scope.
+	 *  For each external pool, store the associated table scope in
+	 *  this data structure.
+	 */
+	uint32_t ext_pool_2_scope[TF_DIR_MAX][TF_EXT_POOL_CNT_MAX];
 };
 
 #endif /* _TF_SESSION_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h
new file mode 100644
index 0000000..5a5e72f
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_tbl.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TF_TBL_H_
+#define _TF_TBL_H_
+
+#include <stdint.h>
+
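+/** Page table levels */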
+enum tf_pg_tbl_lvl {
+	PT_LVL_0,
+	PT_LVL_1,
+	PT_LVL_2,
+	PT_LVL_MAX
+};
+
+/** Invalid table scope id */
+#define TF_TBL_SCOPE_INVALID 0xffffffff
+
+/**
+ * Table Scope Control Block
+ *
+ * Holds private data for a table scope. Only one instance of a table
+ * scope with Internal EM is supported.
+ */
+struct tf_tbl_scope_cb {
+	uint32_t tbl_scope_id;
+	int index;
+	uint32_t *ext_pool_mem[TF_DIR_MAX][TF_EXT_POOL_CNT_MAX];
+};
+
+/**
+ * Initialize table pool structure to indicate
+ * no table scope has been associated with the
+ * external pool of indexes.
+ *
+ * [in] session
+ *   Session handle
+ */
+void
+tf_init_tbl_pool(struct tf_session *session);
+
+#endif /* _TF_TBL_H_ */
-- 
2.7.4

