DPDK patches and discussions
 help / color / mirror / Atom feed
From: Manish Kurup <manish.kurup@broadcom.com>
To: dev@dpdk.org
Cc: ajit.khaparde@broadcom.com, Jay Ding <jay.ding@broadcom.com>,
	Michael Baucom <michael.baucom@broadcom.com>
Subject: [PATCH 04/54] net/bnxt/tf_ulp: add meter stats support for Thor2
Date: Mon, 29 Sep 2025 20:35:14 -0400	[thread overview]
Message-ID: <20250930003604.87108-5-manish.kurup@broadcom.com> (raw)
In-Reply-To: <20250930003604.87108-1-manish.kurup@broadcom.com>

From: Jay Ding <jay.ding@broadcom.com>

A CFA stats counter is created with each meter by default, but it is
only activated when the meter is attached to a non-tunnel flow,
because a tunnel flow may use the stats pointer for other purposes.

Only the green and red/drop packet/byte stats are supported.

Signed-off-by: Jay Ding <jay.ding@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
 drivers/net/bnxt/tf_ulp/bnxt_ulp_meter.c | 45 ++++++++++++-----
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c     | 46 +++++++++++++++++
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h     | 11 +++++
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tf.c  |  3 +-
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c | 63 ++++++++++++++++++++----
 drivers/net/bnxt/tf_ulp/ulp_flow_db.h    |  6 +++
 drivers/net/bnxt/tf_ulp/ulp_mapper.c     |  9 ----
 drivers/net/bnxt/tf_ulp/ulp_mapper.h     | 24 +++++++++
 drivers/net/bnxt/tf_ulp/ulp_mapper_tf.c  |  5 +-
 drivers/net/bnxt/tf_ulp/ulp_mapper_tfc.c | 54 +++++++++++++++++++-
 10 files changed, 234 insertions(+), 32 deletions(-)

diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_meter.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_meter.c
index 591bde96e8..bc48e35c00 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_meter.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_meter.c
@@ -42,7 +42,6 @@
  * Meter init status
  */
 int bnxt_mtr_initialized;
-
 int32_t
 bnxt_flow_mtr_init(struct bnxt *bp __rte_unused)
 {
@@ -542,6 +541,7 @@ bnxt_flow_mtr_destroy(struct rte_eth_dev *dev,
 	uint16_t func_id;
 	int ret;
 	uint32_t tmp_mtr_id;
+	const struct ulp_mapper_core_ops *oper;
 
 	if (!bnxt_mtr_initialized)
 		return -rte_mtr_error_set(error, ENOTSUP,
@@ -750,17 +750,40 @@ bnxt_flow_mtr_stats_update(struct rte_eth_dev *dev __rte_unused,
  * Read meter statistics.
  */
 static int
-bnxt_flow_mtr_stats_read(struct rte_eth_dev *dev __rte_unused,
-			   uint32_t mtr_id __rte_unused,
-			   struct rte_mtr_stats *stats __rte_unused,
-			   uint64_t *stats_mask __rte_unused,
-			   int clear __rte_unused,
-			   struct rte_mtr_error *error)
+bnxt_flow_mtr_stats_read(struct rte_eth_dev *dev,
+			 uint32_t mtr_id,
+			 struct rte_mtr_stats *stats,
+			 uint64_t *stats_mask,
+			 int clear,
+			 struct rte_mtr_error *error)
 {
-	return -rte_mtr_error_set(error, ENOTSUP,
-				  RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				  NULL,
-				  "Meter_stats_read not supported yet");
+	int rc = 0;
+	struct bnxt_ulp_context *ulp_ctx;
+
+	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
+	if (unlikely(!ulp_ctx)) {
+		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
+		goto error;
+	}
+
+	memset(stats, 0, sizeof(*stats));
+	rc = ulp_mtr_query_count_get(ulp_ctx, mtr_id, clear, stats);
+	if (unlikely(rc))
+		goto error;
+
+	*stats_mask = 0;
+	*stats_mask |= RTE_MTR_STATS_N_PKTS_GREEN;
+	*stats_mask |= RTE_MTR_STATS_N_PKTS_RED;
+	*stats_mask |= RTE_MTR_STATS_N_PKTS_DROPPED;
+	*stats_mask |= RTE_MTR_STATS_N_BYTES_GREEN;
+	*stats_mask |= RTE_MTR_STATS_N_BYTES_RED;
+	*stats_mask |= RTE_MTR_STATS_N_BYTES_DROPPED;
+
+	return rc;
+error:
+	return -rte_mtr_error_set(error, EINVAL,
+				  RTE_MTR_ERROR_TYPE_STATS, NULL,
+				  "Failed to query meter.");
 }
 
 static const struct rte_mtr_ops bnxt_flow_mtr_ops = {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
index f9d069f4e1..4be2703740 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
@@ -627,6 +627,52 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
 	return rc;
 }
 
+/*
+ * Fill the rte_mtr_stats 'mtr_count' argument passed
+ * in the rte_mtr_stats_read() with the values obtained
+ * through CFA table get.
+ *
+ * ctxt [in] The ulp context for the flow counter manager
+ *
+ * mtr_id [in] The SW meter ID
+ *
+ * count [out] The rte_mtr_stats 'mtr_count' that is set
+ *
+ */
+int ulp_mtr_query_count_get(struct bnxt_ulp_context *ctxt,
+			    uint32_t mtr_id,
+			    int clear,
+			    struct rte_mtr_stats *mtr_count)
+{
+	int rc = 0;
+	struct bnxt_ulp_fc_info *ulp_fc_info;
+	const struct bnxt_ulp_fc_core_ops *fc_ops;
+	struct ulp_flow_db_res_params params;
+	uint32_t session_type = 0;
+	uint8_t dir = 0;
+	const struct ulp_mapper_core_ops *oper;
+
+	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
+	if (!ulp_fc_info)
+		return -ENODEV;
+
+	oper = ulp_mapper_data_oper_get(ctxt);
+	if (!oper)
+		return -ENODEV;
+	rc = oper->ulp_mapper_mtr_stats_hndl_get(mtr_id, &params.resource_hndl);
+	if (rc)
+		return rc;
+
+	fc_ops = ulp_fc_info->fc_ops;
+	if (!fc_ops || !fc_ops->ulp_mtr_stat_get)
+		return -ENODEV;
+
+	rc = fc_ops->ulp_mtr_stat_get(ctxt, dir, session_type,
+				      params.resource_hndl, clear, mtr_count);
+
+	return rc;
+}
+
 /*
  * Set the parent flow if it is SW accumulation counter entry.
  *
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h
index 0b81be7af9..d80d2e4e9d 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h
@@ -36,6 +36,13 @@ struct bnxt_ulp_fc_core_ops {
 	(*ulp_flow_stats_accum_update)(struct bnxt_ulp_context *ctxt,
 				       struct bnxt_ulp_fc_info *ulp_fc_info,
 				       struct bnxt_ulp_device_params *dparms);
+	int32_t
+	(*ulp_mtr_stat_get)(struct bnxt_ulp_context *ctxt,
+			    uint8_t direction,
+			    uint32_t session_type,
+			    uint64_t handle,
+			    int32_t clear,
+			    struct rte_mtr_stats *mtr_count);
 };
 
 struct sw_acc_counter {
@@ -186,6 +193,10 @@ bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt);
 int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ulp_ctx,
 			       uint32_t flow_id,
 			       struct rte_flow_query_count *count);
+int ulp_mtr_query_count_get(struct bnxt_ulp_context *ulp_ctx,
+			    uint32_t mtr_id,
+			    int clear,
+			    struct rte_mtr_stats *count);
 
 /*
  * Set the parent flow if in the SW accumulator table entry
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tf.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tf.c
index 60a7073514..65737f0cde 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tf.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tf.c
@@ -255,5 +255,6 @@ ulp_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt,
 
 const struct bnxt_ulp_fc_core_ops ulp_fc_tf_core_ops = {
 	.ulp_flow_stat_get = ulp_fc_tf_flow_stat_get,
-	.ulp_flow_stats_accum_update = ulp_fc_tf_update_accum_stats
+	.ulp_flow_stats_accum_update = ulp_fc_tf_update_accum_stats,
+	.ulp_mtr_stat_get = NULL,
 };
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c
index 4a1d0bf3df..d1b374e603 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c
@@ -26,6 +26,8 @@
  */
 #define ULP_FC_TFC_PKT_CNT_OFFS 0
 #define ULP_FC_TFC_BYTE_CNT_OFFS 1
+#define ULP_FC_TFC_PKT_MTR_DROP_CNT_OFFS 2
+#define ULP_FC_TFC_BYTE_MTR_DROP_CNT_OFFS 3
 #define ULP_TFC_CNTR_READ_BYTES 32
 #define ULP_TFC_CNTR_ALIGN 32
 #define ULP_TFC_ACT_WORD_SZ 32
@@ -56,11 +58,12 @@ static uint8_t *data;
 static uint64_t virt2iova_data;
 
 static int32_t
-ulp_fc_tfc_flow_stat_get(struct bnxt_ulp_context *ctxt,
-			 uint8_t direction,
-			 uint32_t session_type __rte_unused,
-			 uint64_t handle,
-			 struct rte_flow_query_count *count)
+ulp_fc_tfc_stat_get(struct bnxt_ulp_context *ctxt,
+		    uint8_t direction,
+		    uint32_t session_type __rte_unused,
+		    uint64_t handle,
+		    struct rte_flow_query_count *count,
+		    struct rte_mtr_stats *mtr_drop_count)
 {
 	uint16_t data_size = ULP_TFC_CNTR_READ_BYTES;
 	struct tfc_cmm_clr cmm_clr = { 0 };
@@ -102,11 +105,15 @@ ulp_fc_tfc_flow_stat_get(struct bnxt_ulp_context *ctxt,
 	cmm_info.act_handle = handle;
 	cmm_info.dir = (enum cfa_dir)direction;
 	/* Read and Clear the hw stat if requested */
-	if (count->reset) {
+	if (count && count->reset) {
 		cmm_clr.clr = true;
 		cmm_clr.offset_in_byte = 0;
 		cmm_clr.sz_in_byte = sizeof(data64[ULP_FC_TFC_PKT_CNT_OFFS]) +
 			sizeof(data64[ULP_FC_TFC_BYTE_CNT_OFFS]);
+		if (mtr_drop_count) {
+			cmm_clr.sz_in_byte += sizeof(data64[ULP_FC_TFC_PKT_MTR_DROP_CNT_OFFS]);
+			cmm_clr.sz_in_byte += sizeof(data64[ULP_FC_TFC_BYTE_MTR_DROP_CNT_OFFS]);
+		}
 	}
 	rc = tfc_act_get(tfcp, NULL, &cmm_info, &cmm_clr, &virt2iova_data, &word_size);
 	if (rc) {
@@ -115,19 +122,57 @@ ulp_fc_tfc_flow_stat_get(struct bnxt_ulp_context *ctxt,
 			     handle);
 		return rc;
 	}
-	if (data64[ULP_FC_TFC_PKT_CNT_OFFS]) {
+	if (count && data64[ULP_FC_TFC_PKT_CNT_OFFS]) {
 		count->hits_set = 1;
 		count->hits = data64[ULP_FC_TFC_PKT_CNT_OFFS];
 	}
-	if (data64[ULP_FC_TFC_BYTE_CNT_OFFS]) {
+	if (count && data64[ULP_FC_TFC_BYTE_CNT_OFFS]) {
 		count->bytes_set = 1;
 		count->bytes = data64[ULP_FC_TFC_BYTE_CNT_OFFS];
 	}
 
+	if (mtr_drop_count) {
+		mtr_drop_count->n_pkts[RTE_COLOR_GREEN] = data64[ULP_FC_TFC_PKT_CNT_OFFS];
+		mtr_drop_count->n_bytes[RTE_COLOR_GREEN] = data64[ULP_FC_TFC_BYTE_CNT_OFFS];
+		mtr_drop_count->n_pkts_dropped = data64[ULP_FC_TFC_PKT_MTR_DROP_CNT_OFFS];
+		mtr_drop_count->n_pkts[RTE_COLOR_RED] = data64[ULP_FC_TFC_PKT_MTR_DROP_CNT_OFFS];
+		mtr_drop_count->n_bytes_dropped = data64[ULP_FC_TFC_BYTE_MTR_DROP_CNT_OFFS];
+		mtr_drop_count->n_bytes[RTE_COLOR_RED] = data64[ULP_FC_TFC_BYTE_MTR_DROP_CNT_OFFS];
+	}
+
 	return rc;
 }
 
+static int32_t
+ulp_fc_tfc_flow_stat_get(struct bnxt_ulp_context *ctxt,
+			 uint8_t direction,
+			 uint32_t session_type,
+			 uint64_t handle,
+			 struct rte_flow_query_count *count)
+{
+	return ulp_fc_tfc_stat_get(ctxt, direction, session_type, handle, count, NULL);
+}
+
+static int32_t
+ulp_fc_tfc_mtr_stat_get(struct bnxt_ulp_context *ctxt,
+			uint8_t direction,
+			uint32_t session_type,
+			uint64_t handle,
+			int32_t clear,
+			struct rte_mtr_stats *mtr_count)
+{
+	struct rte_flow_query_count count;
+
+	if (clear)
+		count.reset = 1;
+	else
+		count.reset = 0;
+
+	return ulp_fc_tfc_stat_get(ctxt, direction, session_type, handle, &count, mtr_count);
+}
+
 const struct bnxt_ulp_fc_core_ops ulp_fc_tfc_core_ops = {
 	.ulp_flow_stat_get = ulp_fc_tfc_flow_stat_get,
-	.ulp_flow_stats_accum_update = ulp_fc_tfc_update_accum_stats
+	.ulp_flow_stats_accum_update = ulp_fc_tfc_update_accum_stats,
+	.ulp_mtr_stat_get = ulp_fc_tfc_mtr_stat_get
 };
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
index 4301094cfe..f0a677b64b 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
@@ -91,6 +91,12 @@ struct ulp_flow_db_res_params {
 	uint64_t			resource_hndl;
 };
 
+struct bnxt_mtr_stats_id_map {
+	bool valid;
+	uint32_t mtr_id;
+	uint64_t stats_hndl;
+};
+
 /*
  * Initialize the flow database. Memory is allocated in this
  * call and assigned to the flow database.
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index c06d849883..4829ae41d1 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -71,15 +71,6 @@ bnxt_ulp_mapper_ops_get(struct bnxt *bp)
 	return func_ops;
 }
 
-static const struct ulp_mapper_core_ops *
-ulp_mapper_data_oper_get(struct bnxt_ulp_context *ulp_ctx)
-{
-	struct bnxt_ulp_mapper_data *m_data;
-
-	m_data = (struct bnxt_ulp_mapper_data *)ulp_ctx->cfg_data->mapper_data;
-	return m_data->mapper_oper;
-}
-
 static const char *
 ulp_mapper_tmpl_name_str(enum bnxt_ulp_template_type tmpl_type)
 {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.h b/drivers/net/bnxt/tf_ulp/ulp_mapper.h
index d1dec6c2bd..f9a407cd84 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.h
@@ -170,8 +170,26 @@ struct ulp_mapper_core_ops {
 	int
 	(*ulp_mapper_mpc_batch_end)(struct tfc *tfcp,
 				    struct tfc_mpc_batch_info_t *batch_info);
+
+	int32_t
+	(*ulp_mapper_mtr_stats_hndl_set)(struct bnxt_ulp_mapper_parms *parms,
+					 uint32_t mtr_id,
+					 uint64_t stats_hndl);
+	int32_t
+	(*ulp_mapper_mtr_stats_hndl_get)(uint32_t mtr_id, uint64_t *stats_hndl);
+
+	int
+	(*ulp_mapper_mtr_stats_hndl_del)(uint32_t mtr_id);
 };
 
+static inline const struct ulp_mapper_core_ops *
+ulp_mapper_data_oper_get(struct bnxt_ulp_context *ulp_ctx) {
+	struct bnxt_ulp_mapper_data *m_data;
+
+	m_data = (struct bnxt_ulp_mapper_data *)ulp_ctx->cfg_data->mapper_data;
+	return m_data->mapper_oper;
+}
+
 extern const struct ulp_mapper_core_ops ulp_mapper_tf_core_ops;
 extern const struct ulp_mapper_core_ops ulp_mapper_tfc_core_ops;
 
@@ -302,6 +320,12 @@ ulp_mapper_init(struct bnxt_ulp_context	*ulp_ctx);
 void
 ulp_mapper_deinit(struct bnxt_ulp_context *ulp_ctx);
 
+int
+ulp_mapper_get_mtr_stats_hndl(uint32_t mtr_id, uint64_t *stats_ptr);
+
+int
+ulp_mapper_del_mtr_stats_hndl(uint32_t mtr_id);
+
 #ifdef TF_FLOW_SCALE_QUERY
 int32_t
 ulp_resc_usage_sync(struct bnxt_ulp_context *ulp_ctx);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper_tf.c b/drivers/net/bnxt/tf_ulp/ulp_mapper_tf.c
index 53497c164a..e755591716 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper_tf.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper_tf.c
@@ -1385,5 +1385,8 @@ const struct ulp_mapper_core_ops ulp_mapper_tf_core_ops = {
 	.ulp_mapper_core_handle_to_offset = ulp_mapper_tf_handle_to_offset,
 	.ulp_mapper_mpc_batch_started = ulp_mapper_tf_mpc_batch_started,
 	.ulp_mapper_mpc_batch_start = ulp_mapper_tf_mpc_batch_start,
-	.ulp_mapper_mpc_batch_end = ulp_mapper_tf_mpc_batch_end
+	.ulp_mapper_mpc_batch_end = ulp_mapper_tf_mpc_batch_end,
+	.ulp_mapper_mtr_stats_hndl_set = NULL,
+	.ulp_mapper_mtr_stats_hndl_get = NULL,
+	.ulp_mapper_mtr_stats_hndl_del = NULL
 };
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper_tfc.c b/drivers/net/bnxt/tf_ulp/ulp_mapper_tfc.c
index cbe9aa01c9..388ebea7ee 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper_tfc.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper_tfc.c
@@ -1759,6 +1759,55 @@ ulp_mapper_tfc_mpc_batch_start(struct tfc_mpc_batch_info_t *batch_info)
 	return tfc_mpc_batch_start(batch_info);
 }
 
+static int32_t
+ulp_mapper_tfc_mtr_stats_hndl_set(struct bnxt_ulp_mapper_parms *parms __rte_unused,
+				  uint32_t mtr_id, uint64_t stats_hndl)
+{
+	int32_t i, rc = -ENOMEM;
+
+	for (i = 0; i < BNXT_METER_MAX_NUM; i++)
+		if (!mtr_stats[i].valid) {
+			mtr_stats[i].mtr_id = mtr_id;
+			mtr_stats[i].stats_hndl = stats_hndl;
+			mtr_stats[i].valid = true;
+			rc = 0;
+			break;
+		}
+
+	return rc;
+}
+
+static int32_t
+ulp_mapper_tfc_mtr_stats_hndl_get(uint32_t mtr_id, uint64_t *stats_hndl)
+{
+	int32_t i, rc = -EINVAL;
+
+	for (i = 0; i < BNXT_METER_MAX_NUM; i++) {
+		if (mtr_stats[i].valid && mtr_stats[i].mtr_id == mtr_id) {
+			*stats_hndl = mtr_stats[i].stats_hndl;
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t
+ulp_mapper_tfc_mtr_stats_hndl_del(uint32_t mtr_id)
+{
+	int32_t i, rc = -EINVAL;
+
+	for (i = 0; i < BNXT_METER_MAX_NUM; i++)
+		if (mtr_stats[i].valid && mtr_stats[i].mtr_id == mtr_id) {
+			mtr_stats[i].valid = false;
+			rc = 0;
+			break;
+		}
+
+	return rc;
+}
+
 const struct ulp_mapper_core_ops ulp_mapper_tfc_core_ops = {
 	.ulp_mapper_core_tcam_tbl_process = ulp_mapper_tfc_tcam_tbl_process,
 	.ulp_mapper_core_tcam_entry_free = ulp_mapper_tfc_tcam_entry_free,
@@ -1779,5 +1828,8 @@ const struct ulp_mapper_core_ops ulp_mapper_tfc_core_ops = {
 	.ulp_mapper_core_handle_to_offset = ulp_mapper_tfc_handle_to_offset,
 	.ulp_mapper_mpc_batch_start = ulp_mapper_tfc_mpc_batch_start,
 	.ulp_mapper_mpc_batch_started = ulp_mapper_tfc_mpc_batch_started,
-	.ulp_mapper_mpc_batch_end = ulp_mapper_tfc_mpc_batch_end
+	.ulp_mapper_mpc_batch_end = ulp_mapper_tfc_mpc_batch_end,
+	.ulp_mapper_mtr_stats_hndl_set = ulp_mapper_tfc_mtr_stats_hndl_set,
+	.ulp_mapper_mtr_stats_hndl_get = ulp_mapper_tfc_mtr_stats_hndl_get,
+	.ulp_mapper_mtr_stats_hndl_del = ulp_mapper_tfc_mtr_stats_hndl_del
 };
-- 
2.39.5 (Apple Git-154)


  parent reply	other threads:[~2025-09-30  7:05 UTC|newest]

Thread overview: 55+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-09-30  0:35 [PATCH 00/54] bnxt patchset Manish Kurup
2025-09-30  0:35 ` [PATCH 01/54] net/bnxt/tf_ulp: add bnxt app data for 25.11 Manish Kurup
2025-09-30  0:35 ` [PATCH 02/54] net/bnxt: fix a NULL pointer dereference in bnxt_rep funcs Manish Kurup
2025-09-30  0:35 ` [PATCH 03/54] net/bnxt: enable vector mode processing Manish Kurup
2025-09-30  0:35 ` Manish Kurup [this message]
2025-09-30  0:35 ` [PATCH 05/54] net/bnxt/tf_core: dynamic UPAR support for THOR2 Manish Kurup
2025-09-30  0:35 ` [PATCH 06/54] net/bnxt/tf_core: fix the miscalculation of the lkup table pool Manish Kurup
2025-09-30  0:35 ` [PATCH 07/54] net/bnxt/tf_core: thor2 TF table scope sizing adjustments Manish Kurup
2025-09-30  0:35 ` [PATCH 08/54] net/bnxt/tf_ulp: add support for global identifiers Manish Kurup
2025-09-30  0:35 ` [PATCH 09/54] net/bnxt/tf_core: add support for multi instance Manish Kurup
2025-09-30  0:35 ` [PATCH 10/54] net/bnxt/tf_core: fix table scope free Manish Kurup
2025-09-30  0:35 ` [PATCH 11/54] net/bnxt/tf_core: fix vfr clean up and stats lockup Manish Kurup
2025-09-30  0:35 ` [PATCH 12/54] net/bnxt/tf_ulp: add support for special vxlan Manish Kurup
2025-09-30  0:35 ` [PATCH 13/54] net/bnxt/tf_ulp: increase shared pool size to 32 Manish Kurup
2025-09-30  0:35 ` [PATCH 14/54] next/bnxt/tf_ulp: truflow fixes for meter and mac_addr cache Manish Kurup
2025-09-30  0:35 ` [PATCH 15/54] net/bnxt/tf_ulp: add support for tcam priority update Manish Kurup
2025-09-30  0:35 ` [PATCH 16/54] net/bnxt/tf_ulp: hot upgrade support Manish Kurup
2025-09-30  0:35 ` [PATCH 17/54] net/bnxt/tf_core: tcam manager logical id free Manish Kurup
2025-09-30  0:35 ` [PATCH 18/54] net/bnxt/tf_ulp: fix stats counter memory initialization Manish Kurup
2025-09-30  0:35 ` [PATCH 19/54] net/bnxt: fix max VFs count for thor2 Manish Kurup
2025-09-30  0:35 ` [PATCH 20/54] net/bnxt/tf_ulp: ovs-dpdk packet drop observed with thor2 Manish Kurup
2025-09-30  0:35 ` [PATCH 21/54] net/bnxt/tf_ulp: fix seg fault when devargs argument missing Manish Kurup
2025-09-30  0:35 ` [PATCH 22/54] net/bnxt: fix default rss config Manish Kurup
2025-09-30  0:35 ` [PATCH 23/54] net/bnxt/tf_ulp: enable support for global index table Manish Kurup
2025-09-30  0:35 ` [PATCH 24/54] net/bnxt/tf_core: fix build failure with flow scale option Manish Kurup
2025-09-30  0:35 ` [PATCH 25/54] net/bnxt: truflow remove redundant code for mpc init Manish Kurup
2025-09-30  0:35 ` [PATCH 26/54] net/bnxt/tf_ulp: optimize template enums Manish Kurup
2025-09-30  0:35 ` [PATCH 27/54] net/bnxt/tf_core: thor2 hot upgrade ungraceful quit crash Manish Kurup
2025-09-30  0:35 ` [PATCH 28/54] net/bnxt/tf_ulp: support MPLS packets Manish Kurup
2025-09-30  0:35 ` [PATCH 29/54] net/bnxt/tf_core: add backing store debug to dpdk Manish Kurup
2025-09-30  0:35 ` [PATCH 30/54] net/bnxt/tf_core: truflow global table scope Manish Kurup
2025-09-30  0:35 ` [PATCH 31/54] net/bnxt/tf_ulp: ulp parser support to handle gre key Manish Kurup
2025-09-30  0:35 ` [PATCH 32/54] net/bnxt/tf_core: handle out of order MPC completions Manish Kurup
2025-09-30  0:35 ` [PATCH 33/54] net/bnxt/tf_ulp: socket direct enable Manish Kurup
2025-09-30  0:35 ` [PATCH 34/54] net/bnxt: fix adding udp_tunnel_port Manish Kurup
2025-09-30  0:35 ` [PATCH 35/54] net/bnxt/tf_ulp: add non vfr mode capability Manish Kurup
2025-09-30  0:35 ` [PATCH 36/54] net/bnxt: avoid iova range check when external memory is used Manish Kurup
2025-09-30  0:35 ` [PATCH 37/54] net/bnxt: avoid potential segfault in VFR handling Manish Kurup
2025-09-30  0:35 ` [PATCH 38/54] net/bnxt/tf_ulp: change rte_mem_virt2iova to rte_mem_virt2phys Manish Kurup
2025-09-30  0:35 ` [PATCH 39/54] net/bnxt: thor2 truflow memory manager bug Manish Kurup
2025-09-30  0:35 ` [PATCH 40/54] net/bnxt: fix stats collection when rx queue is not set Manish Kurup
2025-09-30  0:35 ` [PATCH 41/54] net/bnxt: fix rss configuration when set to none Manish Kurup
2025-09-30  0:35 ` [PATCH 42/54] net/bnxt: packet drop after port stop and start Manish Kurup
2025-09-30  0:35 ` [PATCH 43/54] net/bnxt/tf_core: fix truflow crash on memory allocation failure Manish Kurup
2025-09-30  0:35 ` [PATCH 44/54] net/bnxt: truflow remove RTE devarg processing for mpc=1 Manish Kurup
2025-09-30  0:35 ` [PATCH 45/54] net/bnxt: add meson build options for TruFlow Manish Kurup
2025-09-30  0:35 ` [PATCH 46/54] net/bnxt: truflow HSI struct fixes Manish Kurup
2025-09-30  0:35 ` [PATCH 47/54] net/bnxt/tf_ulp: truflow add pf action handler Manish Kurup
2025-09-30  0:35 ` [PATCH 48/54] net/bnxt/tf_ulp: add support for unicast only feature Manish Kurup
2025-09-30  0:35 ` [PATCH 49/54] net/bnxt/tf_core: remove excessive debug logging Manish Kurup
2025-09-30  0:36 ` [PATCH 50/54] net/bnxt/tf_core: fix truflow PF init failure on sriov disabled Manish Kurup
2025-09-30  0:36 ` [PATCH 51/54] net/bnxt/tf_ulp: fixes to enable TF functionality Manish Kurup
2025-09-30  0:36 ` [PATCH 52/54] net/bnxt/tf_ulp: add feature bit rx miss handling Manish Kurup
2025-09-30  0:36 ` [PATCH 53/54] net/bnxt: add support for truflow promiscuous mode Manish Kurup
2025-09-30  0:36 ` [PATCH 54/54] net/bnxt/tf_ulp: remove Truflow DEBUG code Manish Kurup

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250930003604.87108-5-manish.kurup@broadcom.com \
    --to=manish.kurup@broadcom.com \
    --cc=ajit.khaparde@broadcom.com \
    --cc=dev@dpdk.org \
    --cc=jay.ding@broadcom.com \
    --cc=michael.baucom@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).