DPDK patches and discussions
 help / color / mirror / Atom feed
From: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
To: dev@dpdk.org
Cc: Peter Spreadborough <peter.spreadborough@broadcom.com>,
	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>,
	Jay Ding <jay.ding@broadcom.com>,
	Shahaji Bhosle <sbhosle@broadcom.com>,
	Ajit Khaparde <ajit.khaparde@broadcom.com>
Subject: [PATCH v3 47/47] net/bnxt: tf_ulp: add stats cache for thor2
Date: Tue,  1 Oct 2024 11:28:33 +0530	[thread overview]
Message-ID: <20241001055833.757163-48-sriharsha.basavapatna@broadcom.com> (raw)
In-Reply-To: <20241001055833.757163-1-sriharsha.basavapatna@broadcom.com>

From: Peter Spreadborough <peter.spreadborough@broadcom.com>

This change adds a stats cache for Thor2 flows using counters.
Flow stats are harvested periodically in the background,
and stats reads by the application return values from
the cache rather than initiating a read from HW.

This change also adds read-clear functionality for counter resets
and restructures the stats collection while loop to
guarantee full coverage of entries added or removed during the
collection period.

Signed-off-by: Peter Spreadborough <peter.spreadborough@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Jay Ding <jay.ding@broadcom.com>
Reviewed-by: Shahaji Bhosle <sbhosle@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/tf_ulp/bnxt_ulp.h       |   2 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c  |  16 +-
 drivers/net/bnxt/tf_ulp/bnxt_ulp_tfc.c   |  10 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp_utils.h |  26 ++
 drivers/net/bnxt/tf_ulp/meson.build      |   4 +-
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c |  13 +
 drivers/net/bnxt/tf_ulp/ulp_flow_db.c    |   4 +
 drivers/net/bnxt/tf_ulp/ulp_mapper.c     |  73 ++++
 drivers/net/bnxt/tf_ulp/ulp_sc_mgr.c     | 529 +++++++++++++++++++++++
 drivers/net/bnxt/tf_ulp/ulp_sc_mgr.h     | 142 ++++++
 drivers/net/bnxt/tf_ulp/ulp_sc_mgr_tfc.c |  60 +++
 11 files changed, 877 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/bnxt/tf_ulp/ulp_sc_mgr.c
 create mode 100644 drivers/net/bnxt/tf_ulp/ulp_sc_mgr.h
 create mode 100644 drivers/net/bnxt/tf_ulp/ulp_sc_mgr_tfc.c

diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index a35f79f167..83fb205f68 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -162,6 +162,8 @@ struct bnxt_ulp_data {
 	uint64_t			feature_bits;
 	uint64_t			default_class_bits;
 	uint64_t			default_act_bits;
+	struct ulp_fc_tfc_stats_cache_entry *stats_cache;
+	struct bnxt_ulp_sc_info		*sc_info;
 };
 
 enum bnxt_ulp_tfo_type {
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
index 334eda99ce..2c22582e1c 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
@@ -670,6 +670,7 @@ bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
 	struct bnxt_ulp_context *ulp_ctx;
 	struct rte_flow_action_rss *rss_conf;
 	struct rte_flow_query_count *count;
+	enum bnxt_ulp_device_id  dev_id;
 	uint32_t flow_id;
 
 	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
@@ -681,6 +682,15 @@ bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
+	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
+	if (rc) {
+		BNXT_DRV_DBG(ERR, "Can't identify the device\n");
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to query flow.");
+		return -EINVAL;
+	}
+
 	flow_id = (uint32_t)(uintptr_t)flow;
 
 	switch (action->type) {
@@ -696,7 +706,11 @@ bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
 		break;
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		count = data;
-		rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count);
+		if (dev_id == BNXT_ULP_DEVICE_ID_THOR2)
+			rc = ulp_sc_mgr_query_count_get(ulp_ctx, flow_id, count);
+		else
+			rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count);
+
 		if (unlikely(rc)) {
 			rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_tfc.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_tfc.c
index cc779826a5..df4d2a0220 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_tfc.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_tfc.c
@@ -26,6 +26,7 @@
 #include "ulp_template_struct.h"
 #include "ulp_mark_mgr.h"
 #include "ulp_fc_mgr.h"
+#include "ulp_sc_mgr.h"
 #include "ulp_flow_db.h"
 #include "ulp_mapper.h"
 #include "ulp_matcher.h"
@@ -887,6 +888,9 @@ ulp_tfc_deinit(struct bnxt *bp,
 			BNXT_DRV_DBG(ERR, "Failed to close HA (%d)\n", rc);
 	}
 
+	/* Delete the Stats Counter Manager */
+	ulp_sc_mgr_deinit(bp->ulp_ctx);
+
 	/* cleanup the flow database */
 	ulp_flow_db_deinit(bp->ulp_ctx);
 
@@ -1043,6 +1047,12 @@ ulp_tfc_init(struct bnxt *bp,
 		goto jump_to_error;
 	}
 
+	rc = ulp_sc_mgr_init(bp->ulp_ctx);
+	if (rc) {
+		BNXT_DRV_DBG(ERR, "Failed to initialize ulp stats cache mgr\n");
+		goto jump_to_error;
+	}
+
 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
 	if (rc) {
 		BNXT_DRV_DBG(ERR, "Unable to get device id from ulp.\n");
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_utils.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp_utils.h
index edc75da05e..d027d62802 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_utils.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_utils.h
@@ -25,6 +25,7 @@
 #include "ulp_template_struct.h"
 #include "ulp_mark_mgr.h"
 #include "ulp_fc_mgr.h"
+#include "ulp_sc_mgr.h"
 #include "ulp_flow_db.h"
 #include "ulp_mapper.h"
 #include "ulp_matcher.h"
@@ -739,6 +740,31 @@ bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
 	return ulp_ctx->cfg_data->fc_info;
 }
 
+/* Function to set the stats cache info into the context */
+static inline int32_t
+bnxt_ulp_cntxt_ptr2_sc_info_set(struct bnxt_ulp_context *ulp_ctx,
+				struct bnxt_ulp_sc_info *ulp_sc_info)
+{
+	if (unlikely(!ulp_ctx || !ulp_ctx->cfg_data)) {
+		BNXT_DRV_DBG(ERR, "Invalid ulp context data\n");
+		return -EINVAL;
+	}
+
+	ulp_ctx->cfg_data->sc_info = ulp_sc_info;
+
+	return 0;
+}
+
+/* Function to retrieve the stats cache info from the context. */
+static inline struct bnxt_ulp_sc_info *
+bnxt_ulp_cntxt_ptr2_sc_info_get(struct bnxt_ulp_context *ulp_ctx)
+{
+	if (unlikely(!ulp_ctx || !ulp_ctx->cfg_data))
+		return NULL;
+
+	return ulp_ctx->cfg_data->sc_info;
+}
+
 /* Function to get the ulp flags from the ulp context. */
 static inline int32_t
 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
diff --git a/drivers/net/bnxt/tf_ulp/meson.build b/drivers/net/bnxt/tf_ulp/meson.build
index e19d51ee01..db6f65539d 100644
--- a/drivers/net/bnxt/tf_ulp/meson.build
+++ b/drivers/net/bnxt/tf_ulp/meson.build
@@ -31,6 +31,8 @@ sources += files(
 	'bnxt_ulp_tfc.c',
 	'ulp_fc_mgr_tfc.c',
 	'ulp_fc_mgr_tf.c',
-	'ulp_alloc_tbl.c')
+	'ulp_alloc_tbl.c',
+	'ulp_sc_mgr.c',
+	'ulp_sc_mgr_tfc.c')
 
 subdir('generic_templates')
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c
index 4a93dd1a33..e73fdcd1c7 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr_tfc.c
@@ -30,6 +30,19 @@
 #define ULP_TFC_CNTR_ALIGN 32
 #define ULP_TFC_ACT_WORD_SZ 32
 
+struct ulp_fc_tfc_stats_cache_entry {
+	uint32_t flags;
+	uint64_t timestamp;
+	uint8_t tsid;
+	uint32_t record_size;
+	uint32_t offset;
+	uint8_t dir;
+	uint64_t packet_count;
+	uint64_t byte_count;
+	uint16_t tcp_flags;
+	uint32_t tcp_timestamp;
+};
+
 static int32_t
 ulp_fc_tfc_update_accum_stats(__rte_unused struct bnxt_ulp_context *ctxt,
 			      __rte_unused struct bnxt_ulp_fc_info *fc_info,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
index 8984808b67..d7b05ae39b 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
@@ -12,6 +12,7 @@
 #include "ulp_mapper.h"
 #include "ulp_flow_db.h"
 #include "ulp_fc_mgr.h"
+#include "ulp_sc_mgr.h"
 #include "ulp_tun.h"
 #ifdef TF_FLOW_SCALE_QUERY
 #include "tf_resources.h"
@@ -633,6 +634,9 @@ ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt,
 
 		if (!ulp_fc_mgr_thread_isstarted(ulp_ctxt))
 			ulp_fc_mgr_thread_start(ulp_ctxt);
+
+		if (!ulp_sc_mgr_thread_isstarted(ulp_ctxt))
+			ulp_sc_mgr_thread_start(ulp_ctxt);
 	}
 
 	/* all good, return success */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 721e8f4992..2429ac2f1a 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -2950,6 +2950,72 @@ ulp_mapper_vnic_tbl_process(struct bnxt_ulp_mapper_parms *parms,
 	return rc;
 }
 
+static int32_t
+ulp_mapper_stats_cache_tbl_process(struct bnxt_ulp_mapper_parms *parms,
+				   struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+	struct ulp_flow_db_res_params fid_parms;
+	uint64_t counter_handle;
+	struct ulp_blob	data;
+	uint16_t data_len = 0;
+	uint8_t *tmp_data;
+	int32_t rc = 0;
+
+	/* Initialize the blob data */
+	if (unlikely(ulp_blob_init(&data, tbl->result_bit_size,
+				   BNXT_ULP_BYTE_ORDER_BE))) {
+		BNXT_DRV_DBG(ERR, "Failed initial ulp_global table blob\n");
+		return -EINVAL;
+	}
+
+	/* read the arguments from the result table */
+	rc = ulp_mapper_tbl_result_build(parms, tbl, &data,
+					 "ULP Global Result");
+	if (unlikely(rc)) {
+		BNXT_DRV_DBG(ERR, "Failed to build the result blob\n");
+		return rc;
+	}
+
+	tmp_data = ulp_blob_data_get(&data, &data_len);
+	counter_handle = *(uint64_t *)tmp_data;
+	counter_handle = tfp_be_to_cpu_64(counter_handle);
+
+	memset(&fid_parms, 0, sizeof(fid_parms));
+	fid_parms.direction	= tbl->direction;
+	fid_parms.resource_func	= tbl->resource_func;
+	fid_parms.resource_type	= tbl->resource_type;
+	fid_parms.resource_sub_type = tbl->resource_sub_type;
+	fid_parms.resource_hndl	    = counter_handle;
+	fid_parms.critical_resource = tbl->critical_resource;
+	rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms);
+	if (unlikely(rc)) {
+		BNXT_DRV_DBG(ERR, "Failed to link resource to flow rc = %d\n",
+			     rc);
+		return rc;
+	}
+
+	rc = ulp_sc_mgr_entry_alloc(parms, counter_handle, tbl);
+	if (unlikely(rc)) {
+		BNXT_DRV_DBG(ERR, "Failed to allocate stats cache entry rc = %d\n",
+			     rc);
+		return rc;
+	}
+#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
+#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_MAPPER
+	BNXT_DRV_DBG(DEBUG, "flow id =0x%x\n", parms->flow_id);
+#endif
+#endif
+	return rc;
+}
+
+static int32_t
+ulp_mapper_stats_cache_tbl_res_free(struct bnxt_ulp_context *ulp,
+				    uint32_t fid)
+{
+	ulp_sc_mgr_entry_free(ulp, fid);
+	return 0;
+}
+
 /* Free the vnic resource */
 static int32_t
 ulp_mapper_vnic_tbl_res_free(struct bnxt_ulp_context *ulp __rte_unused,
@@ -4148,6 +4214,9 @@ ulp_mapper_tbls_process(struct bnxt_ulp_mapper_parms *parms, void *error)
 		case BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE:
 			rc = ulp_mapper_allocator_tbl_process(parms, tbl);
 			break;
+		case BNXT_ULP_RESOURCE_FUNC_STATS_CACHE:
+			rc = ulp_mapper_stats_cache_tbl_process(parms, tbl);
+			break;
 		default:
 			BNXT_DRV_DBG(ERR, "Unexpected mapper resource %d\n",
 				     tbl->resource_func);
@@ -4286,6 +4355,10 @@ ulp_mapper_resource_free(struct bnxt_ulp_context *ulp,
 						 res->direction,
 						 res->resource_hndl);
 		break;
+	case BNXT_ULP_RESOURCE_FUNC_STATS_CACHE:
+		rc = ulp_mapper_stats_cache_tbl_res_free(ulp,
+							 fid);
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_sc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_sc_mgr.c
new file mode 100644
index 0000000000..a98360bc6e
--- /dev/null
+++ b/drivers/net/bnxt/tf_ulp/ulp_sc_mgr.c
@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2021 Broadcom
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+#include "bnxt_ulp_utils.h"
+#include "bnxt_ulp_tfc.h"
+#include "bnxt_tf_common.h"
+#include "ulp_sc_mgr.h"
+#include "ulp_flow_db.h"
+#include "ulp_template_db_enum.h"
+#include "ulp_template_struct.h"
+#include "tfc.h"
+#include "tfc_debug.h"
+#include "tfc_action_handle.h"
+
+#define ULP_TFC_CNTR_READ_BYTES 32
+#define ULP_TFC_CNTR_ALIGN 32
+#define ULP_TFC_ACT_WORD_SZ 32
+
+static const struct bnxt_ulp_sc_core_ops *
+bnxt_ulp_sc_ops_get(struct bnxt_ulp_context *ctxt)
+{
+	int32_t rc;
+	enum bnxt_ulp_device_id  dev_id;
+	const struct bnxt_ulp_sc_core_ops *func_ops;
+
+	rc = bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id);
+	if (rc)
+		return NULL;
+
+	switch (dev_id) {
+	case BNXT_ULP_DEVICE_ID_THOR2:
+		func_ops = &ulp_sc_tfc_core_ops;
+		break;
+	case BNXT_ULP_DEVICE_ID_THOR:
+	case BNXT_ULP_DEVICE_ID_STINGRAY:
+	case BNXT_ULP_DEVICE_ID_WH_PLUS:
+	default:
+		func_ops = NULL;
+		break;
+	}
+	return func_ops;
+}
+
+int32_t ulp_sc_mgr_init(struct bnxt_ulp_context *ctxt)
+{
+	const struct bnxt_ulp_sc_core_ops *sc_ops;
+	struct bnxt_ulp_device_params *dparms;
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+	uint32_t stats_cache_tbl_sz;
+	uint32_t dev_id;
+	int rc;
+
+	if (!ctxt) {
+		BNXT_DRV_DBG(DEBUG, "Invalid ULP CTXT\n");
+		return -EINVAL;
+	}
+
+	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
+		BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
+		return -EINVAL;
+	}
+
+	dparms = bnxt_ulp_device_params_get(dev_id);
+	if (!dparms) {
+		BNXT_DRV_DBG(DEBUG, "Failed to get device parms\n");
+		return -EINVAL;
+	}
+
+	sc_ops = bnxt_ulp_sc_ops_get(ctxt);
+	if (sc_ops == NULL) {
+		BNXT_DRV_DBG(DEBUG, "Failed to get the counter ops\n");
+		return -EINVAL;
+	}
+
+	ulp_sc_info = rte_zmalloc("ulp_sc_info", sizeof(*ulp_sc_info), 0);
+	if (!ulp_sc_info) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	ulp_sc_info->sc_ops = sc_ops;
+	ulp_sc_info->flags = 0;
+
+	rc = pthread_mutex_init(&ulp_sc_info->sc_lock, NULL);
+	if (rc) {
+		BNXT_DRV_DBG(ERR, "Failed to initialize sc mutex\n");
+		goto error;
+	}
+
+	/* Add the SC info tbl to the ulp context. */
+	bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, ulp_sc_info);
+
+	ulp_sc_info->num_counters = dparms->ext_flow_db_num_entries;
+	if (!ulp_sc_info->num_counters) {
+		/* No need for software counters, call fw directly */
+		BNXT_DRV_DBG(DEBUG, "Sw flow counter support not enabled\n");
+		return 0;
+	}
+
+	/*
+	 * Size is determined by the number of flows + 10% to cover IDs
+	 * used for resources.
+	 */
+	stats_cache_tbl_sz = sizeof(struct ulp_sc_tfc_stats_cache_entry) *
+		(ulp_sc_info->num_counters +
+		 (ulp_sc_info->num_counters / 10));
+
+	ulp_sc_info->stats_cache_tbl = rte_zmalloc("ulp_stats_cache_tbl",
+						   stats_cache_tbl_sz, 0);
+	if (!ulp_sc_info->stats_cache_tbl) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	ulp_sc_info->read_data = rte_zmalloc("ulp_stats_cache_read_data",
+					     ULP_SC_BATCH_SIZE * ULP_SC_PAGE_SIZE,
+					     ULP_SC_PAGE_SIZE);
+	if (!ulp_sc_info->read_data) {
+		rte_free(ulp_sc_info->stats_cache_tbl);
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	rc = ulp_sc_mgr_thread_start(ctxt);
+	if (rc)
+		BNXT_DRV_DBG(DEBUG, "Stats counter thread start failed\n");
+
+ error:
+	return rc;
+}
+
+/*
+ * Release all resources in the Stats Cache Manager for this ulp context
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+int32_t
+ulp_sc_mgr_deinit(struct bnxt_ulp_context *ctxt)
+{
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+
+	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+
+	if (!ulp_sc_info)
+		return -EINVAL;
+
+	pthread_mutex_lock(&ulp_sc_info->sc_lock);
+
+	ulp_sc_mgr_thread_cancel(ctxt);
+
+	pthread_mutex_destroy(&ulp_sc_info->sc_lock);
+
+	if (ulp_sc_info->stats_cache_tbl)
+		rte_free(ulp_sc_info->stats_cache_tbl);
+
+	if (ulp_sc_info->read_data)
+		rte_free(ulp_sc_info->read_data);
+
+	rte_free(ulp_sc_info);
+
+	/* Safe to ignore on deinit */
+	(void)bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, NULL);
+
+	return 0;
+}
+
+#define ULP_SC_PERIOD_S 1
+#define ULP_SC_PERIOD_MS (ULP_SC_PERIOD_S * 1000)
+
+static void *ulp_stats_cache_main_loop(void *arg)
+{
+	struct ulp_sc_tfc_stats_cache_entry *count;
+	const struct bnxt_ulp_sc_core_ops *sc_ops;
+	struct ulp_sc_tfc_stats_cache_entry *sce;
+	struct ulp_sc_tfc_stats_cache_entry *sce_end;
+	struct tfc_mpc_batch_info_t batch_info;
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+	struct bnxt_ulp_context *ctxt = NULL;
+	uint16_t words = (ULP_TFC_CNTR_READ_BYTES + ULP_TFC_ACT_WORD_SZ - 1) / ULP_TFC_ACT_WORD_SZ;
+	uint32_t batch_size;
+	struct tfc *tfcp = NULL;
+	uint32_t batch;
+	uint32_t delay = ULP_SC_PERIOD_MS;
+	uint64_t start;
+	uint64_t stop;
+	uint64_t hz;
+	int oldstate;
+	int oldtype;
+	uint8_t *data;
+	int rc;
+	static uint32_t loop;
+	uint64_t cycles = 0;
+	uint64_t cpms = 0;
+
+	while (!ctxt) {
+		ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
+
+		if (ctxt)
+			break;
+
+		BNXT_DRV_DBG(INFO, "could not get the ulp context lock\n");
+		rte_delay_us_block(1000);
+	}
+
+
+	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+	if (!ulp_sc_info) {
+		bnxt_ulp_cntxt_entry_release();
+		goto terminate;
+	}
+
+	sc_ops = ulp_sc_info->sc_ops;
+
+	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
+	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
+
+	hz = rte_get_timer_hz();
+	cpms = hz / 1000;
+
+	while (true) {
+		bnxt_ulp_cntxt_entry_release();
+		ctxt = NULL;
+		rte_delay_ms(delay);
+
+		while (!ctxt) {
+			ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
+
+			if (ctxt)
+				break;
+
+			BNXT_DRV_DBG(INFO, "could not get the ulp context lock\n");
+			rte_delay_us_block(1);
+		}
+
+		start = rte_get_timer_cycles();
+		sce = ulp_sc_info->stats_cache_tbl;
+		sce_end = sce + (ulp_sc_info->num_counters + (ulp_sc_info->num_counters / 10));
+
+		while (ulp_sc_info->num_entries && (sce < sce_end)) {
+			data = ulp_sc_info->read_data;
+
+			rc = tfc_mpc_batch_start(&batch_info);
+			if (rc) {
+				PMD_DRV_LOG(ERR,
+				    "MPC batch start failed rc:%d loop:%d\n", rc, loop);
+				break;
+			}
+
+			if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
+				break;
+
+			rc = pthread_mutex_lock(&ulp_sc_info->sc_lock);
+			if (rc) {
+				PMD_DRV_LOG(ERR,
+				    "Failed to get SC lock, terminating main loop rc:%d loop:%d\n",
+				    rc, loop);
+				goto terminate;
+			}
+
+			for (batch = 0; (batch < ULP_SC_BATCH_SIZE) && (sce < sce_end);) {
+				if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
+					sce++;
+					continue;
+				}
+
+				tfcp = bnxt_ulp_cntxt_tfcp_get(sce->ctxt);
+				if (tfcp == NULL) {
+					bnxt_ulp_cntxt_entry_release();
+					goto terminate;
+				}
+
+
+				/* Store the entry pointer to use for counter update */
+				batch_info.em_hdl[batch_info.count] = (uint64_t)sce;
+
+				rc = sc_ops->ulp_stats_cache_update(tfcp,
+								    sce->dir,
+								    data,
+								    sce->handle,
+								    &words,
+								    &batch_info,
+								    sce->reset);
+				if (rc) {
+					/* Abort this batch */
+					PMD_DRV_LOG(ERR,
+						    "loop:%d read_counter() failed:%d\n",
+						    loop, rc);
+					break;
+				}
+
+				if (sce->reset)
+					sce->reset = false;
+
+				/* Next */
+				batch++;
+				sce++;
+				data += ULP_SC_PAGE_SIZE;
+			}
+
+			batch_size = batch_info.count;
+			rc = tfc_mpc_batch_end(tfcp, &batch_info);
+
+			pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+			bnxt_ulp_cntxt_release_fdb_lock(ctxt);
+
+			if (rc) {
+				PMD_DRV_LOG(ERR,
+					    "MPC batch end failed rc:%d loop:%d\n",
+					    rc, loop);
+				batch_info.enabled = false;
+				break;
+			}
+
+			/* Process counts */
+			data = ulp_sc_info->read_data;
+
+			for (batch = 0; batch < batch_size; batch++) {
+				/* Check for error in completion */
+				if (batch_info.result[batch]) {
+					PMD_DRV_LOG(ERR,
+						    "batch:%d result:%d\n",
+						    batch, batch_info.result[batch]);
+				} else {
+					count =
+				(struct ulp_sc_tfc_stats_cache_entry *)((uintptr_t)batch_info.em_hdl[batch]);
+					memcpy(&count->packet_count, data, ULP_TFC_ACT_WORD_SZ);
+				}
+
+				data += ULP_SC_PAGE_SIZE;
+			}
+		}
+
+		loop++;
+		stop = rte_get_timer_cycles();
+		cycles = stop - start;
+		if (cycles > (hz * ULP_SC_PERIOD_S)) {
+			PMD_DRV_LOG(ERR, "%s: Stats collection time exceeded %dmS Cycles:%" PRIu64 "\n",
+				    __func__, ULP_SC_PERIOD_MS, cycles);
+			delay = ULP_SC_PERIOD_MS;
+		} else {
+			delay = ULP_SC_PERIOD_MS - (cycles / cpms);
+
+			if (delay > ULP_SC_PERIOD_MS) {
+				PMD_DRV_LOG(ERR, "%s: Stats collection delay:%dmS exceeds %dmS\n",
+					    __func__, delay, ULP_SC_PERIOD_MS);
+				delay = ULP_SC_PERIOD_MS;
+			}
+		}
+	}
+
+ terminate:
+	return NULL;
+}
+
+/*
+ * Check if the stats cache thread that walks through the flows is started
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+bool ulp_sc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
+{
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+
+	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+
+	if (ulp_sc_info)
+		return !!(ulp_sc_info->flags & ULP_FLAG_SC_THREAD);
+
+	return false;
+}
+
+/*
+ * Setup the stats cache timer thread that will fetch/accumulate raw counter
+ * data from the chip's internal flow counters
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+int32_t
+ulp_sc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
+{
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+	int rc;
+
+	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+
+	if (ulp_sc_info && !(ulp_sc_info->flags & ULP_FLAG_SC_THREAD)) {
+		rc = pthread_create(&ulp_sc_info->tid,
+				    NULL,
+				    &ulp_stats_cache_main_loop,
+				    (void *)ctxt->cfg_data);
+		if (rc)
+			return rc;
+
+		ulp_sc_info->flags |= ULP_FLAG_SC_THREAD;
+	}
+
+	return 0;
+}
+
+/*
+ * Cancel the stats cache thread
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+void ulp_sc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
+{
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+
+	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+	if (!ulp_sc_info)
+		return;
+
+	ulp_sc_info->flags &= ~ULP_FLAG_SC_THREAD;
+	pthread_cancel(ulp_sc_info->tid);
+}
+
+/*
+ * Fill the rte_flow_query_count 'data' argument passed
+ * in the rte_flow_query() with the values obtained and
+ * accumulated locally.
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ * flow_id [in] The flow identifier
+ *
+ * count [out] The rte_flow_query_count 'data' that is set
+ *
+ */
+int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
+			       uint32_t flow_id,
+			       struct rte_flow_query_count *count)
+{
+	struct ulp_sc_tfc_stats_cache_entry *sce;
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+	int rc = 0;
+
+	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+	if (!ulp_sc_info)
+		return -ENODEV;
+
+	sce = ulp_sc_info->stats_cache_tbl;
+	sce += flow_id;
+
+	/* If entry is not valid return an error */
+	if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
+		return -EBUSY;
+
+	count->hits = sce->packet_count;
+	count->hits_set = 1;
+	count->bytes = sce->byte_count;
+	count->bytes_set = 1;
+
+	if (count->reset)
+		sce->reset = true;
+
+	return rc;
+}
+
+
+int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
+			   uint64_t counter_handle,
+			   struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+	struct ulp_sc_tfc_stats_cache_entry *sce;
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+
+	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(parms->ulp_ctx);
+	if (!ulp_sc_info)
+		return -ENODEV;
+
+	pthread_mutex_lock(&ulp_sc_info->sc_lock);
+
+	sce = ulp_sc_info->stats_cache_tbl;
+	sce += parms->flow_id;
+
+	/* If entry is not free return an error */
+	if (sce->flags & ULP_SC_ENTRY_FLAG_VALID) {
+		pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+		return -EBUSY;
+	}
+
+	memset(sce, 0, sizeof(*sce));
+	sce->ctxt = parms->ulp_ctx;
+	sce->flags |= ULP_SC_ENTRY_FLAG_VALID;
+	sce->handle = counter_handle;
+	sce->dir = tbl->direction;
+	ulp_sc_info->num_entries++;
+	pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+
+	return 0;
+}
+
+void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
+			   uint32_t fid)
+{
+	struct ulp_sc_tfc_stats_cache_entry *sce;
+	struct bnxt_ulp_sc_info *ulp_sc_info;
+
+	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ulp);
+	if (!ulp_sc_info)
+		return;
+
+	pthread_mutex_lock(&ulp_sc_info->sc_lock);
+
+	sce = ulp_sc_info->stats_cache_tbl;
+	sce += fid;
+
+	if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
+		pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+		return;
+	}
+
+	sce->flags = 0;
+	ulp_sc_info->num_entries--;
+
+	pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_sc_mgr.h b/drivers/net/bnxt/tf_ulp/ulp_sc_mgr.h
new file mode 100644
index 0000000000..0155a8d61c
--- /dev/null
+++ b/drivers/net/bnxt/tf_ulp/ulp_sc_mgr.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2023 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _ULP_SC_MGR_H_
+#define _ULP_SC_MGR_H_
+
+#include "pthread.h"
+#include "bnxt_ulp.h"
+#include "ulp_flow_db.h"
+
+#define ULP_FLAG_SC_THREAD			BIT(0)
+
+#define ULP_SC_ENTRY_FLAG_VALID BIT(0)
+
+#define ULP_SC_BATCH_SIZE   64
+#define ULP_SC_PAGE_SIZE  4096
+
+struct ulp_sc_tfc_stats_cache_entry {
+	struct bnxt_ulp_context *ctxt;
+	uint32_t flags;
+	uint64_t timestamp;
+	uint64_t handle;
+	uint8_t dir;
+	uint64_t packet_count;
+	uint64_t byte_count;
+	uint64_t count_fields1;
+	uint64_t count_fields2;
+	bool reset;
+};
+
+struct bnxt_ulp_sc_info {
+	struct ulp_sc_tfc_stats_cache_entry *stats_cache_tbl;
+	uint8_t                 *read_data;
+	uint32_t		flags;
+	uint32_t		num_entries;
+	pthread_mutex_t		sc_lock;
+	uint32_t		num_counters;
+	pthread_t               tid;
+	const struct bnxt_ulp_sc_core_ops *sc_ops;
+};
+
+struct bnxt_ulp_sc_core_ops {
+	int32_t
+	(*ulp_stats_cache_update)(struct tfc *tfcp,
+				  int dir,
+				  uint8_t *data,
+				  uint64_t handle,
+				  uint16_t *words,
+				  struct tfc_mpc_batch_info_t *batch_info,
+				  bool reset);
+};
+
+/*
+ * Allocate all resources in the stats cache manager for this ulp context
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ */
+int32_t
+ulp_sc_mgr_init(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Release all resources in the stats cache manager for this ulp context
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ */
+int32_t
+ulp_sc_mgr_deinit(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Setup the stats cache timer thread that will fetch/accumulate raw counter
+ * data from the chip's internal stats caches
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ */
+int32_t
+ulp_sc_mgr_thread_start(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Alarm handler that will issue the TF-Core API to fetch
+ * data from the chip's internal stats caches
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ */
+void
+ulp_sc_mgr_alarm_cb(void *arg);
+
+/*
+ * Cancel the alarm handler
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+void ulp_sc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Check if the thread that walks through the flows is started
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+bool ulp_sc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Get the current counts for the given flow id
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ * flow_id [in] The flow identifier
+ * count [out] structure in which the updated counts are passed
+ * back to the caller.
+ *
+ */
+int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
+			       uint32_t flow_id,
+			       struct rte_flow_query_count *count);
+
+/*
+ * Allocate a cache entry for flow
+ *
+ * parms [in] Various fields used to identify the flow
+ * counter_handle [in] This is the action table entry identifier.
+ * tbl [in] Various fields used to identify the flow
+ *
+ */
+int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
+			   uint64_t counter_handle,
+			   struct bnxt_ulp_mapper_tbl_info *tbl);
+
+/*
+ * Free cache entry
+ *
+ * ulp [in] The ulp context for the stats cache manager
+ * fid [in] The flow identifier
+ *
+ */
+void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
+			   uint32_t fid);
+
+extern const struct bnxt_ulp_sc_core_ops ulp_sc_tfc_core_ops;
+
+#endif /* _ULP_SC_MGR_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_sc_mgr_tfc.c b/drivers/net/bnxt/tf_ulp/ulp_sc_mgr_tfc.c
new file mode 100644
index 0000000000..a8141980d8
--- /dev/null
+++ b/drivers/net/bnxt/tf_ulp/ulp_sc_mgr_tfc.c
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2021 Broadcom
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_alarm.h>
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+#include "bnxt_ulp_utils.h"
+#include "bnxt_ulp_tfc.h"
+#include "bnxt_tf_common.h"
+#include "ulp_sc_mgr.h"
+#include "ulp_flow_db.h"
+#include "ulp_template_db_enum.h"
+#include "ulp_template_struct.h"
+#include "tfc.h"
+#include "tfc_debug.h"
+#include "tfc_action_handle.h"
+
+static int32_t
+ulp_sc_tfc_stats_cache_update(struct tfc *tfcp,
+			      int dir,
+			      uint8_t *data,
+			      uint64_t handle,
+			      uint16_t *words,
+			      struct tfc_mpc_batch_info_t *batch_info,
+			      bool reset)
+{
+	struct tfc_cmm_info cmm_info;
+	struct tfc_cmm_clr cmm_clr;
+	int rc;
+
+	cmm_info.dir = dir;
+	cmm_info.rsubtype = CFA_RSUBTYPE_CMM_ACT;
+	cmm_info.act_handle = handle;
+	cmm_clr.clr = reset;
+
+	if (reset) {
+		cmm_clr.offset_in_byte = 0;
+		cmm_clr.sz_in_byte = 16;
+	}
+
+	rc = tfc_act_get(tfcp,
+			 batch_info,
+			 &cmm_info,
+			 &cmm_clr,
+			 data,
+			 words);
+
+	return rc;
+}
+
+
+const struct bnxt_ulp_sc_core_ops ulp_sc_tfc_core_ops = {
+	.ulp_stats_cache_update = ulp_sc_tfc_stats_cache_update
+};
-- 
2.39.3


      parent reply	other threads:[~2024-10-01  5:54 UTC|newest]

Thread overview: 35+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-01  5:57 [PATCH v3 00/47] TruFlow update for Thor2 Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 01/47] net/bnxt: tf_core: fix wc tcam multi slice delete issue Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 02/47] net/bnxt: tf_core: tcam manager data corruption Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 03/47] net/bnxt: tf_core: External EM support cleanup Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 04/47] net/bnxt: tf_core: Thor TF EM key size check Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 05/47] net/bnxt: tf_core: flow scale improvement Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 06/47] net/bnxt: tf_core: TF support flow scale query Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 07/47] net/bnxt: tf_core: fix slice count in case of HA entry move Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 08/47] net/bnxt: tf_core: convert priority based TCAM manager to dynamic allocation Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 09/47] net/bnxt: tf_core: remove dead AFM code from session-based priority TCAM mgr Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 10/47] net/bnxt: tf_core: remove dead " Sriharsha Basavapatna
2024-10-01  5:57 ` [PATCH v3 13/47] net/bnxt: tf_ulp: add custom l2 etype tunnel support Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 14/47] net/bnxt: tf_ulp: add support for vf to vf flow offload Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 15/47] net/bnxt: tf_ulp: Wh+ mirroring support Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 16/47] net/bnxt: tf_ulp: miscellaneous fixes Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 19/47] net/bnxt: tf_ulp: convert recipe table to dynamic memory Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 21/47] net/bnxt: tf_ulp: add action read and clear support Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 23/47] net/bnxt: tf_ulp: VFR updates for Thor 2 Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 25/47] net/bnxt: tf_ulp: update template files Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 26/47] net/bnxt: tf_ulp: enable recipe id generation Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 27/47] net/bnxt: tf_ulp: fixed parent child db counters Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 28/47] net/bnxt: tf_ulp: modify return values to adhere to C coding standard Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 29/47] net/bnxt: tf_ulp: update template files Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 30/47] net/bnxt: tf_ulp: add mask defaults when mask is not specified Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 32/47] net/bnxt: tf_ulp: add support for flow priority Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 34/47] net/bnxt: tf_ulp: add rte_mtr support for Thor2 Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 35/47] net/bnxt: tf_ulp: TF support flow scale query Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 36/47] net/bnxt: tf_ulp: add support for rss flow query to ULP Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 38/47] net/bnxt: tf_ulp: inline utility functions and use likely/unlikely Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 39/47] net/bnxt: tf_ulp: switch ulp to use rte crc32 hash Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 41/47] net/bnxt: tf_ulp: support a few generic template items Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 42/47] net/bnxt: tf_ulp: TFC support flow scale query for Thor2 Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 44/47] net/bnxt: tf_ulp: enable support for truflow feature configuration Sriharsha Basavapatna
2024-10-01  5:58 ` [PATCH v3 45/47] net/bnxt: tf_ulp: support a few feature extensions Sriharsha Basavapatna
2024-10-01  5:58 ` Sriharsha Basavapatna [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241001055833.757163-48-sriharsha.basavapatna@broadcom.com \
    --to=sriharsha.basavapatna@broadcom.com \
    --cc=ajit.khaparde@broadcom.com \
    --cc=dev@dpdk.org \
    --cc=jay.ding@broadcom.com \
    --cc=peter.spreadborough@broadcom.com \
    --cc=sbhosle@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).