DPDK patches and discussions
From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <jerinj@marvell.com>,
	<aprabhu@marvell.com>
Subject: [PATCH v3 31/38] ml/cnxk: add support to handle extended dev stats
Date: Tue, 20 Dec 2022 11:26:38 -0800	[thread overview]
Message-ID: <20221220192645.14042-32-syalavarthi@marvell.com> (raw)
In-Reply-To: <20221220192645.14042-1-syalavarthi@marvell.com>

Added support to handle ML device extended stats. Enabled
support to get xstats names, get xstats values and reset
xstats. Supported xstats include average, minimum and
maximum hardware and firmware latency.
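
For context, below is a minimal application-side sketch of how these
xstats could be listed, read and reset through the mldev xstats calls
that back the driver ops added in this patch. The rte_ml_dev_xstats_*
signatures and the NULL-argument conventions shown are assumptions
based on this series and may differ from the final library API:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  #include <rte_mldev.h>

  /* Hypothetical helper: dump and reset per-model latency xstats.
   * The rte_ml_dev_xstats_* calls mirror the driver ops in this patch;
   * exact library signatures are an assumption of this sketch. */
  static void
  ml_dump_xstats(int16_t dev_id)
  {
  	struct rte_ml_dev_xstats_map *map;
  	uint64_t value;
  	int count;
  	int i;

  	/* A NULL map is assumed to return the number of xstats */
  	count = rte_ml_dev_xstats_names_get(dev_id, NULL, 0);
  	if (count <= 0)
  		return;

  	map = calloc(count, sizeof(*map));
  	if (map == NULL)
  		return;

  	count = rte_ml_dev_xstats_names_get(dev_id, map, count);
  	for (i = 0; i < count; i++) {
  		uint16_t id = map[i].id;

  		if (rte_ml_dev_xstats_get(dev_id, &id, &value, 1) == 1)
  			printf("%s: %" PRIu64 "\n", map[i].name, value);
  	}

  	/* Passing NULL stat_ids is assumed to reset all xstats */
  	rte_ml_dev_xstats_reset(dev_id, NULL, 0);

  	free(map);
  }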

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 drivers/ml/cnxk/cn10k_ml_dev.h   |   3 +
 drivers/ml/cnxk/cn10k_ml_model.h |  57 +++++
 drivers/ml/cnxk/cn10k_ml_ops.c   | 356 ++++++++++++++++++++++++++++++-
 3 files changed, 415 insertions(+), 1 deletion(-)

diff --git a/drivers/ml/cnxk/cn10k_ml_dev.h b/drivers/ml/cnxk/cn10k_ml_dev.h
index 604a200e26..b7ff369ba8 100644
--- a/drivers/ml/cnxk/cn10k_ml_dev.h
+++ b/drivers/ml/cnxk/cn10k_ml_dev.h
@@ -372,6 +372,9 @@ struct cn10k_ml_dev {
 
 	/* Number of models loaded */
 	uint16_t nb_models_loaded;
+
+	/* xstats status */
+	bool xstats_enabled;
 };
 
 uint64_t cn10k_ml_fw_flags_get(struct cn10k_ml_fw *fw);
diff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h
index 2fd12846d4..f6a7276aa7 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.h
+++ b/drivers/ml/cnxk/cn10k_ml_model.h
@@ -402,6 +402,57 @@ struct cn10k_ml_model_addr {
 	uint32_t total_output_sz_d;
 };
 
+/* Extended stats types enum */
+enum cn10k_ml_model_xstats_type {
+	/* Average hardware latency */
+	avg_hw_latency = 0,
+
+	/* Minimum hardware latency */
+	min_hw_latency,
+
+	/* Maximum hardware latency */
+	max_hw_latency,
+
+	/* Average firmware latency */
+	avg_fw_latency,
+
+	/* Minimum firmware latency */
+	min_fw_latency,
+
+	/* Maximum firmware latency */
+	max_fw_latency,
+};
+
+/* Model fast-path stats */
+struct cn10k_ml_model_stats {
+	/* Total hardware latency, sum of all inferences */
+	uint64_t hw_latency_tot;
+
+	/* Minimum hardware latency */
+	uint64_t hw_latency_min;
+
+	/* Maximum hardware latency */
+	uint64_t hw_latency_max;
+
+	/* Total firmware latency, sum of all inferences */
+	uint64_t fw_latency_tot;
+
+	/* Minimum firmware latency */
+	uint64_t fw_latency_min;
+
+	/* Maximum firmware latency */
+	uint64_t fw_latency_max;
+
+	/* Total jobs dequeued */
+	uint64_t dequeued_count;
+
+	/* Hardware stats reset index */
+	uint64_t hw_reset_count;
+
+	/* Firmware stats reset index */
+	uint64_t fw_reset_count;
+};
+
 /* Model Object */
 struct cn10k_ml_model {
 	/* Device reference */
@@ -441,6 +492,12 @@ struct cn10k_ml_model {
 
 	/* Slow-path operations request pointer */
 	struct cn10k_ml_req *req;
+
+	/* Stats for burst ops */
+	struct cn10k_ml_model_stats *burst_stats;
+
+	/* Stats for sync ops */
+	struct cn10k_ml_model_stats *sync_stats;
 };
 
 int cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size);
diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c
index d5c45ce916..47edde0404 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.c
+++ b/drivers/ml/cnxk/cn10k_ml_ops.c
@@ -354,6 +354,134 @@ cn10k_ml_prep_fp_job_descriptor(struct rte_ml_dev *dev, struct cn10k_ml_req *req
 	req->jd.model_run.num_batches = op->nb_batches;
 }
 
+#define ML_AVG_FOREACH_QP(dev, model, qp_id, str, value, count)                                    \
+	do {                                                                                       \
+		value = 0;                                                                         \
+		for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \
+			value += model->burst_stats[qp_id].str##_latency_tot;                      \
+			count += model->burst_stats[qp_id].dequeued_count -                        \
+				 model->burst_stats[qp_id].str##_reset_count;                      \
+		}                                                                                  \
+		value = value / count;                                                             \
+	} while (0)
+
+#define ML_MIN_FOREACH_QP(dev, model, qp_id, str, value, count)                                    \
+	do {                                                                                       \
+		value = UINT64_MAX;                                                                \
+		for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \
+			value = PLT_MIN(value, model->burst_stats[qp_id].str##_latency_min);       \
+			count += model->burst_stats[qp_id].dequeued_count -                        \
+				 model->burst_stats[qp_id].str##_reset_count;                      \
+		}                                                                                  \
+		if (count == 0)                                                                    \
+			value = 0;                                                                 \
+	} while (0)
+
+#define ML_MAX_FOREACH_QP(dev, model, qp_id, str, value, count)                                    \
+	do {                                                                                       \
+		value = 0;                                                                         \
+		for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \
+			value = PLT_MAX(value, model->burst_stats[qp_id].str##_latency_max);       \
+			count += model->burst_stats[qp_id].dequeued_count -                        \
+				 model->burst_stats[qp_id].str##_reset_count;                      \
+		}                                                                                  \
+		if (count == 0)                                                                    \
+			value = 0;                                                                 \
+	} while (0)
+
+static uint64_t
+cn10k_ml_model_xstat_get(struct rte_ml_dev *dev, uint16_t model_id,
+			 enum cn10k_ml_model_xstats_type type)
+{
+	struct cn10k_ml_model *model;
+	uint64_t count = 0;
+	uint64_t value;
+	uint32_t qp_id;
+
+	model = dev->data->models[model_id];
+	if (model == NULL)
+		return 0;
+
+	switch (type) {
+	case avg_hw_latency:
+		ML_AVG_FOREACH_QP(dev, model, qp_id, hw, value, count);
+		break;
+	case min_hw_latency:
+		ML_MIN_FOREACH_QP(dev, model, qp_id, hw, value, count);
+		break;
+	case max_hw_latency:
+		ML_MAX_FOREACH_QP(dev, model, qp_id, hw, value, count);
+		break;
+	case avg_fw_latency:
+		ML_AVG_FOREACH_QP(dev, model, qp_id, fw, value, count);
+		break;
+	case min_fw_latency:
+		ML_MIN_FOREACH_QP(dev, model, qp_id, fw, value, count);
+		break;
+	case max_fw_latency:
+		ML_MAX_FOREACH_QP(dev, model, qp_id, fw, value, count);
+		break;
+	default:
+		value = 0;
+	}
+
+	return value;
+}
+
+#define ML_AVG_RESET_FOREACH_QP(dev, model, qp_id, str)                                            \
+	do {                                                                                       \
+		for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \
+			model->burst_stats[qp_id].str##_latency_tot = 0;                           \
+			model->burst_stats[qp_id].str##_reset_count =                              \
+				model->burst_stats[qp_id].dequeued_count;                          \
+		}                                                                                  \
+	} while (0)
+
+#define ML_MIN_RESET_FOREACH_QP(dev, model, qp_id, str)                                            \
+	do {                                                                                       \
+		for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)                        \
+			model->burst_stats[qp_id].str##_latency_min = UINT64_MAX;                  \
+	} while (0)
+
+#define ML_MAX_RESET_FOREACH_QP(dev, model, qp_id, str)                                            \
+	do {                                                                                       \
+		for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)                        \
+			model->burst_stats[qp_id].str##_latency_max = 0;                           \
+	} while (0)
+
+static void
+cn10k_ml_model_xstat_reset(struct rte_ml_dev *dev, uint16_t model_id,
+			   enum cn10k_ml_model_xstats_type type)
+{
+	struct cn10k_ml_model *model;
+	uint32_t qp_id;
+
+	model = dev->data->models[model_id];
+
+	switch (type) {
+	case avg_hw_latency:
+		ML_AVG_RESET_FOREACH_QP(dev, model, qp_id, hw);
+		break;
+	case min_hw_latency:
+		ML_MIN_RESET_FOREACH_QP(dev, model, qp_id, hw);
+		break;
+	case max_hw_latency:
+		ML_MAX_RESET_FOREACH_QP(dev, model, qp_id, hw);
+		break;
+	case avg_fw_latency:
+		ML_AVG_RESET_FOREACH_QP(dev, model, qp_id, fw);
+		break;
+	case min_fw_latency:
+		ML_MIN_RESET_FOREACH_QP(dev, model, qp_id, fw);
+		break;
+	case max_fw_latency:
+		ML_MAX_RESET_FOREACH_QP(dev, model, qp_id, fw);
+		break;
+	default:
+		return;
+	}
+}
+
 static int
 cn10k_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info)
 {
@@ -519,6 +647,13 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c
 
 	rte_spinlock_init(&ocm->lock);
 
+	/* Check firmware stats */
+	if ((mldev->fw.req->jd.fw_load.cap.s.hw_stats) &&
+	    (mldev->fw.req->jd.fw_load.cap.s.fw_stats))
+		mldev->xstats_enabled = true;
+	else
+		mldev->xstats_enabled = false;
+
 	dev->enqueue_burst = cn10k_ml_enqueue_burst;
 	dev->dequeue_burst = cn10k_ml_dequeue_burst;
 	dev->op_error_get = cn10k_ml_op_error_get;
@@ -714,6 +849,170 @@ cn10k_ml_dev_stats_reset(struct rte_ml_dev *dev)
 	}
 }
 
+/* Model xstats names */
+struct rte_ml_dev_xstats_map cn10k_ml_model_xstats_table[] = {
+	{avg_hw_latency, "Avg-HW-Latency"}, {min_hw_latency, "Min-HW-Latency"},
+	{max_hw_latency, "Max-HW-Latency"}, {avg_fw_latency, "Avg-FW-Latency"},
+	{min_fw_latency, "Min-FW-Latency"}, {max_fw_latency, "Max-FW-Latency"},
+};
+
+static int
+cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, struct rte_ml_dev_xstats_map *xstats_map,
+			      uint32_t size)
+{
+	struct rte_ml_dev_info dev_info;
+	struct cn10k_ml_model *model;
+	struct cn10k_ml_dev *mldev;
+	uint32_t model_id;
+	uint32_t count;
+	uint32_t type;
+	uint32_t id;
+
+	mldev = dev->data->dev_private;
+	if (!mldev->xstats_enabled)
+		return 0;
+
+	if (xstats_map == NULL)
+		return PLT_DIM(cn10k_ml_model_xstats_table) * mldev->nb_models_loaded;
+
+	/* Model xstats names */
+	count = 0;
+	cn10k_ml_dev_info_get(dev, &dev_info);
+
+	for (id = 0; id < PLT_DIM(cn10k_ml_model_xstats_table) * dev_info.max_models; id++) {
+		model_id = id / PLT_DIM(cn10k_ml_model_xstats_table);
+		model = dev->data->models[model_id];
+
+		if (model == NULL)
+			continue;
+
+		xstats_map[count].id = id;
+		type = id % PLT_DIM(cn10k_ml_model_xstats_table);
+
+		snprintf(xstats_map[count].name, RTE_ML_STR_MAX, "%s-%s-cycles",
+			 model->metadata.model.name, cn10k_ml_model_xstats_table[type].name);
+
+		count++;
+		if (count == size)
+			break;
+	}
+
+	return count;
+}
+
+static int
+cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16_t *stat_id,
+				uint64_t *value)
+{
+	struct rte_ml_dev_xstats_map *xstats_map;
+	struct rte_ml_dev_info dev_info;
+	struct cn10k_ml_dev *mldev;
+	uint32_t num_xstats;
+	uint32_t model_id;
+	uint32_t type;
+	uint32_t id;
+
+	mldev = dev->data->dev_private;
+	if (!mldev->xstats_enabled)
+		return 0;
+
+	num_xstats = PLT_DIM(cn10k_ml_model_xstats_table) * mldev->nb_models_loaded;
+	xstats_map = rte_zmalloc("cn10k_ml_xstats_map",
+				 sizeof(struct rte_ml_dev_xstats_map) * num_xstats, 0);
+	cn10k_ml_dev_xstats_names_get(dev, xstats_map, num_xstats);
+
+	cn10k_ml_dev_info_get(dev, &dev_info);
+	for (id = 0; id < PLT_DIM(cn10k_ml_model_xstats_table) * dev_info.max_models; id++) {
+		if (strncmp(name, xstats_map[id].name, strlen(name)) == 0) {
+			*stat_id = id;
+			rte_free(xstats_map);
+			break;
+		}
+	}
+
+	if (id == PLT_DIM(cn10k_ml_model_xstats_table) * dev_info.max_models)
+		return -EINVAL;
+
+	model_id = id / PLT_DIM(cn10k_ml_model_xstats_table);
+	type = id % PLT_DIM(cn10k_ml_model_xstats_table);
+	*value = cn10k_ml_model_xstat_get(dev, model_id, type);
+
+	return 0;
+}
+
+static int
+cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, const uint16_t *stat_ids, uint64_t *values,
+			uint16_t nb_ids)
+{
+	struct cn10k_ml_model *model;
+	struct cn10k_ml_dev *mldev;
+	uint32_t model_id;
+	uint32_t count;
+	uint32_t type;
+	uint32_t i;
+
+	mldev = dev->data->dev_private;
+	if (!mldev->xstats_enabled)
+		return 0;
+
+	count = 0;
+	for (i = 0; i < nb_ids; i++) {
+		model_id = stat_ids[i] / PLT_DIM(cn10k_ml_model_xstats_table);
+		model = dev->data->models[model_id];
+
+		if (model == NULL)
+			continue;
+
+		type = stat_ids[i] % PLT_DIM(cn10k_ml_model_xstats_table);
+		values[i] = cn10k_ml_model_xstat_get(dev, model_id, type);
+		count++;
+	}
+
+	return count;
+}
+
+static int
+cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, const uint16_t *stat_ids, uint16_t nb_ids)
+{
+	struct rte_ml_dev_info dev_info;
+	struct cn10k_ml_model *model;
+	struct cn10k_ml_dev *mldev;
+	uint32_t model_id;
+	uint32_t type;
+	uint32_t i;
+
+	mldev = dev->data->dev_private;
+	if (!mldev->xstats_enabled)
+		return 0;
+
+	cn10k_ml_dev_info_get(dev, &dev_info);
+	if (stat_ids == NULL) {
+		for (i = 0; i < PLT_DIM(cn10k_ml_model_xstats_table) * dev_info.max_models; i++) {
+			model_id = i / PLT_DIM(cn10k_ml_model_xstats_table);
+			model = dev->data->models[model_id];
+
+			if (model == NULL)
+				continue;
+
+			type = i % PLT_DIM(cn10k_ml_model_xstats_table);
+			cn10k_ml_model_xstat_reset(dev, model_id, type);
+		}
+	} else {
+		for (i = 0; i < nb_ids; i++) {
+			model_id = stat_ids[i] / PLT_DIM(cn10k_ml_model_xstats_table);
+			model = dev->data->models[model_id];
+
+			if (model == NULL)
+				continue;
+
+			type = stat_ids[i] % PLT_DIM(cn10k_ml_model_xstats_table);
+			cn10k_ml_model_xstat_reset(dev, model_id, type);
+		}
+	}
+
+	return 0;
+}
+
 static int
 cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)
 {
@@ -856,6 +1155,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 
 	char str[RTE_MEMZONE_NAMESIZE];
 	const struct plt_memzone *mz;
+	size_t model_stats_size;
 	size_t model_data_size;
 	size_t model_info_size;
 	uint8_t *base_dma_addr;
@@ -864,6 +1164,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	uint64_t mz_size;
 	uint16_t idx;
 	bool found;
+	int qp_id;
 	int ret;
 
 	ret = cn10k_ml_model_metadata_check(params->addr, params->size);
@@ -900,10 +1201,12 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 			  metadata->model.num_input * sizeof(struct rte_ml_io_info) +
 			  metadata->model.num_output * sizeof(struct rte_ml_io_info);
 	model_info_size = PLT_ALIGN_CEIL(model_info_size, ML_CN10K_ALIGN_SIZE);
+	model_stats_size = (dev->data->nb_queue_pairs + 1) * sizeof(struct cn10k_ml_model_stats);
 
 	mz_size = PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_model), ML_CN10K_ALIGN_SIZE) +
 		  2 * model_data_size + model_info_size +
-		  PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_req), ML_CN10K_ALIGN_SIZE);
+		  PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_req), ML_CN10K_ALIGN_SIZE) +
+		  model_stats_size;
 
 	/* Allocate memzone for model object and model data */
 	snprintf(str, RTE_MEMZONE_NAMESIZE, "%s_%u", CN10K_ML_MODEL_MEMZONE_NAME, idx);
@@ -949,6 +1252,24 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	/* Set slow-path request address and state */
 	model->req = PLT_PTR_ADD(model->info, model_info_size);
 
+	/* Reset burst and sync stats */
+	model->burst_stats = PLT_PTR_ADD(
+		model->req, PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_req), ML_CN10K_ALIGN_SIZE));
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs + 1; qp_id++) {
+		model->burst_stats[qp_id].hw_latency_tot = 0;
+		model->burst_stats[qp_id].hw_latency_min = UINT64_MAX;
+		model->burst_stats[qp_id].hw_latency_max = 0;
+		model->burst_stats[qp_id].fw_latency_tot = 0;
+		model->burst_stats[qp_id].fw_latency_min = UINT64_MAX;
+		model->burst_stats[qp_id].fw_latency_max = 0;
+		model->burst_stats[qp_id].hw_reset_count = 0;
+		model->burst_stats[qp_id].fw_reset_count = 0;
+		model->burst_stats[qp_id].dequeued_count = 0;
+	}
+	model->sync_stats =
+		PLT_PTR_ADD(model->burst_stats,
+			    dev->data->nb_queue_pairs * sizeof(struct cn10k_ml_model_stats));
+
 	plt_spinlock_init(&model->lock);
 	model->state = ML_CN10K_MODEL_STATE_LOADED;
 	dev->data->models[idx] = model;
@@ -1502,15 +1823,44 @@ static __rte_always_inline void
 cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result *result,
 		       struct rte_ml_op *op)
 {
+	struct cn10k_ml_model_stats *stats;
+	struct cn10k_ml_model *model;
 	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_qp *qp;
+	uint64_t hw_latency;
+	uint64_t fw_latency;
 
 	if (likely(result->error_code.u64 == 0)) {
+		model = dev->data->models[op->model_id];
 		if (likely(qp_id >= 0)) {
 			qp = dev->data->queue_pairs[qp_id];
 			qp->stats.dequeued_count++;
+			stats = &model->burst_stats[qp_id];
+		} else {
+			stats = model->sync_stats;
+		}
+
+		if (unlikely(stats->dequeued_count == stats->hw_reset_count)) {
+			stats->hw_latency_min = UINT64_MAX;
+			stats->hw_latency_max = 0;
 		}
 
+		if (unlikely(stats->dequeued_count == stats->fw_reset_count)) {
+			stats->fw_latency_min = UINT64_MAX;
+			stats->fw_latency_max = 0;
+		}
+
+		hw_latency = result->stats.hw_end - result->stats.hw_start;
+		fw_latency = result->stats.fw_end - result->stats.fw_start - hw_latency;
+
+		stats->hw_latency_tot += hw_latency;
+		stats->hw_latency_min = PLT_MIN(stats->hw_latency_min, hw_latency);
+		stats->hw_latency_max = PLT_MAX(stats->hw_latency_max, hw_latency);
+		stats->fw_latency_tot += fw_latency;
+		stats->fw_latency_min = PLT_MIN(stats->fw_latency_min, fw_latency);
+		stats->fw_latency_max = PLT_MAX(stats->fw_latency_max, fw_latency);
+		stats->dequeued_count++;
+
 		op->impl_opaque = result->error_code.u64;
 		op->status = RTE_ML_OP_STATUS_SUCCESS;
 	} else {
@@ -1744,6 +2094,10 @@ struct rte_ml_dev_ops cn10k_ml_ops = {
 	/* Stats ops */
 	.dev_stats_get = cn10k_ml_dev_stats_get,
 	.dev_stats_reset = cn10k_ml_dev_stats_reset,
+	.dev_xstats_names_get = cn10k_ml_dev_xstats_names_get,
+	.dev_xstats_by_name_get = cn10k_ml_dev_xstats_by_name_get,
+	.dev_xstats_get = cn10k_ml_dev_xstats_get,
+	.dev_xstats_reset = cn10k_ml_dev_xstats_reset,
 
 	/* Model ops */
 	.model_load = cn10k_ml_model_load,
-- 
2.17.1


Thread overview: 253+ messages
2022-12-08 20:01 [PATCH v1 00/37] Implementation of ML CNXK driver Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 01/37] ml/cnxk: add skeleton for ML cnxk driver Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 02/37] ml/cnxk: enable probe and remove of ML device Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 03/37] ml/cnxk: add driver support to get device info Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 04/37] ml/cnxk: add support for configure and close Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 05/37] ml/cnxk: parse ML firmware path from device args Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 06/37] ml/cnxk: enable firmware load and device reset Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 07/37] ml/cnxk: enable support for simulator environment Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 08/37] ml/cnxk: enable support for device start and stop Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 09/37] ml/cnxk: add support to create device queue-pairs Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 10/37] ml/cnxk: add functions to load and unload models Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 11/37] ml/cnxk: enable validity checks for model metadata Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 12/37] ml/cnxk: add internal structures for derived info Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 13/37] ml/cnxk: add internal structures for tiles and OCM Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 14/37] ml/cnxk: add structures for slow and fast path JDs Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 15/37] ml/cnxk: find OCM mask and page slots for a model Srikanth Yalavarthi
2022-12-08 20:01 ` [PATCH v1 16/37] ml/cnxk: add support to reserve and free OCM pages Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 17/37] ml/cnxk: enable support to start an ML model Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 18/37] ml/cnxk: enable support to stop an ML models Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 19/37] ml/cnxk: enable support to get model information Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 20/37] ml/cnxk: enable support to update model params Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 21/37] ml/cnxk: add support to get IO buffer sizes Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 22/37] ml/cnxk: enable quantization and dequantization Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 23/37] ml/cnxk: enable support to dump device debug info Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 24/37] ml/cnxk: add driver support for device selftest Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 25/37] ml/cnxk: enqueue a burst of inference requests Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 26/37] ml/cnxk: dequeue " Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 27/37] ml/cnxk: add internal function for sync mode run Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 28/37] ml/cnxk: enable support for firmware error codes Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 29/37] ml/cnxk: add support to get and reset device stats Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 30/37] ml/cnxk: add support to handle extended dev stats Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 31/37] ml/cnxk: enable support to get xstats in cycles Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 32/37] ml/cnxk: add support to report DPE FW warnings Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 33/37] ml/cnxk: add support to enable model data caching Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 34/37] ml/cnxk: add support to select OCM allocation mode Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 35/37] ml/cnxk: add support to use lock during jcmd enq Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 36/37] ml/cnxk: add support to select poll memory region Srikanth Yalavarthi
2022-12-08 20:02 ` [PATCH v1 37/37] ml/cnxk: add user guide for marvell cnxk ml driver Srikanth Yalavarthi
2022-12-08 20:17 ` [PATCH v2 00/37] Implementation of ML CNXK driver Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 01/37] ml/cnxk: add skeleton for ML cnxk driver Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 02/37] ml/cnxk: enable probe and remove of ML device Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 03/37] ml/cnxk: add driver support to get device info Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 04/37] ml/cnxk: add support for configure and close Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 05/37] ml/cnxk: parse ML firmware path from device args Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 06/37] ml/cnxk: enable firmware load and device reset Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 07/37] ml/cnxk: enable support for simulator environment Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 08/37] ml/cnxk: enable support for device start and stop Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 09/37] ml/cnxk: add support to create device queue-pairs Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 10/37] ml/cnxk: add functions to load and unload models Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 11/37] ml/cnxk: enable validity checks for model metadata Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 12/37] ml/cnxk: add internal structures for derived info Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 13/37] ml/cnxk: add internal structures for tiles and OCM Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 14/37] ml/cnxk: add structures for slow and fast path JDs Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 15/37] ml/cnxk: find OCM mask and page slots for a model Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 16/37] ml/cnxk: add support to reserve and free OCM pages Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 17/37] ml/cnxk: enable support to start an ML model Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 18/37] ml/cnxk: enable support to stop an ML models Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 19/37] ml/cnxk: enable support to get model information Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 20/37] ml/cnxk: enable support to update model params Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 21/37] ml/cnxk: add support to get IO buffer sizes Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 22/37] ml/cnxk: enable quantization and dequantization Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 23/37] ml/cnxk: enable support to dump device debug info Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 24/37] ml/cnxk: add driver support for device selftest Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 25/37] ml/cnxk: enqueue a burst of inference requests Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 26/37] ml/cnxk: dequeue " Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 27/37] ml/cnxk: add internal function for sync mode run Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 28/37] ml/cnxk: enable support for firmware error codes Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 29/37] ml/cnxk: add support to get and reset device stats Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 30/37] ml/cnxk: add support to handle extended dev stats Srikanth Yalavarthi
2022-12-08 20:17   ` [PATCH v2 31/37] ml/cnxk: enable support to get xstats in cycles Srikanth Yalavarthi
2022-12-08 20:18   ` [PATCH v2 32/37] ml/cnxk: add support to report DPE FW warnings Srikanth Yalavarthi
2022-12-08 20:18   ` [PATCH v2 33/37] ml/cnxk: add support to enable model data caching Srikanth Yalavarthi
2022-12-08 20:18   ` [PATCH v2 34/37] ml/cnxk: add support to select OCM allocation mode Srikanth Yalavarthi
2022-12-08 20:18   ` [PATCH v2 35/37] ml/cnxk: add support to use lock during jcmd enq Srikanth Yalavarthi
2022-12-08 20:18   ` [PATCH v2 36/37] ml/cnxk: add support to select poll memory region Srikanth Yalavarthi
2022-12-08 20:18   ` [PATCH v2 37/37] ml/cnxk: add user guide for marvell cnxk ml driver Srikanth Yalavarthi
2022-12-20 19:26   ` [PATCH v3 00/38] Implementation of ML CNXK driver Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 01/38] common/cnxk: add ML headers and ROC code for cnxk Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 02/38] ml/cnxk: add skeleton for ML cnxk driver Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 03/38] ml/cnxk: enable probe and remove of ML device Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 04/38] ml/cnxk: add driver support to get device info Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 05/38] ml/cnxk: add support for configure and close Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 06/38] ml/cnxk: parse ML firmware path from device args Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 07/38] ml/cnxk: enable firmware load and device reset Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 08/38] ml/cnxk: enable support for simulator environment Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 09/38] ml/cnxk: enable support for device start and stop Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 10/38] ml/cnxk: add support to create device queue-pairs Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 11/38] ml/cnxk: add functions to load and unload models Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 12/38] ml/cnxk: enable validity checks for model metadata Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 13/38] ml/cnxk: add internal structures for derived info Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 14/38] ml/cnxk: add internal structures for tiles and OCM Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 15/38] ml/cnxk: add structures for slow and fast path JDs Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 16/38] ml/cnxk: find OCM mask and page slots for a model Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 17/38] ml/cnxk: add support to reserve and free OCM pages Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 18/38] ml/cnxk: enable support to start an ML model Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 19/38] ml/cnxk: enable support to stop an ML models Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 20/38] ml/cnxk: enable support to get model information Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 21/38] ml/cnxk: enable support to update model params Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 22/38] ml/cnxk: add support to get IO buffer sizes Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 23/38] ml/cnxk: enable quantization and dequantization Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 24/38] ml/cnxk: enable support to dump device debug info Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 25/38] ml/cnxk: add driver support for device selftest Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 26/38] ml/cnxk: enqueue a burst of inference requests Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 27/38] ml/cnxk: dequeue " Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 28/38] ml/cnxk: add internal function for sync mode run Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 29/38] ml/cnxk: enable support for firmware error codes Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 30/38] ml/cnxk: add support to get and reset device stats Srikanth Yalavarthi
2022-12-20 19:26     ` Srikanth Yalavarthi [this message]
2022-12-20 19:26     ` [PATCH v3 32/38] ml/cnxk: enable support to get xstats in cycles Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 33/38] ml/cnxk: add support to report DPE FW warnings Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 34/38] ml/cnxk: add support to enable model data caching Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 35/38] ml/cnxk: add support to select OCM allocation mode Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 36/38] ml/cnxk: add support to use lock during jcmd enq Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 37/38] ml/cnxk: add support to select poll memory region Srikanth Yalavarthi
2022-12-20 19:26     ` [PATCH v3 38/38] ml/cnxk: add user guide for marvell cnxk ml driver Srikanth Yalavarthi
2022-12-20 21:23     ` [PATCH v3 00/38] Implementation of ML CNXK driver Stephen Hemminger
2022-12-21  4:44       ` Jerin Jacob
2023-02-01  9:22 ` [PATCH v4 00/39] " Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 01/39] common/cnxk: add ML headers and ROC code for cnxk Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 02/39] ml/cnxk: add skeleton for ML cnxk driver Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 03/39] ml/cnxk: enable probe and remove of ML device Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 04/39] ml/cnxk: add driver support to get device info Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 05/39] ml/cnxk: add support for configure and close Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 06/39] ml/cnxk: parse ML firmware path from device args Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 07/39] ml/cnxk: enable firmware load and device reset Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 08/39] ml/cnxk: enable support for simulator environment Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 09/39] ml/cnxk: enable support for device start and stop Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 10/39] ml/cnxk: add support to create device queue-pairs Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 11/39] ml/cnxk: add functions to load and unload models Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 12/39] ml/cnxk: enable validity checks for model metadata Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 13/39] ml/cnxk: add internal structures for derived info Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 14/39] ml/cnxk: add internal structures for tiles and OCM Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 15/39] ml/cnxk: add structures for slow and fast path JDs Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 16/39] ml/cnxk: find OCM mask and page slots for a model Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 17/39] ml/cnxk: add support to reserve and free OCM pages Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 18/39] ml/cnxk: enable support to start an ML model Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 19/39] ml/cnxk: enable support to stop an ML models Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 20/39] ml/cnxk: enable support to get model information Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 21/39] ml/cnxk: enable support to update model params Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 22/39] ml/cnxk: add support to get IO buffer sizes Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 23/39] ml/cnxk: enable quantization and dequantization Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 24/39] ml/cnxk: enable support to dump device debug info Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 25/39] ml/cnxk: add driver support for device selftest Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 26/39] ml/cnxk: enqueue a burst of inference requests Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 27/39] ml/cnxk: dequeue " Srikanth Yalavarthi
2023-02-01  9:22   ` [PATCH v4 28/39] ml/cnxk: add internal function for sync mode run Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 29/39] ml/cnxk: enable support for firmware error codes Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 30/39] ml/cnxk: add support to get and reset device stats Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 31/39] ml/cnxk: add support to handle extended dev stats Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 32/39] ml/cnxk: enable support to get xstats in cycles Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 33/39] ml/cnxk: add support to report DPE FW warnings Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 34/39] ml/cnxk: add support to enable model data caching Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 35/39] ml/cnxk: add support to select OCM allocation mode Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 36/39] ml/cnxk: add support to use lock during jcmd enq Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 37/39] ml/cnxk: add support to select poll memory region Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 38/39] ml/cnxk: add user guide for marvell cnxk ml driver Srikanth Yalavarthi
2023-02-01  9:23   ` [PATCH v4 39/39] ml/cnxk: enable support for configurable ocm page Srikanth Yalavarthi
2023-02-07 16:06 ` [PATCH v5 00/39] Implementation of ML CNXK driver Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 01/39] common/cnxk: add ML headers and ROC code for cnxk Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 02/39] ml/cnxk: add skeleton for ML cnxk driver Srikanth Yalavarthi
2023-03-09 22:06     ` Thomas Monjalon
2023-03-10  8:25       ` [EXT] " Srikanth Yalavarthi
2023-03-10  9:28         ` Thomas Monjalon
2023-03-10  9:31           ` Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 03/39] ml/cnxk: enable probe and remove of ML device Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 04/39] ml/cnxk: add driver support to get device info Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 05/39] ml/cnxk: add support for configure and close Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 06/39] ml/cnxk: parse ML firmware path from device args Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 07/39] ml/cnxk: enable firmware load and device reset Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 08/39] ml/cnxk: enable support for simulator environment Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 09/39] ml/cnxk: enable support for device start and stop Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 10/39] ml/cnxk: add support to create device queue-pairs Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 11/39] ml/cnxk: add functions to load and unload models Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 12/39] ml/cnxk: enable validity checks for model metadata Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 13/39] ml/cnxk: add internal structures for derived info Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 14/39] ml/cnxk: add internal structures for tiles and OCM Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 15/39] ml/cnxk: add structures for slow and fast path JDs Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 16/39] ml/cnxk: find OCM mask and page slots for a model Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 17/39] ml/cnxk: add support to reserve and free OCM pages Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 18/39] ml/cnxk: enable support to start an ML model Srikanth Yalavarthi
2023-02-07 16:06   ` [PATCH v5 19/39] ml/cnxk: enable support to stop an ML models Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 20/39] ml/cnxk: enable support to get model information Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 21/39] ml/cnxk: enable support to update model params Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 22/39] ml/cnxk: add support to get IO buffer sizes Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 23/39] ml/cnxk: enable quantization and dequantization Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 24/39] ml/cnxk: enable support to dump device debug info Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 25/39] ml/cnxk: add driver support for device selftest Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 26/39] ml/cnxk: enqueue a burst of inference requests Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 27/39] ml/cnxk: dequeue " Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 28/39] ml/cnxk: add internal function for sync mode run Srikanth Yalavarthi
2023-02-27 10:42     ` Prince Takkar
2023-02-07 16:07   ` [PATCH v5 29/39] ml/cnxk: enable support for firmware error codes Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 30/39] ml/cnxk: add support to get and reset device stats Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 31/39] ml/cnxk: add support to handle extended dev stats Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 32/39] ml/cnxk: enable support to get xstats in cycles Srikanth Yalavarthi
2023-02-15 12:33     ` Shivah Shankar Shankar Narayan Rao
2023-02-16  4:40     ` Prince Takkar
2023-02-07 16:07   ` [PATCH v5 33/39] ml/cnxk: add support to report DPE FW warnings Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 34/39] ml/cnxk: add support to enable model data caching Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 35/39] ml/cnxk: add support to select OCM allocation mode Srikanth Yalavarthi
2023-03-01  9:01     ` Prince Takkar
2023-02-07 16:07   ` [PATCH v5 36/39] ml/cnxk: add support to use lock during jcmd enq Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 37/39] ml/cnxk: add support to select poll memory region Srikanth Yalavarthi
2023-02-07 16:07   ` [PATCH v5 38/39] ml/cnxk: add user guide for marvell cnxk ml driver Srikanth Yalavarthi
2023-02-15 12:34     ` Shivah Shankar Shankar Narayan Rao
2023-02-16  4:41     ` Prince Takkar
2023-02-07 16:07   ` [PATCH v5 39/39] ml/cnxk: enable support for configurable ocm page Srikanth Yalavarthi
2023-02-15 12:33     ` Shivah Shankar Shankar Narayan Rao
2023-02-16  4:37     ` Prince Takkar
2023-03-02  6:08   ` [PATCH v5 00/39] Implementation of ML CNXK driver Prince Takkar
2023-03-10  8:19 ` [PATCH v6 " Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 01/39] common/cnxk: add ML headers and ROC code for cnxk Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 02/39] ml/cnxk: add skeleton for ML cnxk driver Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 03/39] ml/cnxk: enable probe and remove of ML device Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 04/39] ml/cnxk: add driver support to get device info Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 05/39] ml/cnxk: add support for configure and close Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 06/39] ml/cnxk: parse ML firmware path from device args Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 07/39] ml/cnxk: enable firmware load and device reset Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 08/39] ml/cnxk: enable support for simulator environment Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 09/39] ml/cnxk: enable support for device start and stop Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 10/39] ml/cnxk: add support to create device queue-pairs Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 11/39] ml/cnxk: add functions to load and unload models Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 12/39] ml/cnxk: enable validity checks for model metadata Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 13/39] ml/cnxk: add internal structures for derived info Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 14/39] ml/cnxk: add internal structures for tiles and OCM Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 15/39] ml/cnxk: add structures for slow and fast path JDs Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 16/39] ml/cnxk: find OCM mask and page slots for a model Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 17/39] ml/cnxk: add support to reserve and free OCM pages Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 18/39] ml/cnxk: enable support to start an ML model Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 19/39] ml/cnxk: enable support to stop an ML models Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 20/39] ml/cnxk: enable support to get model information Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 21/39] ml/cnxk: enable support to update model params Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 22/39] ml/cnxk: add support to get IO buffer sizes Srikanth Yalavarthi
2023-03-10  8:19   ` [PATCH v6 23/39] ml/cnxk: enable quantization and dequantization Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 24/39] ml/cnxk: enable support to dump device debug info Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 25/39] ml/cnxk: add driver support for device selftest Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 26/39] ml/cnxk: enqueue a burst of inference requests Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 27/39] ml/cnxk: dequeue " Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 28/39] ml/cnxk: add internal function for sync mode run Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 29/39] ml/cnxk: enable support for firmware error codes Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 30/39] ml/cnxk: add support to get and reset device stats Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 31/39] ml/cnxk: add support to handle extended dev stats Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 32/39] ml/cnxk: enable support to get xstats in cycles Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 33/39] ml/cnxk: add support to report DPE FW warnings Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 34/39] ml/cnxk: add support to enable model data caching Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 35/39] ml/cnxk: add support to select OCM allocation mode Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 36/39] ml/cnxk: add support to use lock during jcmd enq Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 37/39] ml/cnxk: add support to select poll memory region Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 38/39] ml/cnxk: add user guide for marvell cnxk ml driver Srikanth Yalavarthi
2023-03-10  8:20   ` [PATCH v6 39/39] ml/cnxk: add support for configurable ocm page Srikanth Yalavarthi
2023-03-10  9:31   ` [PATCH v6 00/39] Implementation of ML CNXK driver Thomas Monjalon
2023-03-10 10:30     ` [EXT] " Srikanth Yalavarthi
2023-03-10 15:24   ` Thomas Monjalon
