From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,
<ptakkar@marvell.com>
Subject: [PATCH v7 05/34] ml/cnxk: add generic cnxk xstats structures
Date: Wed, 18 Oct 2023 21:16:54 -0700
Message-ID: <20231019041726.19243-6-syalavarthi@marvell.com>
In-Reply-To: <20231019041726.19243-1-syalavarthi@marvell.com>
Introduced generic xstats structures and renamed the cn10k
xstats enumerations with the cnxk prefix.
Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
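Usage sketch (annotation only, below the '---' so it is not part of the committed
change): after the rename, the per-name lookup path works as before -- an entry is
matched on map.name and its value is fetched through the getter selected by fn_id.
The helper below is hypothetical and assumes the driver's internal headers and the
structures added in cnxk_ml_xstats.h; only the dispatch pattern is the point:

/* Sketch only: resolve one xstat by name via the generic entry table. */
static int
xstat_value_by_name(struct rte_ml_dev *dev, struct cnxk_ml_xstats *xstats,
		    const char *name, uint64_t *value)
{
	struct cnxk_ml_xstats_entry *xs;
	cnxk_ml_xstats_fn fn;
	uint32_t i;

	for (i = 0; i < xstats->count; i++) {
		xs = &xstats->entries[i];
		if (strcmp(xs->map.name, name) != 0)
			continue;

		/* fn_id selects the device-level or model-level getter. */
		if (xs->fn_id == CNXK_ML_XSTATS_FN_DEVICE)
			fn = cn10k_ml_dev_xstat_get;
		else
			fn = cn10k_ml_model_xstat_get;

		/* reset_value is the offset that emulates a prior reset. */
		*value = fn(dev, xs->obj_idx, xs->type) - xs->reset_value;
		return 0;
	}

	return -EINVAL;
}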
drivers/ml/cnxk/cn10k_ml_dev.h | 86 +---------------
drivers/ml/cnxk/cn10k_ml_model.h | 6 +-
drivers/ml/cnxk/cn10k_ml_ops.c | 169 ++++++++++++++-----------------
drivers/ml/cnxk/cnxk_ml_xstats.h | 128 +++++++++++++++++++++++
4 files changed, 209 insertions(+), 180 deletions(-)
create mode 100644 drivers/ml/cnxk/cnxk_ml_xstats.h
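Reset semantics sketch (annotation only, not part of the committed change): the
device counters keep reset_allowed = 0 and are never cleared, while the per-model
latency stats are resettable. For a resettable entry, a reset can be emulated
without touching the underlying counters by capturing the current raw value in
reset_value and subtracting it on later reads, which is what the struct comment
in cnxk_ml_xstats.h describes. A rough illustration with a hypothetical helper:

/* Sketch only: emulate a reset for one resettable entry. */
static void
xstat_emulate_reset(struct rte_ml_dev *dev, struct cnxk_ml_xstats_entry *xs,
		    cnxk_ml_xstats_fn fn)
{
	if (!xs->reset_allowed)
		return;

	/* Later reads report fn(...) - reset_value, i.e. the delta since reset. */
	xs->reset_value = fn(dev, xs->obj_idx, xs->type);
}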
diff --git a/drivers/ml/cnxk/cn10k_ml_dev.h b/drivers/ml/cnxk/cn10k_ml_dev.h
index 1852d4f6c9..be989e0a20 100644
--- a/drivers/ml/cnxk/cn10k_ml_dev.h
+++ b/drivers/ml/cnxk/cn10k_ml_dev.h
@@ -10,6 +10,7 @@
#include "cn10k_ml_ocm.h"
#include "cnxk_ml_io.h"
+#include "cnxk_ml_xstats.h"
/* Dummy Device ops */
extern struct rte_ml_dev_ops ml_dev_dummy_ops;
@@ -121,89 +122,6 @@ struct cn10k_ml_fw {
struct cnxk_ml_req *req;
};
-/* Extended stats types enum */
-enum cn10k_ml_xstats_type {
- /* Number of models loaded */
- nb_models_loaded,
-
- /* Number of models unloaded */
- nb_models_unloaded,
-
- /* Number of models started */
- nb_models_started,
-
- /* Number of models stopped */
- nb_models_stopped,
-
- /* Average inference hardware latency */
- avg_hw_latency,
-
- /* Minimum hardware latency */
- min_hw_latency,
-
- /* Maximum hardware latency */
- max_hw_latency,
-
- /* Average firmware latency */
- avg_fw_latency,
-
- /* Minimum firmware latency */
- min_fw_latency,
-
- /* Maximum firmware latency */
- max_fw_latency,
-};
-
-/* Extended stats function type enum. */
-enum cn10k_ml_xstats_fn_type {
- /* Device function */
- CN10K_ML_XSTATS_FN_DEVICE,
-
- /* Model function */
- CN10K_ML_XSTATS_FN_MODEL,
-};
-
-/* Function pointer to get xstats for a type */
-typedef uint64_t (*cn10k_ml_xstats_fn)(struct rte_ml_dev *dev, uint16_t obj_idx,
- enum cn10k_ml_xstats_type stat);
-
-/* Extended stats entry structure */
-struct cn10k_ml_xstats_entry {
- /* Name-ID map */
- struct rte_ml_dev_xstats_map map;
-
- /* xstats mode, device or model */
- enum rte_ml_dev_xstats_mode mode;
-
- /* Type of xstats */
- enum cn10k_ml_xstats_type type;
-
- /* xstats function */
- enum cn10k_ml_xstats_fn_type fn_id;
-
- /* Object ID, model ID for model stat type */
- uint16_t obj_idx;
-
- /* Allowed to reset the stat */
- uint8_t reset_allowed;
-
- /* An offset to be taken away to emulate resets */
- uint64_t reset_value;
-};
-
-/* Extended stats data */
-struct cn10k_ml_xstats {
- /* Pointer to xstats entries */
- struct cn10k_ml_xstats_entry *entries;
-
- /* Store num stats and offset of the stats for each model */
- uint16_t count_per_model[ML_CNXK_MAX_MODELS];
- uint16_t offset_for_model[ML_CNXK_MAX_MODELS];
- uint16_t count_mode_device;
- uint16_t count_mode_model;
- uint16_t count;
-};
-
/* Device private data */
struct cn10k_ml_dev {
/* Device ROC */
@@ -216,7 +134,7 @@ struct cn10k_ml_dev {
struct cn10k_ml_ocm ocm;
/* Extended stats data */
- struct cn10k_ml_xstats xstats;
+ struct cnxk_ml_xstats xstats;
/* Enable / disable model data caching */
int cache_model_data;
diff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h
index 74ada1531a..5c32f48c68 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.h
+++ b/drivers/ml/cnxk/cn10k_ml_model.h
@@ -404,7 +404,7 @@ struct cn10k_ml_layer_addr {
};
/* Model fast-path stats */
-struct cn10k_ml_layer_stats {
+struct cn10k_ml_layer_xstats {
/* Total hardware latency, sum of all inferences */
uint64_t hw_latency_tot;
@@ -447,10 +447,10 @@ struct cn10k_ml_layer_data {
struct cnxk_ml_req *req;
/* Layer: Stats for burst ops */
- struct cn10k_ml_layer_stats *burst_stats;
+ struct cn10k_ml_layer_xstats *burst_xstats;
/* Layer: Stats for sync ops */
- struct cn10k_ml_layer_stats *sync_stats;
+ struct cn10k_ml_layer_xstats *sync_xstats;
};
struct cn10k_ml_model_data {
diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c
index 25ebb28993..b470955ffd 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.c
+++ b/drivers/ml/cnxk/cn10k_ml_ops.c
@@ -10,6 +10,7 @@
#include "cnxk_ml_dev.h"
#include "cnxk_ml_model.h"
#include "cnxk_ml_ops.h"
+#include "cnxk_ml_xstats.h"
/* ML model macros */
#define CN10K_ML_MODEL_MEMZONE_NAME "ml_cn10k_model_mz"
@@ -425,26 +426,6 @@ cn10k_ml_prep_fp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml
req->cn10k_req.jd.model_run.num_batches = op->nb_batches;
}
-struct xstat_info {
- char name[32];
- enum cn10k_ml_xstats_type type;
- uint8_t reset_allowed;
-};
-
-/* Note: Device stats are not allowed to be reset. */
-static const struct xstat_info device_stats[] = {
- {"nb_models_loaded", nb_models_loaded, 0},
- {"nb_models_unloaded", nb_models_unloaded, 0},
- {"nb_models_started", nb_models_started, 0},
- {"nb_models_stopped", nb_models_stopped, 0},
-};
-
-static const struct xstat_info model_stats[] = {
- {"Avg-HW-Latency", avg_hw_latency, 1}, {"Min-HW-Latency", min_hw_latency, 1},
- {"Max-HW-Latency", max_hw_latency, 1}, {"Avg-FW-Latency", avg_fw_latency, 1},
- {"Min-FW-Latency", min_fw_latency, 1}, {"Max-FW-Latency", max_fw_latency, 1},
-};
-
static int
cn10k_ml_xstats_init(struct rte_ml_dev *dev)
{
@@ -459,10 +440,10 @@ cn10k_ml_xstats_init(struct rte_ml_dev *dev)
cn10k_mldev = &cnxk_mldev->cn10k_mldev;
/* Allocate memory for xstats entries. Don't allocate during reconfigure */
- nb_stats = RTE_DIM(device_stats) + ML_CNXK_MAX_MODELS * RTE_DIM(model_stats);
+ nb_stats = RTE_DIM(device_xstats) + ML_CNXK_MAX_MODELS * RTE_DIM(layer_xstats);
if (cn10k_mldev->xstats.entries == NULL)
cn10k_mldev->xstats.entries = rte_zmalloc(
- "cn10k_ml_xstats", sizeof(struct cn10k_ml_xstats_entry) * nb_stats,
+ "cn10k_ml_xstats", sizeof(struct cnxk_ml_xstats_entry) * nb_stats,
PLT_CACHE_LINE_SIZE);
if (cn10k_mldev->xstats.entries == NULL)
@@ -470,17 +451,17 @@ cn10k_ml_xstats_init(struct rte_ml_dev *dev)
/* Initialize device xstats */
stat_id = 0;
- for (i = 0; i < RTE_DIM(device_stats); i++) {
+ for (i = 0; i < RTE_DIM(device_xstats); i++) {
cn10k_mldev->xstats.entries[stat_id].map.id = stat_id;
snprintf(cn10k_mldev->xstats.entries[stat_id].map.name,
sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), "%s",
- device_stats[i].name);
+ device_xstats[i].name);
cn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_DEVICE;
- cn10k_mldev->xstats.entries[stat_id].type = device_stats[i].type;
- cn10k_mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_DEVICE;
+ cn10k_mldev->xstats.entries[stat_id].type = device_xstats[i].type;
+ cn10k_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_DEVICE;
cn10k_mldev->xstats.entries[stat_id].obj_idx = 0;
- cn10k_mldev->xstats.entries[stat_id].reset_allowed = device_stats[i].reset_allowed;
+ cn10k_mldev->xstats.entries[stat_id].reset_allowed = device_xstats[i].reset_allowed;
stat_id++;
}
cn10k_mldev->xstats.count_mode_device = stat_id;
@@ -489,24 +470,24 @@ cn10k_ml_xstats_init(struct rte_ml_dev *dev)
for (model = 0; model < ML_CNXK_MAX_MODELS; model++) {
cn10k_mldev->xstats.offset_for_model[model] = stat_id;
- for (i = 0; i < RTE_DIM(model_stats); i++) {
+ for (i = 0; i < RTE_DIM(layer_xstats); i++) {
cn10k_mldev->xstats.entries[stat_id].map.id = stat_id;
cn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;
- cn10k_mldev->xstats.entries[stat_id].type = model_stats[i].type;
- cn10k_mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_MODEL;
+ cn10k_mldev->xstats.entries[stat_id].type = layer_xstats[i].type;
+ cn10k_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_MODEL;
cn10k_mldev->xstats.entries[stat_id].obj_idx = model;
cn10k_mldev->xstats.entries[stat_id].reset_allowed =
- model_stats[i].reset_allowed;
+ layer_xstats[i].reset_allowed;
/* Name of xstat is updated during model load */
snprintf(cn10k_mldev->xstats.entries[stat_id].map.name,
sizeof(cn10k_mldev->xstats.entries[stat_id].map.name),
- "Model-%u-%s", model, model_stats[i].name);
+ "Model-%u-%s", model, layer_xstats[i].name);
stat_id++;
}
- cn10k_mldev->xstats.count_per_model[model] = RTE_DIM(model_stats);
+ cn10k_mldev->xstats.count_per_model[model] = RTE_DIM(layer_xstats);
}
cn10k_mldev->xstats.count_mode_model = stat_id - cn10k_mldev->xstats.count_mode_device;
@@ -545,7 +526,7 @@ cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)
cnxk_mldev = dev->data->dev_private;
cn10k_mldev = &cnxk_mldev->cn10k_mldev;
model = dev->data->models[model_id];
- stat_id = RTE_DIM(device_stats) + model_id * RTE_DIM(model_stats);
+ stat_id = RTE_DIM(device_xstats) + model_id * RTE_DIM(layer_xstats);
roc_clk_freq_get(&rclk_freq, &sclk_freq);
if (sclk_freq == 0)
@@ -554,17 +535,17 @@ cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)
strcpy(suffix, "ns");
/* Update xstat name based on model name and sclk availability */
- for (i = 0; i < RTE_DIM(model_stats); i++) {
+ for (i = 0; i < RTE_DIM(layer_xstats); i++) {
snprintf(cn10k_mldev->xstats.entries[stat_id].map.name,
sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), "%s-%s-%s",
- model->layer[0].glow.metadata.model.name, model_stats[i].name, suffix);
+ model->layer[0].glow.metadata.model.name, layer_xstats[i].name, suffix);
stat_id++;
}
}
static uint64_t
cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,
- enum cn10k_ml_xstats_type type)
+ enum cnxk_ml_xstats_type type)
{
struct cnxk_ml_dev *cnxk_mldev;
@@ -590,9 +571,9 @@ cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,
do { \
value = 0; \
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { \
- value += model->layer[0].glow.burst_stats[qp_id].str##_latency_tot; \
- count += model->layer[0].glow.burst_stats[qp_id].dequeued_count - \
- model->layer[0].glow.burst_stats[qp_id].str##_reset_count; \
+ value += model->layer[0].glow.burst_xstats[qp_id].str##_latency_tot; \
+ count += model->layer[0].glow.burst_xstats[qp_id].dequeued_count - \
+ model->layer[0].glow.burst_xstats[qp_id].str##_reset_count; \
} \
if (count != 0) \
value = value / count; \
@@ -603,9 +584,10 @@ cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,
value = UINT64_MAX; \
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { \
value = PLT_MIN( \
- value, model->layer[0].glow.burst_stats[qp_id].str##_latency_min); \
- count += model->layer[0].glow.burst_stats[qp_id].dequeued_count - \
- model->layer[0].glow.burst_stats[qp_id].str##_reset_count; \
+ value, \
+ model->layer[0].glow.burst_xstats[qp_id].str##_latency_min); \
+ count += model->layer[0].glow.burst_xstats[qp_id].dequeued_count - \
+ model->layer[0].glow.burst_xstats[qp_id].str##_reset_count; \
} \
if (count == 0) \
value = 0; \
@@ -616,16 +598,17 @@ cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,
value = 0; \
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { \
value = PLT_MAX( \
- value, model->layer[0].glow.burst_stats[qp_id].str##_latency_max); \
- count += model->layer[0].glow.burst_stats[qp_id].dequeued_count - \
- model->layer[0].glow.burst_stats[qp_id].str##_reset_count; \
+ value, \
+ model->layer[0].glow.burst_xstats[qp_id].str##_latency_max); \
+ count += model->layer[0].glow.burst_xstats[qp_id].dequeued_count - \
+ model->layer[0].glow.burst_xstats[qp_id].str##_reset_count; \
} \
if (count == 0) \
value = 0; \
} while (0)
static uint64_t
-cn10k_ml_model_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx, enum cn10k_ml_xstats_type type)
+cn10k_ml_model_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx, enum cnxk_ml_xstats_type type)
{
struct cnxk_ml_model *model;
uint16_t rclk_freq; /* MHz */
@@ -671,8 +654,8 @@ cn10k_ml_model_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx, enum cn10k_ml
static int
cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[], uint16_t nb_ids)
{
- struct cn10k_ml_xstats_entry *xs;
struct cn10k_ml_dev *cn10k_mldev;
+ struct cnxk_ml_xstats_entry *xs;
struct cnxk_ml_dev *cnxk_mldev;
uint16_t nb_stats;
uint16_t stat_id;
@@ -708,26 +691,26 @@ cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[],
#define ML_AVG_RESET_FOREACH_QP(dev, model, qp_id, str) \
do { \
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { \
- model->layer[0].glow.burst_stats[qp_id].str##_latency_tot = 0; \
- model->layer[0].glow.burst_stats[qp_id].str##_reset_count = \
- model->layer[0].glow.burst_stats[qp_id].dequeued_count; \
+ model->layer[0].glow.burst_xstats[qp_id].str##_latency_tot = 0; \
+ model->layer[0].glow.burst_xstats[qp_id].str##_reset_count = \
+ model->layer[0].glow.burst_xstats[qp_id].dequeued_count; \
} \
} while (0)
#define ML_MIN_RESET_FOREACH_QP(dev, model, qp_id, str) \
do { \
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) \
- model->layer[0].glow.burst_stats[qp_id].str##_latency_min = UINT64_MAX; \
+ model->layer[0].glow.burst_xstats[qp_id].str##_latency_min = UINT64_MAX; \
} while (0)
#define ML_MAX_RESET_FOREACH_QP(dev, model, qp_id, str) \
do { \
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) \
- model->layer[0].glow.burst_stats[qp_id].str##_latency_max = 0; \
+ model->layer[0].glow.burst_xstats[qp_id].str##_latency_max = 0; \
} while (0)
static void
-cn10k_ml_reset_model_stat(struct rte_ml_dev *dev, uint16_t model_id, enum cn10k_ml_xstats_type type)
+cn10k_ml_reset_model_stat(struct rte_ml_dev *dev, uint16_t model_id, enum cnxk_ml_xstats_type type)
{
struct cnxk_ml_model *model;
uint32_t qp_id;
@@ -762,8 +745,8 @@ static int
cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint16_t stat_ids[],
uint16_t nb_ids)
{
- struct cn10k_ml_xstats_entry *xs;
struct cn10k_ml_dev *cn10k_mldev;
+ struct cnxk_ml_xstats_entry *xs;
struct cnxk_ml_dev *cnxk_mldev;
struct cnxk_ml_model *model;
int32_t lcl_model_id = 0;
@@ -1342,10 +1325,10 @@ static int
cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16_t *stat_id,
uint64_t *value)
{
- struct cn10k_ml_xstats_entry *xs;
+ struct cnxk_ml_xstats_entry *xs;
struct cn10k_ml_dev *cn10k_mldev;
struct cnxk_ml_dev *cnxk_mldev;
- cn10k_ml_xstats_fn fn;
+ cnxk_ml_xstats_fn fn;
uint32_t i;
cnxk_mldev = dev->data->dev_private;
@@ -1357,10 +1340,10 @@ cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16
*stat_id = xs->map.id;
switch (xs->fn_id) {
- case CN10K_ML_XSTATS_FN_DEVICE:
+ case CNXK_ML_XSTATS_FN_DEVICE:
fn = cn10k_ml_dev_xstat_get;
break;
- case CN10K_ML_XSTATS_FN_MODEL:
+ case CNXK_ML_XSTATS_FN_MODEL:
fn = cn10k_ml_model_xstat_get;
break;
default:
@@ -1384,11 +1367,11 @@ static int
cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode, int32_t model_id,
const uint16_t stat_ids[], uint64_t values[], uint16_t nb_ids)
{
- struct cn10k_ml_xstats_entry *xs;
struct cn10k_ml_dev *cn10k_mldev;
+ struct cnxk_ml_xstats_entry *xs;
struct cnxk_ml_dev *cnxk_mldev;
uint32_t xstats_mode_count;
- cn10k_ml_xstats_fn fn;
+ cnxk_ml_xstats_fn fn;
uint64_t val;
uint32_t idx;
uint32_t i;
@@ -1423,10 +1406,10 @@ cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode
}
switch (xs->fn_id) {
- case CN10K_ML_XSTATS_FN_DEVICE:
+ case CNXK_ML_XSTATS_FN_DEVICE:
fn = cn10k_ml_dev_xstat_get;
break;
- case CN10K_ML_XSTATS_FN_MODEL:
+ case CNXK_ML_XSTATS_FN_MODEL:
fn = cn10k_ml_model_xstat_get;
break;
default:
@@ -1664,7 +1647,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
metadata->model.num_input * sizeof(struct rte_ml_io_info) +
metadata->model.num_output * sizeof(struct rte_ml_io_info);
model_info_size = PLT_ALIGN_CEIL(model_info_size, ML_CN10K_ALIGN_SIZE);
- model_stats_size = (dev->data->nb_queue_pairs + 1) * sizeof(struct cn10k_ml_layer_stats);
+ model_stats_size = (dev->data->nb_queue_pairs + 1) * sizeof(struct cn10k_ml_layer_xstats);
mz_size = PLT_ALIGN_CEIL(sizeof(struct cnxk_ml_model), ML_CN10K_ALIGN_SIZE) +
2 * model_data_size + model_scratch_size + model_info_size +
@@ -1738,24 +1721,24 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
model->layer[0].glow.req = PLT_PTR_ADD(model->info, model_info_size);
/* Reset burst and sync stats */
- model->layer[0].glow.burst_stats =
+ model->layer[0].glow.burst_xstats =
PLT_PTR_ADD(model->layer[0].glow.req,
PLT_ALIGN_CEIL(sizeof(struct cnxk_ml_req), ML_CN10K_ALIGN_SIZE));
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs + 1; qp_id++) {
- model->layer[0].glow.burst_stats[qp_id].hw_latency_tot = 0;
- model->layer[0].glow.burst_stats[qp_id].hw_latency_min = UINT64_MAX;
- model->layer[0].glow.burst_stats[qp_id].hw_latency_max = 0;
- model->layer[0].glow.burst_stats[qp_id].fw_latency_tot = 0;
- model->layer[0].glow.burst_stats[qp_id].fw_latency_min = UINT64_MAX;
- model->layer[0].glow.burst_stats[qp_id].fw_latency_max = 0;
- model->layer[0].glow.burst_stats[qp_id].hw_reset_count = 0;
- model->layer[0].glow.burst_stats[qp_id].fw_reset_count = 0;
- model->layer[0].glow.burst_stats[qp_id].dequeued_count = 0;
+ model->layer[0].glow.burst_xstats[qp_id].hw_latency_tot = 0;
+ model->layer[0].glow.burst_xstats[qp_id].hw_latency_min = UINT64_MAX;
+ model->layer[0].glow.burst_xstats[qp_id].hw_latency_max = 0;
+ model->layer[0].glow.burst_xstats[qp_id].fw_latency_tot = 0;
+ model->layer[0].glow.burst_xstats[qp_id].fw_latency_min = UINT64_MAX;
+ model->layer[0].glow.burst_xstats[qp_id].fw_latency_max = 0;
+ model->layer[0].glow.burst_xstats[qp_id].hw_reset_count = 0;
+ model->layer[0].glow.burst_xstats[qp_id].fw_reset_count = 0;
+ model->layer[0].glow.burst_xstats[qp_id].dequeued_count = 0;
}
- model->layer[0].glow.sync_stats =
- PLT_PTR_ADD(model->layer[0].glow.burst_stats,
- dev->data->nb_queue_pairs * sizeof(struct cn10k_ml_layer_stats));
+ model->layer[0].glow.sync_xstats =
+ PLT_PTR_ADD(model->layer[0].glow.burst_xstats,
+ dev->data->nb_queue_pairs * sizeof(struct cn10k_ml_layer_xstats));
plt_spinlock_init(&model->lock);
model->state = ML_CNXK_MODEL_STATE_LOADED;
@@ -2308,7 +2291,7 @@ static __rte_always_inline void
cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cnxk_ml_req *req)
{
union cn10k_ml_error_code *error_code;
- struct cn10k_ml_layer_stats *stats;
+ struct cn10k_ml_layer_xstats *xstats;
struct cn10k_ml_dev *cn10k_mldev;
struct cnxk_ml_dev *cnxk_mldev;
struct cn10k_ml_result *result;
@@ -2326,31 +2309,31 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cnxk_ml_req *re
if (likely(qp_id >= 0)) {
qp = dev->data->queue_pairs[qp_id];
qp->stats.dequeued_count++;
- stats = &model->layer[0].glow.burst_stats[qp_id];
+ xstats = &model->layer[0].glow.burst_xstats[qp_id];
} else {
- stats = model->layer[0].glow.sync_stats;
+ xstats = model->layer[0].glow.sync_xstats;
}
- if (unlikely(stats->dequeued_count == stats->hw_reset_count)) {
- stats->hw_latency_min = UINT64_MAX;
- stats->hw_latency_max = 0;
+ if (unlikely(xstats->dequeued_count == xstats->hw_reset_count)) {
+ xstats->hw_latency_min = UINT64_MAX;
+ xstats->hw_latency_max = 0;
}
- if (unlikely(stats->dequeued_count == stats->fw_reset_count)) {
- stats->fw_latency_min = UINT64_MAX;
- stats->fw_latency_max = 0;
+ if (unlikely(xstats->dequeued_count == xstats->fw_reset_count)) {
+ xstats->fw_latency_min = UINT64_MAX;
+ xstats->fw_latency_max = 0;
}
hw_latency = result->stats.hw_end - result->stats.hw_start;
fw_latency = result->stats.fw_end - result->stats.fw_start - hw_latency;
- stats->hw_latency_tot += hw_latency;
- stats->hw_latency_min = PLT_MIN(stats->hw_latency_min, hw_latency);
- stats->hw_latency_max = PLT_MAX(stats->hw_latency_max, hw_latency);
- stats->fw_latency_tot += fw_latency;
- stats->fw_latency_min = PLT_MIN(stats->fw_latency_min, fw_latency);
- stats->fw_latency_max = PLT_MAX(stats->fw_latency_max, fw_latency);
- stats->dequeued_count++;
+ xstats->hw_latency_tot += hw_latency;
+ xstats->hw_latency_min = PLT_MIN(xstats->hw_latency_min, hw_latency);
+ xstats->hw_latency_max = PLT_MAX(xstats->hw_latency_max, hw_latency);
+ xstats->fw_latency_tot += fw_latency;
+ xstats->fw_latency_min = PLT_MIN(xstats->fw_latency_min, fw_latency);
+ xstats->fw_latency_max = PLT_MAX(xstats->fw_latency_max, fw_latency);
+ xstats->dequeued_count++;
op->impl_opaque = result->error_code;
op->status = RTE_ML_OP_STATUS_SUCCESS;
diff --git a/drivers/ml/cnxk/cnxk_ml_xstats.h b/drivers/ml/cnxk/cnxk_ml_xstats.h
new file mode 100644
index 0000000000..0d405679ca
--- /dev/null
+++ b/drivers/ml/cnxk/cnxk_ml_xstats.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Marvell.
+ */
+
+#ifndef _CNXK_ML_XSTATS_H_
+#define _CNXK_ML_XSTATS_H_
+
+#include "cnxk_ml_io.h"
+
+/* Extended stats types enum */
+enum cnxk_ml_xstats_type {
+ /* Number of models loaded */
+ nb_models_loaded,
+
+ /* Number of models unloaded */
+ nb_models_unloaded,
+
+ /* Number of models started */
+ nb_models_started,
+
+ /* Number of models stopped */
+ nb_models_stopped,
+
+ /* Average inference hardware latency */
+ avg_hw_latency,
+
+ /* Minimum hardware latency */
+ min_hw_latency,
+
+ /* Maximum hardware latency */
+ max_hw_latency,
+
+ /* Average firmware latency */
+ avg_fw_latency,
+
+ /* Minimum firmware latency */
+ min_fw_latency,
+
+ /* Maximum firmware latency */
+ max_fw_latency,
+
+ /* Average runtime latency */
+ avg_rt_latency,
+
+ /* Minimum runtime latency */
+ min_rt_latency,
+
+ /* Maximum runtime latency */
+ max_rt_latency,
+};
+
+/* Extended stats function type enum. */
+enum cnxk_ml_xstats_fn_type {
+ /* Device function */
+ CNXK_ML_XSTATS_FN_DEVICE,
+
+ /* Model function */
+ CNXK_ML_XSTATS_FN_MODEL,
+};
+
+/* Function pointer to get xstats for a type */
+typedef uint64_t (*cnxk_ml_xstats_fn)(struct rte_ml_dev *cnxk_mldev, uint16_t obj_idx,
+ enum cnxk_ml_xstats_type stat);
+
+/* Extended stats entry structure */
+struct cnxk_ml_xstats_entry {
+ /* Name-ID map */
+ struct rte_ml_dev_xstats_map map;
+
+ /* xstats mode, device or model */
+ enum rte_ml_dev_xstats_mode mode;
+
+ /* Type of xstats */
+ enum cnxk_ml_xstats_type type;
+
+ /* xstats function */
+ enum cnxk_ml_xstats_fn_type fn_id;
+
+ /* Object ID, model ID for model stat type */
+ uint16_t obj_idx;
+
+ /* Layer ID, valid for model stat type */
+ int32_t layer_id;
+
+ /* Allowed to reset the stat */
+ uint8_t reset_allowed;
+
+ /* An offset to be taken away to emulate resets */
+ uint64_t reset_value;
+};
+
+/* Extended stats data */
+struct cnxk_ml_xstats {
+ /* Pointer to xstats entries */
+ struct cnxk_ml_xstats_entry *entries;
+
+ /* Store num stats and offset of the stats for each model */
+ uint16_t count_per_model[ML_CNXK_MAX_MODELS];
+ uint16_t offset_for_model[ML_CNXK_MAX_MODELS];
+ uint16_t count_per_layer[ML_CNXK_MAX_MODELS][ML_CNXK_MODEL_MAX_LAYERS];
+ uint16_t offset_for_layer[ML_CNXK_MAX_MODELS][ML_CNXK_MODEL_MAX_LAYERS];
+ uint16_t count_mode_device;
+ uint16_t count_mode_model;
+ uint16_t count;
+};
+
+struct cnxk_ml_xstat_info {
+ char name[32];
+ enum cnxk_ml_xstats_type type;
+ uint8_t reset_allowed;
+};
+
+/* Device xstats. Note: Device stats are not allowed to be reset. */
+static const struct cnxk_ml_xstat_info device_xstats[] = {
+ {"nb_models_loaded", nb_models_loaded, 0},
+ {"nb_models_unloaded", nb_models_unloaded, 0},
+ {"nb_models_started", nb_models_started, 0},
+ {"nb_models_stopped", nb_models_stopped, 0},
+};
+
+/* Layer xstats */
+static const struct cnxk_ml_xstat_info layer_xstats[] = {
+ {"Avg-HW-Latency", avg_hw_latency, 1}, {"Min-HW-Latency", min_hw_latency, 1},
+ {"Max-HW-Latency", max_hw_latency, 1}, {"Avg-FW-Latency", avg_fw_latency, 1},
+ {"Min-FW-Latency", min_fw_latency, 1}, {"Max-FW-Latency", max_fw_latency, 1},
+};
+
+#endif /* _CNXK_ML_XSTATS_H_ */
--
2.42.0