From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <jerinj@marvell.com>,
<aprabhu@marvell.com>
Subject: [PATCH v3 11/12] app/mldev: enable reporting stats in mldev app
Date: Thu, 8 Dec 2022 11:29:17 -0800 [thread overview]
Message-ID: <20221208192918.25022-11-syalavarthi@marvell.com> (raw)
In-Reply-To: <20221208192918.25022-1-syalavarthi@marvell.com>
Enable reporting driver xstats and inference end-to-end
latency and throughput in mldev inference tests. Reporting
of stats can be enabled using the "--stats" option.
Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
v3:
* Fixed issue in reporting end-to-end inference stats
v2:
* Fixed typos and formatting issues
app/test-mldev/ml_options.c | 22 ++--
app/test-mldev/ml_options.h | 2 +
app/test-mldev/test_inference_common.c | 139 +++++++++++++++++++++
app/test-mldev/test_inference_common.h | 8 ++
app/test-mldev/test_inference_interleave.c | 4 +
app/test-mldev/test_inference_ordered.c | 1 +
6 files changed, 168 insertions(+), 8 deletions(-)
diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 092303903f..0e7877eed3 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -36,6 +36,7 @@ ml_options_default(struct ml_options *opt)
opt->queue_size = 1;
opt->batches = 0;
opt->tolerance = 0.0;
+ opt->stats = false;
opt->debug = false;
}
@@ -222,7 +223,8 @@ ml_dump_test_options(const char *testname)
"\t\t--queue_pairs : number of queue pairs to create\n"
"\t\t--queue_size : size fo queue-pair\n"
"\t\t--batches : number of batches of input\n"
- "\t\t--tolerance : maximum tolerance (%%) for output validation\n");
+ "\t\t--tolerance : maximum tolerance (%%) for output validation\n"
+ "\t\t--stats : enable reporting performance statistics\n");
printf("\n");
}
}
@@ -242,13 +244,12 @@ print_usage(char *program)
ml_test_dump_names(ml_dump_test_options);
}
-static struct option lgopts[] = {{ML_TEST, 1, 0, 0}, {ML_DEVICE_ID, 1, 0, 0},
- {ML_SOCKET_ID, 1, 0, 0}, {ML_MODELS, 1, 0, 0},
- {ML_FILELIST, 1, 0, 0}, {ML_REPETITIONS, 1, 0, 0},
- {ML_BURST_SIZE, 1, 0, 0}, {ML_QUEUE_PAIRS, 1, 0, 0},
- {ML_QUEUE_SIZE, 1, 0, 0}, {ML_BATCHES, 1, 0, 0},
- {ML_TOLERANCE, 1, 0, 0}, {ML_DEBUG, 0, 0, 0},
- {ML_HELP, 0, 0, 0}, {NULL, 0, 0, 0}};
+static struct option lgopts[] = {
+ {ML_TEST, 1, 0, 0}, {ML_DEVICE_ID, 1, 0, 0}, {ML_SOCKET_ID, 1, 0, 0},
+ {ML_MODELS, 1, 0, 0}, {ML_FILELIST, 1, 0, 0}, {ML_REPETITIONS, 1, 0, 0},
+ {ML_BURST_SIZE, 1, 0, 0}, {ML_QUEUE_PAIRS, 1, 0, 0}, {ML_QUEUE_SIZE, 1, 0, 0},
+ {ML_BATCHES, 1, 0, 0}, {ML_TOLERANCE, 1, 0, 0}, {ML_STATS, 0, 0, 0},
+ {ML_DEBUG, 0, 0, 0}, {ML_HELP, 0, 0, 0}, {NULL, 0, 0, 0}};
static int
ml_opts_parse_long(int opt_idx, struct ml_options *opt)
@@ -283,6 +284,11 @@ ml_options_parse(struct ml_options *opt, int argc, char **argv)
while ((opts = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
switch (opts) {
case 0: /* parse long options */
+ if (!strcmp(lgopts[opt_idx].name, "stats")) {
+ opt->stats = true;
+ break;
+ }
+
if (!strcmp(lgopts[opt_idx].name, "debug")) {
opt->debug = true;
break;
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index 79ac54de98..a375ae6750 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -24,6 +24,7 @@
#define ML_QUEUE_SIZE ("queue_size")
#define ML_BATCHES ("batches")
#define ML_TOLERANCE ("tolerance")
+#define ML_STATS ("stats")
#define ML_DEBUG ("debug")
#define ML_HELP ("help")
@@ -46,6 +47,7 @@ struct ml_options {
uint16_t queue_size;
uint16_t batches;
float tolerance;
+ bool stats;
bool debug;
};
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index 008cee1023..8d1fc55c2f 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -11,6 +11,7 @@
#include <unistd.h>
#include <rte_common.h>
+#include <rte_cycles.h>
#include <rte_hash_crc.h>
#include <rte_launch.h>
#include <rte_lcore.h>
@@ -45,6 +46,17 @@
} \
} while (0)
+static void
+print_line(uint16_t len)
+{
+ uint16_t i;
+
+ for (i = 0; i < len; i++)
+ printf("-");
+
+ printf("\n");
+}
+
/* Enqueue inference requests with burst size equal to 1 */
static int
ml_enqueue_single(void *arg)
@@ -54,6 +66,7 @@ ml_enqueue_single(void *arg)
struct rte_ml_op *op = NULL;
struct ml_core_args *args;
uint64_t model_enq = 0;
+ uint64_t start_cycle;
uint32_t burst_enq;
uint32_t lcore_id;
int16_t fid;
@@ -61,6 +74,7 @@ ml_enqueue_single(void *arg)
lcore_id = rte_lcore_id();
args = &t->args[lcore_id];
+ args->start_cycles = 0;
model_enq = 0;
if (args->nb_reqs == 0)
@@ -96,10 +110,12 @@ ml_enqueue_single(void *arg)
req->fid = fid;
enqueue_req:
+ start_cycle = rte_get_tsc_cycles();
burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, args->qp_id, &op, 1);
if (burst_enq == 0)
goto enqueue_req;
+ args->start_cycles += start_cycle;
fid++;
if (likely(fid <= args->end_fid))
goto next_model;
@@ -123,10 +139,12 @@ ml_dequeue_single(void *arg)
uint64_t total_deq = 0;
uint8_t nb_filelist;
uint32_t burst_deq;
+ uint64_t end_cycle;
uint32_t lcore_id;
lcore_id = rte_lcore_id();
args = &t->args[lcore_id];
+ args->end_cycles = 0;
nb_filelist = args->end_fid - args->start_fid + 1;
if (args->nb_reqs == 0)
@@ -134,9 +152,11 @@ ml_dequeue_single(void *arg)
dequeue_req:
burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, args->qp_id, &op, 1);
+ end_cycle = rte_get_tsc_cycles();
if (likely(burst_deq == 1)) {
total_deq += burst_deq;
+ args->end_cycles += end_cycle;
if (unlikely(op->status == RTE_ML_OP_STATUS_ERROR)) {
rte_ml_op_error_get(t->cmn.opt->dev_id, op, &error);
ml_err("error_code = 0x%" PRIx64 ", error_message = %s\n", error.errcode,
@@ -159,6 +179,7 @@ ml_enqueue_burst(void *arg)
{
struct test_inference *t = ml_test_priv((struct ml_test *)arg);
struct ml_core_args *args;
+ uint64_t start_cycle;
uint16_t ops_count;
uint64_t model_enq;
uint16_t burst_enq;
@@ -171,6 +192,7 @@ ml_enqueue_burst(void *arg)
lcore_id = rte_lcore_id();
args = &t->args[lcore_id];
+ args->start_cycles = 0;
model_enq = 0;
if (args->nb_reqs == 0)
@@ -212,8 +234,10 @@ ml_enqueue_burst(void *arg)
pending = ops_count;
enqueue_reqs:
+ start_cycle = rte_get_tsc_cycles();
burst_enq =
rte_ml_enqueue_burst(t->cmn.opt->dev_id, args->qp_id, &args->enq_ops[idx], pending);
+ args->start_cycles += burst_enq * start_cycle;
pending = pending - burst_enq;
if (pending > 0) {
@@ -243,11 +267,13 @@ ml_dequeue_burst(void *arg)
uint64_t total_deq = 0;
uint16_t burst_deq = 0;
uint8_t nb_filelist;
+ uint64_t end_cycle;
uint32_t lcore_id;
uint32_t i;
lcore_id = rte_lcore_id();
args = &t->args[lcore_id];
+ args->end_cycles = 0;
nb_filelist = args->end_fid - args->start_fid + 1;
if (args->nb_reqs == 0)
@@ -256,9 +282,11 @@ ml_dequeue_burst(void *arg)
dequeue_burst:
burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, args->qp_id, args->deq_ops,
t->cmn.opt->burst_size);
+ end_cycle = rte_get_tsc_cycles();
if (likely(burst_deq > 0)) {
total_deq += burst_deq;
+ args->end_cycles += burst_deq * end_cycle;
for (i = 0; i < burst_deq; i++) {
if (unlikely(args->deq_ops[i]->status == RTE_ML_OP_STATUS_ERROR)) {
@@ -387,6 +415,7 @@ test_inference_opt_dump(struct ml_options *opt)
ml_dump("queue_pairs", "%u", opt->queue_pairs);
ml_dump("queue_size", "%u", opt->queue_size);
ml_dump("tolerance", "%-7.3f", opt->tolerance);
+ ml_dump("stats", "%s", (opt->stats ? "true" : "false"));
if (opt->batches == 0)
ml_dump("batches", "%u (default)", opt->batches);
@@ -459,6 +488,11 @@ test_inference_setup(struct ml_test *test, struct ml_options *opt)
RTE_CACHE_LINE_SIZE, opt->socket_id);
}
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ t->args[i].start_cycles = 0;
+ t->args[i].end_cycles = 0;
+ }
+
return 0;
error:
@@ -985,3 +1019,108 @@ ml_inference_launch_cores(struct ml_test *test, struct ml_options *opt, int16_t
return 0;
}
+
+int
+ml_inference_stats_get(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t = ml_test_priv(test);
+ uint64_t total_cycles = 0;
+ uint32_t nb_filelist;
+ uint64_t throughput;
+ uint64_t avg_e2e;
+ uint32_t qp_id;
+ uint64_t freq;
+ int ret;
+ int i;
+
+ if (!opt->stats)
+ return 0;
+
+ /* get xstats size */
+ t->xstats_size = rte_ml_dev_xstats_names_get(opt->dev_id, NULL, 0);
+ if (t->xstats_size >= 0) {
+ /* allocate for xstats_map and values */
+ t->xstats_map = rte_malloc(
+ "ml_xstats_map", t->xstats_size * sizeof(struct rte_ml_dev_xstats_map), 0);
+ if (t->xstats_map == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ t->xstats_values =
+ rte_malloc("ml_xstats_values", t->xstats_size * sizeof(uint64_t), 0);
+ if (t->xstats_values == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = rte_ml_dev_xstats_names_get(opt->dev_id, t->xstats_map, t->xstats_size);
+ if (ret != t->xstats_size) {
+ printf("Unable to get xstats names, ret = %d\n", ret);
+ ret = -1;
+ goto error;
+ }
+
+ for (i = 0; i < t->xstats_size; i++)
+ rte_ml_dev_xstats_get(opt->dev_id, &t->xstats_map[i].id,
+ &t->xstats_values[i], 1);
+ }
+
+ /* print xstats*/
+ printf("\n");
+ print_line(80);
+ printf(" ML Device Extended Statistics\n");
+ print_line(80);
+ for (i = 0; i < t->xstats_size; i++)
+ printf(" %-64s = %" PRIu64 "\n", t->xstats_map[i].name, t->xstats_values[i]);
+ print_line(80);
+
+ /* release buffers */
+ if (t->xstats_map)
+ rte_free(t->xstats_map);
+
+ if (t->xstats_values)
+ rte_free(t->xstats_values);
+
+ /* print end-to-end stats */
+ freq = rte_get_tsc_hz();
+ for (qp_id = 0; qp_id < RTE_MAX_LCORE; qp_id++)
+ total_cycles += t->args[qp_id].end_cycles - t->args[qp_id].start_cycles;
+ avg_e2e = total_cycles / opt->repetitions;
+
+ if (freq == 0) {
+ avg_e2e = total_cycles / opt->repetitions;
+ printf(" %-64s = %" PRIu64 "\n", "Average End-to-End Latency (cycles)", avg_e2e);
+ } else {
+ avg_e2e = (total_cycles * NS_PER_S) / (opt->repetitions * freq);
+ printf(" %-64s = %" PRIu64 "\n", "Average End-to-End Latency (ns)", avg_e2e);
+ }
+
+ if (strcmp(opt->test_name, "inference_ordered") == 0)
+ nb_filelist = 1;
+ else
+ nb_filelist = t->cmn.opt->nb_filelist;
+
+ if (freq == 0) {
+ throughput = (nb_filelist * t->cmn.opt->repetitions * 1000000) / total_cycles;
+ printf(" %-64s = %" PRIu64 "\n", "Average Throughput (inferences / million cycles)",
+ throughput);
+ } else {
+ throughput = (nb_filelist * t->cmn.opt->repetitions * freq) / total_cycles;
+ printf(" %-64s = %" PRIu64 "\n", "Average Throughput (inferences / second)",
+ throughput);
+ }
+
+ print_line(80);
+
+ return 0;
+
+error:
+ if (t->xstats_map)
+ rte_free(t->xstats_map);
+
+ if (t->xstats_values)
+ rte_free(t->xstats_values);
+
+ return ret;
+}
diff --git a/app/test-mldev/test_inference_common.h b/app/test-mldev/test_inference_common.h
index 3f2b042360..bb2920cc30 100644
--- a/app/test-mldev/test_inference_common.h
+++ b/app/test-mldev/test_inference_common.h
@@ -32,6 +32,9 @@ struct ml_core_args {
struct rte_ml_op **enq_ops;
struct rte_ml_op **deq_ops;
struct ml_request **reqs;
+
+ uint64_t start_cycles;
+ uint64_t end_cycles;
};
struct test_inference {
@@ -50,6 +53,10 @@ struct test_inference {
int (*dequeue)(void *arg);
struct ml_core_args args[RTE_MAX_LCORE];
+
+ struct rte_ml_dev_xstats_map *xstats_map;
+ uint64_t *xstats_values;
+ int xstats_size;
} __rte_cache_aligned;
bool test_inference_cap_check(struct ml_options *opt);
@@ -67,5 +74,6 @@ void ml_inference_mem_destroy(struct ml_test *test, struct ml_options *opt);
int ml_inference_result(struct ml_test *test, struct ml_options *opt, int16_t fid);
int ml_inference_launch_cores(struct ml_test *test, struct ml_options *opt, int16_t start_fid,
int16_t end_fid);
+int ml_inference_stats_get(struct ml_test *test, struct ml_options *opt);
#endif /* _ML_TEST_INFERENCE_COMMON_ */
diff --git a/app/test-mldev/test_inference_interleave.c b/app/test-mldev/test_inference_interleave.c
index 74ad0c597f..d86838c3fa 100644
--- a/app/test-mldev/test_inference_interleave.c
+++ b/app/test-mldev/test_inference_interleave.c
@@ -60,7 +60,11 @@ test_inference_interleave_driver(struct ml_test *test, struct ml_options *opt)
goto error;
ml_inference_iomem_destroy(test, opt, fid);
+ }
+
+ ml_inference_stats_get(test, opt);
+ for (fid = 0; fid < opt->nb_filelist; fid++) {
ret = ml_model_stop(test, opt, &t->model[fid], fid);
if (ret != 0)
goto error;
diff --git a/app/test-mldev/test_inference_ordered.c b/app/test-mldev/test_inference_ordered.c
index 84e6bf9109..3826121a65 100644
--- a/app/test-mldev/test_inference_ordered.c
+++ b/app/test-mldev/test_inference_ordered.c
@@ -58,6 +58,7 @@ test_inference_ordered_driver(struct ml_test *test, struct ml_options *opt)
goto error;
ml_inference_iomem_destroy(test, opt, fid);
+ ml_inference_stats_get(test, opt);
/* stop model */
ret = ml_model_stop(test, opt, &t->model[fid], fid);
--
2.17.1
next prev parent reply other threads:[~2022-12-08 19:30 UTC|newest]
Thread overview: 122+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-29 7:07 [PATCH v1 00/12] implement mldev test application Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2022-11-29 8:20 ` [PATCH v2 " Srikanth Yalavarthi
2022-11-29 8:20 ` [PATCH v2 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-12-08 19:29 ` Srikanth Yalavarthi [this message]
2023-02-03 9:49 ` [PATCH v3 11/12] app/mldev: enable reporting stats in mldev app Anup Prabhu
2022-12-08 19:29 ` [PATCH v3 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-02 12:39 ` Anup Prabhu
2022-11-29 7:07 ` [PATCH v1 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-02-14 4:55 ` Shivah Shankar Shankar Narayan Rao
2023-03-03 8:15 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-02-23 9:03 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-01 5:35 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-02 2:58 ` Anup Prabhu
2023-03-09 18:42 ` Thomas Monjalon
2023-03-10 2:55 ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-02-27 6:11 ` Anup Prabhu
2023-03-09 20:06 ` Thomas Monjalon
2023-03-10 8:13 ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-02-20 6:31 ` Anup Prabhu
2023-03-09 20:15 ` Thomas Monjalon
2023-03-10 8:14 ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-02-20 10:11 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-02 8:15 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-02-27 3:46 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-02-16 12:23 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-02-16 4:21 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-15 12:26 ` Shivah Shankar Shankar Narayan Rao
2023-03-03 6:07 ` Anup Prabhu
2023-03-10 8:09 ` [PATCH v5 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-16 17:45 ` Thomas Monjalon
2023-03-16 17:47 ` [EXT] " Srikanth Yalavarthi
2023-03-16 18:01 ` Thomas Monjalon
2023-03-16 21:31 ` Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-16 17:47 ` Thomas Monjalon
2023-03-16 17:52 ` [EXT] " Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-03-16 17:50 ` Thomas Monjalon
2023-03-16 17:56 ` [EXT] " Srikanth Yalavarthi
2023-03-16 18:03 ` Thomas Monjalon
2023-03-16 18:07 ` Srikanth Yalavarthi
2023-03-16 21:32 ` Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 00/11] Implementation of mldev test application Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 01/11] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 02/11] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 03/11] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 04/11] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 05/11] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 06/11] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 07/11] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 08/11] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 09/11] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 10/11] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 11/11] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-19 22:08 ` [PATCH v7 00/11] Implementation of mldev test application Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221208192918.25022-11-syalavarthi@marvell.com \
--to=syalavarthi@marvell.com \
--cc=aprabhu@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=sshankarnara@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).