From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <jerinj@marvell.com>
Subject: [PATCH v2 05/12] app/mldev: add ordered inference test case
Date: Tue, 29 Nov 2022 00:21:02 -0800 [thread overview]
Message-ID: <20221129082109.6809-5-syalavarthi@marvell.com> (raw)
In-Reply-To: <20221129082109.6809-1-syalavarthi@marvell.com>
Added an ordered test case to execute inferences with single
or multiple models. In this test case inference requests for
a model are enqueued after completion of all requests for
the previous model. Test supports inference repetitions.
Operations sequence when testing with N models and R repetitions:
(load -> start -> (enqueue + dequeue) x R -> stop -> unload) x N
The test case can be executed by selecting the "inference_ordered" test;
the number of repetitions can be specified through the "--repetitions" argument.
Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
app/test-mldev/meson.build | 2 +
app/test-mldev/ml_options.c | 73 ++-
app/test-mldev/ml_options.h | 17 +-
app/test-mldev/test_inference_common.c | 565 ++++++++++++++++++++++++
app/test-mldev/test_inference_common.h | 65 +++
app/test-mldev/test_inference_ordered.c | 119 +++++
app/test-mldev/test_model_common.h | 10 +
7 files changed, 839 insertions(+), 12 deletions(-)
create mode 100644 app/test-mldev/test_inference_common.c
create mode 100644 app/test-mldev/test_inference_common.h
create mode 100644 app/test-mldev/test_inference_ordered.c
diff --git a/app/test-mldev/meson.build b/app/test-mldev/meson.build
index b09e1ccc8a..475d76d126 100644
--- a/app/test-mldev/meson.build
+++ b/app/test-mldev/meson.build
@@ -16,6 +16,8 @@ sources = files(
'test_device_ops.c',
'test_model_common.c',
'test_model_ops.c',
+ 'test_inference_common.c',
+ 'test_inference_ordered.c',
)
deps += ['mldev']
diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 15043c0992..10dad18fff 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -29,6 +29,7 @@ ml_options_default(struct ml_options *opt)
opt->dev_id = 0;
opt->socket_id = SOCKET_ID_ANY;
opt->nb_filelist = 0;
+ opt->repetitions = 1;
opt->debug = false;
}
@@ -96,6 +97,60 @@ ml_parse_models(struct ml_options *opt, const char *arg)
return ret;
}
+static int
+ml_parse_filelist(struct ml_options *opt, const char *arg)
+{
+ const char *delim = ",";
+ char filelist[PATH_MAX];
+ char *token;
+
+ if (opt->nb_filelist >= ML_TEST_MAX_MODELS) {
+ ml_err("Exceeded filelist count, max = %d\n", ML_TEST_MAX_MODELS);
+ return -1;
+ }
+
+ strlcpy(filelist, arg, PATH_MAX);
+
+ /* model */
+ token = strtok(filelist, delim);
+ if (token == NULL) {
+ ml_err("Invalid filelist, model not specified = %s\n", arg);
+ return -EINVAL;
+ }
+ strlcpy(opt->filelist[opt->nb_filelist].model, token, PATH_MAX);
+
+ /* input */
+ token = strtok(NULL, delim);
+ if (token == NULL) {
+ ml_err("Invalid filelist, input not specified = %s\n", arg);
+ return -EINVAL;
+ }
+ strlcpy(opt->filelist[opt->nb_filelist].input, token, PATH_MAX);
+
+ /* output */
+ token = strtok(NULL, delim);
+ if (token == NULL) {
+ ml_err("Invalid filelist, output not specified = %s\n", arg);
+ return -EINVAL;
+ }
+ strlcpy(opt->filelist[opt->nb_filelist].output, token, PATH_MAX);
+
+ opt->nb_filelist++;
+
+ if (opt->nb_filelist == 0) {
+ ml_err("Empty filelist. Need at least one filelist entry for the test.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ml_parse_repetitions(struct ml_options *opt, const char *arg)
+{
+ return parser_read_uint64(&opt->repetitions, arg);
+}
+
static void
ml_dump_test_options(const char *testname)
{
@@ -106,6 +161,12 @@ ml_dump_test_options(const char *testname)
printf("\t\t--models : comma separated list of models\n");
printf("\n");
}
+
+ if (strcmp(testname, "inference_ordered") == 0) {
+ printf("\t\t--filelist : comma separated list of model, input and output\n"
+ "\t\t--repetitions : number of inference repetitions\n");
+ printf("\n");
+ }
}
static void
@@ -124,8 +185,9 @@ print_usage(char *program)
}
static struct option lgopts[] = {
- {ML_TEST, 1, 0, 0}, {ML_DEVICE_ID, 1, 0, 0}, {ML_SOCKET_ID, 1, 0, 0}, {ML_MODELS, 1, 0, 0},
- {ML_DEBUG, 0, 0, 0}, {ML_HELP, 0, 0, 0}, {NULL, 0, 0, 0}};
+ {ML_TEST, 1, 0, 0}, {ML_DEVICE_ID, 1, 0, 0}, {ML_SOCKET_ID, 1, 0, 0},
+ {ML_MODELS, 1, 0, 0}, {ML_FILELIST, 1, 0, 0}, {ML_REPETITIONS, 1, 0, 0},
+ {ML_DEBUG, 0, 0, 0}, {ML_HELP, 0, 0, 0}, {NULL, 0, 0, 0}};
static int
ml_opts_parse_long(int opt_idx, struct ml_options *opt)
@@ -133,10 +195,9 @@ ml_opts_parse_long(int opt_idx, struct ml_options *opt)
unsigned int i;
struct long_opt_parser parsermap[] = {
- {ML_TEST, ml_parse_test_name},
- {ML_DEVICE_ID, ml_parse_dev_id},
- {ML_SOCKET_ID, ml_parse_socket_id},
- {ML_MODELS, ml_parse_models},
+ {ML_TEST, ml_parse_test_name}, {ML_DEVICE_ID, ml_parse_dev_id},
+ {ML_SOCKET_ID, ml_parse_socket_id}, {ML_MODELS, ml_parse_models},
+ {ML_FILELIST, ml_parse_filelist}, {ML_REPETITIONS, ml_parse_repetitions},
};
for (i = 0; i < RTE_DIM(parsermap); i++) {
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index 8faf3b5deb..ad8aee5964 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -13,15 +13,19 @@
#define ML_TEST_MAX_MODELS 8
/* Options names */
-#define ML_TEST ("test")
-#define ML_DEVICE_ID ("dev_id")
-#define ML_SOCKET_ID ("socket_id")
-#define ML_MODELS ("models")
-#define ML_DEBUG ("debug")
-#define ML_HELP ("help")
+#define ML_TEST ("test")
+#define ML_DEVICE_ID ("dev_id")
+#define ML_SOCKET_ID ("socket_id")
+#define ML_MODELS ("models")
+#define ML_FILELIST ("filelist")
+#define ML_REPETITIONS ("repetitions")
+#define ML_DEBUG ("debug")
+#define ML_HELP ("help")
struct ml_filelist {
char model[PATH_MAX];
+ char input[PATH_MAX];
+ char output[PATH_MAX];
};
struct ml_options {
@@ -30,6 +34,7 @@ struct ml_options {
int socket_id;
struct ml_filelist filelist[ML_TEST_MAX_MODELS];
uint8_t nb_filelist;
+ uint64_t repetitions;
bool debug;
};
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
new file mode 100644
index 0000000000..8b5dc89346
--- /dev/null
+++ b/app/test-mldev/test_inference_common.c
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_mldev.h>
+
+#include "ml_common.h"
+#include "ml_options.h"
+#include "ml_test.h"
+#include "test_common.h"
+#include "test_inference_common.h"
+
+/* Enqueue inference requests with burst size equal to 1 */
+static int
+ml_enqueue_single(void *arg)
+{
+ struct test_inference *t = ml_test_priv((struct ml_test *)arg);
+ struct ml_request *req = NULL;
+ struct rte_ml_op *op = NULL;
+ struct ml_core_args *args;
+ uint64_t model_enq = 0;
+ uint32_t burst_enq;
+ uint32_t lcore_id;
+ int16_t fid;
+ int ret;
+
+ lcore_id = rte_lcore_id();
+ args = &t->args[lcore_id];
+ model_enq = 0;
+
+ if (args->nb_reqs == 0)
+ return 0;
+
+next_rep:
+ fid = args->start_fid;
+
+next_model:
+ ret = rte_mempool_get(t->op_pool, (void **)&op);
+ if (ret != 0)
+ goto next_model;
+
+retry:
+ ret = rte_mempool_get(t->model[fid].io_pool, (void **)&req);
+ if (ret != 0)
+ goto retry;
+
+ op->model_id = t->model[fid].id;
+ op->nb_batches = t->model[fid].info.batch_size;
+ op->mempool = t->op_pool;
+
+ op->input.addr = req->input;
+ op->input.length = t->model[fid].inp_qsize;
+ op->input.next = NULL;
+
+ op->output.addr = req->output;
+ op->output.length = t->model[fid].out_qsize;
+ op->output.next = NULL;
+
+ op->user_ptr = req;
+ req->niters++;
+ req->fid = fid;
+
+enqueue_req:
+ burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, 0, &op, 1);
+ if (burst_enq == 0)
+ goto enqueue_req;
+
+ fid++;
+ if (likely(fid <= args->end_fid))
+ goto next_model;
+
+ model_enq++;
+ if (likely(model_enq < args->nb_reqs))
+ goto next_rep;
+
+ return 0;
+}
+
+/* Dequeue inference requests with burst size equal to 1 */
+static int
+ml_dequeue_single(void *arg)
+{
+ struct test_inference *t = ml_test_priv((struct ml_test *)arg);
+ struct rte_ml_op_error error;
+ struct rte_ml_op *op = NULL;
+ struct ml_core_args *args;
+ struct ml_request *req;
+ uint64_t total_deq = 0;
+ uint8_t nb_filelist;
+ uint32_t burst_deq;
+ uint32_t lcore_id;
+
+ lcore_id = rte_lcore_id();
+ args = &t->args[lcore_id];
+ nb_filelist = args->end_fid - args->start_fid + 1;
+
+ if (args->nb_reqs == 0)
+ return 0;
+
+dequeue_req:
+ burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, 0, &op, 1);
+
+ if (likely(burst_deq == 1)) {
+ total_deq += burst_deq;
+ if (unlikely(op->status == RTE_ML_OP_STATUS_ERROR)) {
+ rte_ml_op_error_get(t->cmn.opt->dev_id, op, &error);
+ ml_err("error_code = 0x%016lx, error_message = %s\n", error.errcode,
+ error.message);
+ }
+ req = (struct ml_request *)op->user_ptr;
+ rte_mempool_put(t->model[req->fid].io_pool, req);
+ rte_mempool_put(t->op_pool, op);
+ }
+
+ if (likely(total_deq < args->nb_reqs * nb_filelist))
+ goto dequeue_req;
+
+ return 0;
+}
+
+bool
+test_inference_cap_check(struct ml_options *opt)
+{
+ struct rte_ml_dev_info dev_info;
+
+ if (!ml_test_cap_check(opt))
+ return false;
+
+ rte_ml_dev_info_get(opt->dev_id, &dev_info);
+ if (opt->nb_filelist > dev_info.max_models) {
+ ml_err("Insufficient capabilities: Filelist count exceeded device limit, count = %u (max limit = %u)",
+ opt->nb_filelist, dev_info.max_models);
+ return false;
+ }
+
+ return true;
+}
+
+int
+test_inference_opt_check(struct ml_options *opt)
+{
+ uint32_t i;
+ int ret;
+
+ /* check common opts */
+ ret = ml_test_opt_check(opt);
+ if (ret != 0)
+ return ret;
+
+ /* check file availability */
+ for (i = 0; i < opt->nb_filelist; i++) {
+ if (access(opt->filelist[i].model, F_OK) == -1) {
+ ml_err("Model file not accessible: id = %u, file = %s", i,
+ opt->filelist[i].model);
+ return -ENOENT;
+ }
+
+ if (access(opt->filelist[i].input, F_OK) == -1) {
+ ml_err("Input file not accessible: id = %u, file = %s", i,
+ opt->filelist[i].input);
+ return -ENOENT;
+ }
+ }
+
+ if (opt->repetitions == 0) {
+ ml_err("Invalid option, repetitions = %" PRIu64 "\n", opt->repetitions);
+ return -EINVAL;
+ }
+
+ /* check number of available lcores. */
+ if (rte_lcore_count() < 3) {
+ ml_err("Insufficient lcores = %u\n", rte_lcore_count());
+ ml_err("Minimum lcores required to create %u queue-pairs = %u\n", 1, 3);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+test_inference_opt_dump(struct ml_options *opt)
+{
+ uint32_t i;
+
+ /* dump common opts */
+ ml_test_opt_dump(opt);
+
+ /* dump test opts */
+ ml_dump("repetitions", "%" PRIu64, opt->repetitions);
+
+ ml_dump_begin("filelist");
+ for (i = 0; i < opt->nb_filelist; i++) {
+ ml_dump_list("model", i, opt->filelist[i].model);
+ ml_dump_list("input", i, opt->filelist[i].input);
+ ml_dump_list("output", i, opt->filelist[i].output);
+ }
+ ml_dump_end;
+}
+
+int
+test_inference_setup(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t;
+ void *test_inference;
+ int ret = 0;
+ uint32_t i;
+
+ test_inference = rte_zmalloc_socket(test->name, sizeof(struct test_inference),
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ if (test_inference == NULL) {
+ ml_err("failed to allocate memory for test_model");
+ ret = -ENOMEM;
+ goto error;
+ }
+ test->test_priv = test_inference;
+ t = ml_test_priv(test);
+
+ t->nb_used = 0;
+ t->cmn.result = ML_TEST_FAILED;
+ t->cmn.opt = opt;
+
+ /* get device info */
+ ret = rte_ml_dev_info_get(opt->dev_id, &t->cmn.dev_info);
+ if (ret < 0) {
+ ml_err("failed to get device info");
+ goto error;
+ }
+
+ t->enqueue = ml_enqueue_single;
+ t->dequeue = ml_dequeue_single;
+
+ /* set model initial state */
+ for (i = 0; i < opt->nb_filelist; i++)
+ t->model[i].state = MODEL_INITIAL;
+
+ return 0;
+
+error:
+ if (test_inference != NULL)
+ rte_free(test_inference);
+
+ return ret;
+}
+
+void
+test_inference_destroy(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t;
+
+ RTE_SET_USED(opt);
+
+ t = ml_test_priv(test);
+ if (t != NULL)
+ rte_free(t);
+}
+
+int
+ml_inference_mldev_setup(struct ml_test *test, struct ml_options *opt)
+{
+ struct rte_ml_dev_qp_conf qp_conf;
+ struct test_inference *t;
+ int ret;
+
+ t = ml_test_priv(test);
+
+ ret = ml_test_device_configure(test, opt);
+ if (ret != 0)
+ return ret;
+
+ /* setup queue pairs */
+ qp_conf.nb_desc = t->cmn.dev_info.max_desc;
+ qp_conf.cb = NULL;
+
+ ret = rte_ml_dev_queue_pair_setup(opt->dev_id, 0, &qp_conf, opt->socket_id);
+ if (ret != 0) {
+ ml_err("Failed to setup ml device queue-pair, dev_id = %d, qp_id = %u\n",
+ opt->dev_id, 0);
+ goto error;
+ }
+
+ ret = ml_test_device_start(test, opt);
+ if (ret != 0)
+ goto error;
+
+ return 0;
+
+error:
+ ml_test_device_close(test, opt);
+
+ return ret;
+}
+
+int
+ml_inference_mldev_destroy(struct ml_test *test, struct ml_options *opt)
+{
+ int ret;
+
+ ret = ml_test_device_stop(test, opt);
+ if (ret != 0)
+ goto error;
+
+ ret = ml_test_device_close(test, opt);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+
+error:
+ ml_test_device_close(test, opt);
+
+ return ret;
+}
+
+/* Callback for IO pool create. This function would compute the fields of ml_request
+ * structure and prepare the quantized input data.
+ */
+static void
+ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned int obj_idx)
+{
+ struct test_inference *t = ml_test_priv((struct ml_test *)opaque);
+ struct ml_request *req = (struct ml_request *)obj;
+
+ RTE_SET_USED(mp);
+ RTE_SET_USED(obj_idx);
+
+ req->input = RTE_PTR_ADD(
+ obj, RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size));
+ req->output = RTE_PTR_ADD(req->input, RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize,
+ t->cmn.dev_info.min_align_size));
+ req->niters = 0;
+
+ /* quantize data */
+ rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id,
+ t->model[t->fid].info.batch_size, t->model[t->fid].input, req->input);
+}
+
+int
+ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, int16_t fid)
+{
+ struct test_inference *t = ml_test_priv(test);
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ const struct rte_memzone *mz;
+ uint64_t nb_buffers;
+ uint32_t buff_size;
+ uint32_t mz_size;
+ uint32_t fsize;
+ FILE *fp;
+ int ret;
+
+ /* get input buffer size */
+ ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].info.batch_size,
+ &t->model[fid].inp_qsize, &t->model[fid].inp_dsize);
+ if (ret != 0) {
+ ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
+ return ret;
+ }
+
+ /* get output buffer size */
+ ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id,
+ t->model[fid].info.batch_size, &t->model[fid].out_qsize,
+ &t->model[fid].out_dsize);
+ if (ret != 0) {
+ ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
+ return ret;
+ }
+
+ /* allocate buffer for user data */
+ mz_size = t->model[fid].inp_dsize + t->model[fid].out_dsize;
+ sprintf(mz_name, "ml_user_data_%d", fid);
+ mz = rte_memzone_reserve(mz_name, mz_size, opt->socket_id, 0);
+ if (mz == NULL) {
+ ml_err("Memzone allocation failed for ml_user_data\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ t->model[fid].input = mz->addr;
+ t->model[fid].output = RTE_PTR_ADD(t->model[fid].input, t->model[fid].inp_dsize);
+
+ /* load input file */
+ fp = fopen(opt->filelist[fid].input, "r");
+ if (fp == NULL) {
+ ml_err("Failed to open input file : %s\n", opt->filelist[fid].input);
+ ret = -errno;
+ goto error;
+ }
+
+ fseek(fp, 0, SEEK_END);
+ fsize = ftell(fp);
+ fseek(fp, 0, SEEK_SET);
+ if (fsize != t->model[fid].inp_dsize) {
+ ml_err("Invalid input file, size = %u (expected size = %" PRIu64 ")\n", fsize,
+ t->model[fid].inp_dsize);
+ ret = -EINVAL;
+ fclose(fp);
+ goto error;
+ }
+
+ if (fread(t->model[fid].input, 1, t->model[fid].inp_dsize, fp) != t->model[fid].inp_dsize) {
+ ml_err("Failed to read input file : %s\n", opt->filelist[fid].input);
+ ret = -errno;
+ fclose(fp);
+ goto error;
+ }
+ fclose(fp);
+
+ /* create mempool for quantized input and output buffers. ml_request_initialize is
+ * used as a callback for object creation.
+ */
+ buff_size = RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size) +
+ RTE_ALIGN_CEIL(t->model[fid].inp_qsize, t->cmn.dev_info.min_align_size) +
+ RTE_ALIGN_CEIL(t->model[fid].out_qsize, t->cmn.dev_info.min_align_size);
+ nb_buffers = RTE_MIN((uint64_t)ML_TEST_MAX_POOL_SIZE, opt->repetitions);
+
+ t->fid = fid;
+ sprintf(mp_name, "ml_io_pool_%d", fid);
+ t->model[fid].io_pool = rte_mempool_create(mp_name, nb_buffers, buff_size, 0, 0, NULL, NULL,
+ ml_request_initialize, test, opt->socket_id, 0);
+ if (t->model[fid].io_pool == NULL) {
+ ml_err("Failed to create io pool : %s\n", "ml_io_pool");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (mz != NULL)
+ rte_memzone_free(mz);
+
+ if (t->model[fid].io_pool != NULL) {
+ rte_mempool_free(t->model[fid].io_pool);
+ t->model[fid].io_pool = NULL;
+ }
+
+ return ret;
+}
+
+void
+ml_inference_iomem_destroy(struct ml_test *test, struct ml_options *opt, int16_t fid)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ const struct rte_memzone *mz;
+ struct rte_mempool *mp;
+
+ RTE_SET_USED(test);
+ RTE_SET_USED(opt);
+
+ /* release user data memzone */
+ sprintf(mz_name, "ml_user_data_%d", fid);
+ mz = rte_memzone_lookup(mz_name);
+ if (mz != NULL)
+ rte_memzone_free(mz);
+
+ /* destroy io pool */
+ sprintf(mp_name, "ml_io_pool_%d", fid);
+ mp = rte_mempool_lookup(mp_name);
+ if (mp != NULL)
+ rte_mempool_free(mp);
+}
+
+int
+ml_inference_mem_setup(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t = ml_test_priv(test);
+
+ /* create op pool */
+ t->op_pool = rte_ml_op_pool_create("ml_test_op_pool", ML_TEST_MAX_POOL_SIZE, 0, 0,
+ opt->socket_id);
+ if (t->op_pool == NULL) {
+ ml_err("Failed to create op pool : %s\n", "ml_op_pool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+ml_inference_mem_destroy(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t = ml_test_priv(test);
+
+ RTE_SET_USED(opt);
+
+ /* release op pool */
+ if (t->op_pool != NULL)
+ rte_mempool_free(t->op_pool);
+}
+
+/* Callback for mempool object iteration. This call would dequantize output data. */
+static void
+ml_request_finish(struct rte_mempool *mp, void *opaque, void *obj, unsigned int obj_idx)
+{
+ struct test_inference *t = ml_test_priv((struct ml_test *)opaque);
+ struct ml_request *req = (struct ml_request *)obj;
+ struct ml_model *model = &t->model[req->fid];
+
+ RTE_SET_USED(mp);
+ RTE_SET_USED(obj_idx);
+
+ if (req->niters == 0)
+ return;
+
+ t->nb_used++;
+ rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].info.batch_size,
+ req->output, model->output);
+}
+
+int
+ml_inference_result(struct ml_test *test, struct ml_options *opt, int16_t fid)
+{
+ struct test_inference *t = ml_test_priv(test);
+
+ RTE_SET_USED(opt);
+
+ rte_mempool_obj_iter(t->model[fid].io_pool, ml_request_finish, test);
+
+ if (t->nb_used > 0)
+ t->cmn.result = ML_TEST_SUCCESS;
+ else
+ t->cmn.result = ML_TEST_FAILED;
+
+ return t->cmn.result;
+}
+
+int
+ml_inference_launch_cores(struct ml_test *test, struct ml_options *opt, int16_t start_fid,
+ int16_t end_fid)
+{
+ struct test_inference *t = ml_test_priv(test);
+ uint32_t lcore_id;
+ uint32_t id = 0;
+
+ RTE_LCORE_FOREACH_WORKER(lcore_id)
+ {
+ if (id == 2)
+ break;
+
+ t->args[lcore_id].nb_reqs = opt->repetitions;
+ t->args[lcore_id].start_fid = start_fid;
+ t->args[lcore_id].end_fid = end_fid;
+
+ if (id % 2 == 0)
+ rte_eal_remote_launch(t->enqueue, test, lcore_id);
+ else
+ rte_eal_remote_launch(t->dequeue, test, lcore_id);
+
+ id++;
+ }
+
+ return 0;
+}
diff --git a/app/test-mldev/test_inference_common.h b/app/test-mldev/test_inference_common.h
new file mode 100644
index 0000000000..91007954b4
--- /dev/null
+++ b/app/test-mldev/test_inference_common.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+
+#ifndef _ML_TEST_INFERENCE_COMMON_
+#define _ML_TEST_INFERENCE_COMMON_
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+
+#include "ml_options.h"
+#include "ml_test.h"
+#include "test_common.h"
+#include "test_model_common.h"
+
+struct ml_request {
+ void *input;
+ void *output;
+ int16_t fid;
+ uint64_t niters;
+};
+
+struct ml_core_args {
+ uint64_t nb_reqs;
+ int16_t start_fid;
+ int16_t end_fid;
+};
+
+struct test_inference {
+ /* common data */
+ struct test_common cmn;
+
+ /* test specific data */
+ struct ml_model model[ML_TEST_MAX_MODELS];
+ struct rte_mempool *op_pool;
+
+ uint64_t nb_used;
+ int16_t fid;
+
+ int (*enqueue)(void *arg);
+ int (*dequeue)(void *arg);
+
+ struct ml_core_args args[RTE_MAX_LCORE];
+} __rte_cache_aligned;
+
+bool test_inference_cap_check(struct ml_options *opt);
+int test_inference_opt_check(struct ml_options *opt);
+void test_inference_opt_dump(struct ml_options *opt);
+int test_inference_setup(struct ml_test *test, struct ml_options *opt);
+void test_inference_destroy(struct ml_test *test, struct ml_options *opt);
+
+int ml_inference_mldev_setup(struct ml_test *test, struct ml_options *opt);
+int ml_inference_mldev_destroy(struct ml_test *test, struct ml_options *opt);
+int ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, int16_t fid);
+void ml_inference_iomem_destroy(struct ml_test *test, struct ml_options *opt, int16_t fid);
+int ml_inference_mem_setup(struct ml_test *test, struct ml_options *opt);
+void ml_inference_mem_destroy(struct ml_test *test, struct ml_options *opt);
+int ml_inference_result(struct ml_test *test, struct ml_options *opt, int16_t fid);
+int ml_inference_launch_cores(struct ml_test *test, struct ml_options *opt, int16_t start_fid,
+ int16_t end_fid);
+
+#endif /* _ML_TEST_INFERENCE_COMMON_ */
diff --git a/app/test-mldev/test_inference_ordered.c b/app/test-mldev/test_inference_ordered.c
new file mode 100644
index 0000000000..84e6bf9109
--- /dev/null
+++ b/app/test-mldev/test_inference_ordered.c
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_launch.h>
+
+#include "ml_common.h"
+#include "ml_test.h"
+#include "test_inference_common.h"
+#include "test_model_common.h"
+
+static int
+test_inference_ordered_driver(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t;
+ int16_t fid = 0;
+ int ret = 0;
+
+ t = ml_test_priv(test);
+
+ ret = ml_inference_mldev_setup(test, opt);
+ if (ret != 0)
+ return ret;
+
+ ret = ml_inference_mem_setup(test, opt);
+ if (ret != 0)
+ return ret;
+
+next_model:
+ /* load model */
+ ret = ml_model_load(test, opt, &t->model[fid], fid);
+ if (ret != 0)
+ goto error;
+
+ /* start model */
+ ret = ml_model_start(test, opt, &t->model[fid], fid);
+ if (ret != 0)
+ goto error;
+
+ ret = ml_inference_iomem_setup(test, opt, fid);
+ if (ret != 0)
+ goto error;
+
+ /* launch inferences for one model using available queue pairs */
+ ret = ml_inference_launch_cores(test, opt, fid, fid);
+ if (ret != 0) {
+ ml_err("failed to launch cores");
+ goto error;
+ }
+
+ rte_eal_mp_wait_lcore();
+
+ ret = ml_inference_result(test, opt, fid);
+ if (ret != ML_TEST_SUCCESS)
+ goto error;
+
+ ml_inference_iomem_destroy(test, opt, fid);
+
+ /* stop model */
+ ret = ml_model_stop(test, opt, &t->model[fid], fid);
+ if (ret != 0)
+ goto error;
+
+ /* unload model */
+ ret = ml_model_unload(test, opt, &t->model[fid], fid);
+ if (ret != 0)
+ goto error;
+
+ fid++;
+ if (fid < opt->nb_filelist)
+ goto next_model;
+
+ ml_inference_mem_destroy(test, opt);
+
+ ret = ml_inference_mldev_destroy(test, opt);
+ if (ret != 0)
+ return ret;
+
+ t->cmn.result = ML_TEST_SUCCESS;
+
+ return 0;
+
+error:
+ ml_inference_iomem_destroy(test, opt, fid);
+ ml_inference_mem_destroy(test, opt);
+ ml_model_stop(test, opt, &t->model[fid], fid);
+ ml_model_unload(test, opt, &t->model[fid], fid);
+
+ t->cmn.result = ML_TEST_FAILED;
+
+ return ret;
+}
+
+static int
+test_inference_ordered_result(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t;
+
+ RTE_SET_USED(opt);
+
+ t = ml_test_priv(test);
+
+ return t->cmn.result;
+}
+
+static const struct ml_test_ops inference_ordered = {
+ .cap_check = test_inference_cap_check,
+ .opt_check = test_inference_opt_check,
+ .opt_dump = test_inference_opt_dump,
+ .test_setup = test_inference_setup,
+ .test_destroy = test_inference_destroy,
+ .test_driver = test_inference_ordered_driver,
+ .test_result = test_inference_ordered_result,
+};
+
+ML_TEST_REGISTER(inference_ordered);
diff --git a/app/test-mldev/test_model_common.h b/app/test-mldev/test_model_common.h
index 302e4eb45f..c45ae80853 100644
--- a/app/test-mldev/test_model_common.h
+++ b/app/test-mldev/test_model_common.h
@@ -23,6 +23,16 @@ struct ml_model {
int16_t id;
struct rte_ml_model_info info;
enum model_state state;
+
+ uint64_t inp_dsize;
+ uint64_t inp_qsize;
+ uint64_t out_dsize;
+ uint64_t out_qsize;
+
+ uint8_t *input;
+ uint8_t *output;
+
+ struct rte_mempool *io_pool;
};
int ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *model,
--
2.17.1
next prev parent reply other threads:[~2022-11-29 8:21 UTC|newest]
Thread overview: 122+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-29 7:07 [PATCH v1 00/12] implement mldev test application Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2022-11-29 8:20 ` [PATCH v2 " Srikanth Yalavarthi
2022-11-29 8:20 ` [PATCH v2 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-11-29 8:21 ` Srikanth Yalavarthi [this message]
2022-11-29 8:21 ` [PATCH v2 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2022-11-29 8:21 ` [PATCH v2 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-12-08 19:29 ` [PATCH v3 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-02-03 9:49 ` Anup Prabhu
2022-12-08 19:29 ` [PATCH v3 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-02 12:39 ` Anup Prabhu
2022-11-29 7:07 ` [PATCH v1 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-02-14 4:55 ` Shivah Shankar Shankar Narayan Rao
2023-03-03 8:15 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-02-23 9:03 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-01 5:35 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-02 2:58 ` Anup Prabhu
2023-03-09 18:42 ` Thomas Monjalon
2023-03-10 2:55 ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-02-27 6:11 ` Anup Prabhu
2023-03-09 20:06 ` Thomas Monjalon
2023-03-10 8:13 ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-02-20 6:31 ` Anup Prabhu
2023-03-09 20:15 ` Thomas Monjalon
2023-03-10 8:14 ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-02-20 10:11 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-02 8:15 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-02-27 3:46 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-02-16 12:23 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-02-16 4:21 ` Anup Prabhu
2023-02-07 15:49 ` [PATCH v4 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-15 12:26 ` Shivah Shankar Shankar Narayan Rao
2023-03-03 6:07 ` Anup Prabhu
2023-03-10 8:09 ` [PATCH v5 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-10 8:09 ` [PATCH v5 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-16 17:45 ` Thomas Monjalon
2023-03-16 17:47 ` [EXT] " Srikanth Yalavarthi
2023-03-16 18:01 ` Thomas Monjalon
2023-03-16 21:31 ` Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-16 17:47 ` Thomas Monjalon
2023-03-16 17:52 ` [EXT] " Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-11 15:09 ` [PATCH v6 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-03-16 17:50 ` Thomas Monjalon
2023-03-16 17:56 ` [EXT] " Srikanth Yalavarthi
2023-03-16 18:03 ` Thomas Monjalon
2023-03-16 18:07 ` Srikanth Yalavarthi
2023-03-16 21:32 ` Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 00/11] Implementation of mldev test application Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 01/11] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 02/11] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 03/11] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 04/11] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 05/11] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 06/11] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 07/11] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 08/11] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 09/11] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 10/11] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 11/11] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-19 22:08 ` [PATCH v7 00/11] Implementation of mldev test application Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221129082109.6809-5-syalavarthi@marvell.com \
--to=syalavarthi@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=sshankarnara@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).