From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <jerinj@marvell.com>
Subject: [PATCH v1 06/12] app/mldev: add test case to interleave inferences
Date: Mon, 28 Nov 2022 22:50:34 -0800 [thread overview]
Message-ID: <20221129065040.5875-7-syalavarthi@marvell.com> (raw)
In-Reply-To: <20221129065040.5875-1-syalavarthi@marvell.com>
Added test case to interleave inference requests from multiple
models. Interleaving loads and starts all models, then launches
inference requests for the models using the available queue-pairs.
Operation sequence when testing with N models and R repetitions:
(load + start) x N -> (enqueue + dequeue) x N x R ...
-> (stop + unload) x N
The test can be executed by selecting the "inference_interleave" test.
Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
Change-Id: Ia8f0e42e1838398dd77984111316621529f8d2e6
---
app/test-mldev/meson.build | 1 +
app/test-mldev/ml_options.c | 3 +-
app/test-mldev/test_inference_common.c | 12 +--
app/test-mldev/test_inference_common.h | 4 +-
app/test-mldev/test_inference_interleave.c | 118 +++++++++++++++++++++
5 files changed, 129 insertions(+), 9 deletions(-)
create mode 100644 app/test-mldev/test_inference_interleave.c
diff --git a/app/test-mldev/meson.build b/app/test-mldev/meson.build
index 475d76d126..41d22fb22c 100644
--- a/app/test-mldev/meson.build
+++ b/app/test-mldev/meson.build
@@ -18,6 +18,7 @@ sources = files(
'test_model_ops.c',
'test_inference_common.c',
'test_inference_ordered.c',
+ 'test_inference_interleave.c',
)
deps += ['mldev']
diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 59a5d16584..9a006ff7c8 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -162,7 +162,8 @@ ml_dump_test_options(const char *testname)
printf("\n");
}
- if (strcmp(testname, "inference_ordered") == 0) {
+ if ((strcmp(testname, "inference_ordered") == 0) ||
+ (strcmp(testname, "inference_interleave") == 0)) {
printf("\t\t--filelist : comma separated list of model, input and output\n"
"\t\t--repetitions : number of inference repetitions\n");
printf("\n");
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index e5e300ffdc..1e0e30637f 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -115,7 +115,7 @@ ml_dequeue_single(void *arg)
total_deq += burst_deq;
if (unlikely(op->status == RTE_ML_OP_STATUS_ERROR)) {
rte_ml_op_error_get(t->cmn.opt->dev_id, op, &error);
- ml_err("error_code = 0x%016lx, error_message = %s\n", error.errcode,
+ ml_err("error_code = 0x%" PRIx64 ", error_message = %s\n", error.errcode,
error.message);
}
req = (struct ml_request *)op->user_ptr;
@@ -334,10 +334,10 @@ ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned
RTE_SET_USED(mp);
RTE_SET_USED(obj_idx);
- req->input = RTE_PTR_ADD(
- obj, RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size));
- req->output = RTE_PTR_ADD(req->input, RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize,
- t->cmn.dev_info.min_align_size));
+ req->input = (uint8_t *)obj +
+ RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size);
+ req->output = req->input +
+ RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize, t->cmn.dev_info.min_align_size);
req->niters = 0;
/* quantize data */
@@ -387,7 +387,7 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, int16_t f
}
t->model[fid].input = mz->addr;
- t->model[fid].output = RTE_PTR_ADD(t->model[fid].input, t->model[fid].inp_dsize);
+ t->model[fid].output = t->model[fid].input + t->model[fid].inp_dsize;
/* load input file */
fp = fopen(opt->filelist[fid].input, "r");
diff --git a/app/test-mldev/test_inference_common.h b/app/test-mldev/test_inference_common.h
index 91007954b4..b058abada4 100644
--- a/app/test-mldev/test_inference_common.h
+++ b/app/test-mldev/test_inference_common.h
@@ -17,8 +17,8 @@
#include "test_model_common.h"
struct ml_request {
- void *input;
- void *output;
+ uint8_t *input;
+ uint8_t *output;
int16_t fid;
uint64_t niters;
};
diff --git a/app/test-mldev/test_inference_interleave.c b/app/test-mldev/test_inference_interleave.c
new file mode 100644
index 0000000000..74ad0c597f
--- /dev/null
+++ b/app/test-mldev/test_inference_interleave.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_launch.h>
+
+#include "ml_common.h"
+#include "ml_test.h"
+#include "test_inference_common.h"
+#include "test_model_common.h"
+
+static int
+test_inference_interleave_driver(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t;
+ int16_t fid = 0;
+ int ret = 0;
+
+ t = ml_test_priv(test);
+
+ ret = ml_inference_mldev_setup(test, opt);
+ if (ret != 0)
+ return ret;
+
+ ret = ml_inference_mem_setup(test, opt);
+ if (ret != 0)
+ return ret;
+
+ /* load and start all models */
+ for (fid = 0; fid < opt->nb_filelist; fid++) {
+ ret = ml_model_load(test, opt, &t->model[fid], fid);
+ if (ret != 0)
+ goto error;
+
+ ret = ml_model_start(test, opt, &t->model[fid], fid);
+ if (ret != 0)
+ goto error;
+
+ ret = ml_inference_iomem_setup(test, opt, fid);
+ if (ret != 0)
+ goto error;
+ }
+
+ /* launch inference requests */
+ ret = ml_inference_launch_cores(test, opt, 0, opt->nb_filelist - 1);
+ if (ret != 0) {
+ ml_err("failed to launch cores");
+ goto error;
+ }
+
+ rte_eal_mp_wait_lcore();
+
+ /* stop and unload all models */
+ for (fid = 0; fid < opt->nb_filelist; fid++) {
+ ret = ml_inference_result(test, opt, fid);
+ if (ret != ML_TEST_SUCCESS)
+ goto error;
+
+ ml_inference_iomem_destroy(test, opt, fid);
+
+ ret = ml_model_stop(test, opt, &t->model[fid], fid);
+ if (ret != 0)
+ goto error;
+
+ ret = ml_model_unload(test, opt, &t->model[fid], fid);
+ if (ret != 0)
+ goto error;
+ }
+
+ ml_inference_mem_destroy(test, opt);
+
+ ret = ml_inference_mldev_destroy(test, opt);
+ if (ret != 0)
+ return ret;
+
+ t->cmn.result = ML_TEST_SUCCESS;
+
+ return 0;
+
+error:
+ ml_inference_mem_destroy(test, opt);
+ for (fid = 0; fid < opt->nb_filelist; fid++) {
+ ml_inference_iomem_destroy(test, opt, fid);
+ ml_model_stop(test, opt, &t->model[fid], fid);
+ ml_model_unload(test, opt, &t->model[fid], fid);
+ }
+
+ t->cmn.result = ML_TEST_FAILED;
+
+ return ret;
+}
+
+static int
+test_inference_interleave_result(struct ml_test *test, struct ml_options *opt)
+{
+ struct test_inference *t;
+
+ RTE_SET_USED(opt);
+
+ t = ml_test_priv(test);
+
+ return t->cmn.result;
+}
+
+static const struct ml_test_ops inference_interleave = {
+ .cap_check = test_inference_cap_check,
+ .opt_check = test_inference_opt_check,
+ .opt_dump = test_inference_opt_dump,
+ .test_setup = test_inference_setup,
+ .test_destroy = test_inference_destroy,
+ .test_driver = test_inference_interleave_driver,
+ .test_result = test_inference_interleave_result,
+};
+
+ML_TEST_REGISTER(inference_interleave);
--
2.17.1
next prev parent reply other threads:[~2022-11-29 6:51 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-29 6:50 [PATCH v1 00/12] *** implement mldev test application *** Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-11-29 6:50 ` Srikanth Yalavarthi [this message]
2022-11-29 6:50 ` [PATCH v1 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2022-11-29 6:50 ` [PATCH v1 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2022-11-29 7:07 [PATCH v1 00/12] implement mldev test application Srikanth Yalavarthi
2022-11-29 7:07 ` [PATCH v1 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221129065040.5875-7-syalavarthi@marvell.com \
--to=syalavarthi@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=sshankarnara@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).