DPDK patches and discussions
From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,
	<ptakkar@marvell.com>
Subject: [PATCH v2 1/1] app/mldev: enable support for pre-quantized I/O
Date: Thu, 26 Oct 2023 05:49:38 -0700	[thread overview]
Message-ID: <20231026124938.23695-1-syalavarthi@marvell.com> (raw)
In-Reply-To: <20231002100217.12456-1-syalavarthi@marvell.com>

From: Anup Prabhu <aprabhu@marvell.com>

Enable support for pre-quantized input and output in the ML
test application.

Signed-off-by: Anup Prabhu <aprabhu@marvell.com>
---

v2:
  - Updated application help

v1:
  - Initial changes

 app/test-mldev/ml_options.c            | 11 +++++++++-
 app/test-mldev/ml_options.h            | 28 ++++++++++++++------------
 app/test-mldev/test_inference_common.c | 20 ++++++++++++------
 doc/guides/tools/testmldev.rst         |  3 +++
 4 files changed, 42 insertions(+), 20 deletions(-)
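
With the --quantized_io option the test application treats the input and
output files given through --filelist as already being in the model's
native quantized format: per-tensor buffer sizes are taken from the byte
size reported by the model instead of nb_elements * sizeof(float), and the
quantization and dequantization steps are skipped.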

diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index eeaffec399..320f6325ae 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -24,6 +24,7 @@ ml_options_default(struct ml_options *opt)
 	opt->dev_id = 0;
 	opt->socket_id = SOCKET_ID_ANY;
 	opt->nb_filelist = 0;
+	opt->quantized_io = false;
 	opt->repetitions = 1;
 	opt->burst_size = 1;
 	opt->queue_pairs = 1;
@@ -243,7 +244,8 @@ ml_dump_test_options(const char *testname)
 		       "\t\t--queue_pairs      : number of queue pairs to create\n"
 		       "\t\t--queue_size       : size of queue-pair\n"
 		       "\t\t--tolerance        : maximum tolerance (%%) for output validation\n"
-		       "\t\t--stats            : enable reporting device and model statistics\n");
+		       "\t\t--stats            : enable reporting device and model statistics\n"
+		       "\t\t--quantized_io     : skip input/output quantization\n");
 		printf("\n");
 	}
 }
@@ -269,6 +271,7 @@ static struct option lgopts[] = {
 	{ML_SOCKET_ID, 1, 0, 0},
 	{ML_MODELS, 1, 0, 0},
 	{ML_FILELIST, 1, 0, 0},
+	{ML_QUANTIZED_IO, 0, 0, 0},
 	{ML_REPETITIONS, 1, 0, 0},
 	{ML_BURST_SIZE, 1, 0, 0},
 	{ML_QUEUE_PAIRS, 1, 0, 0},
@@ -316,6 +319,11 @@ ml_options_parse(struct ml_options *opt, int argc, char **argv)
 	while ((opts = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
 		switch (opts) {
 		case 0: /* parse long options */
+			if (!strcmp(lgopts[opt_idx].name, "quantized_io")) {
+				opt->quantized_io = true;
+				break;
+			}
+
 			if (!strcmp(lgopts[opt_idx].name, "stats")) {
 				opt->stats = true;
 				break;
@@ -360,4 +368,5 @@ ml_options_dump(struct ml_options *opt)
 		ml_dump("socket_id", "%d", opt->socket_id);
 
 	ml_dump("debug", "%s", (opt->debug ? "true" : "false"));
+	ml_dump("quantized_io", "%s", (opt->quantized_io ? "true" : "false"));
 }
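
Below is a minimal, standalone sketch (not part of the patch) of the
flag-style long-option pattern used in ml_options.c above: the option is
registered with has_arg = 0, and because both flag and val are 0,
getopt_long() returns 0 and the option is matched by name.

#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

int
main(int argc, char **argv)
{
	static struct option lgopts[] = {
		{"quantized_io", 0, 0, 0},	/* no argument expected */
		{NULL, 0, 0, 0},
	};
	bool quantized_io = false;
	int opt_idx, opts;

	while ((opts = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
		if (opts == 0 && strcmp(lgopts[opt_idx].name, "quantized_io") == 0)
			quantized_io = true;
	}

	printf("quantized_io = %s\n", quantized_io ? "true" : "false");
	return 0;
}
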
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index 90e22adeac..edb9dba8f7 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -12,19 +12,20 @@
 #define ML_TEST_MAX_MODELS   8
 
 /* Options names */
-#define ML_TEST	       ("test")
-#define ML_DEVICE_ID   ("dev_id")
-#define ML_SOCKET_ID   ("socket_id")
-#define ML_MODELS      ("models")
-#define ML_FILELIST    ("filelist")
-#define ML_REPETITIONS ("repetitions")
-#define ML_BURST_SIZE  ("burst_size")
-#define ML_QUEUE_PAIRS ("queue_pairs")
-#define ML_QUEUE_SIZE  ("queue_size")
-#define ML_TOLERANCE   ("tolerance")
-#define ML_STATS       ("stats")
-#define ML_DEBUG       ("debug")
-#define ML_HELP	       ("help")
+#define ML_TEST		("test")
+#define ML_DEVICE_ID	("dev_id")
+#define ML_SOCKET_ID	("socket_id")
+#define ML_MODELS	("models")
+#define ML_FILELIST	("filelist")
+#define ML_QUANTIZED_IO ("quantized_io")
+#define ML_REPETITIONS	("repetitions")
+#define ML_BURST_SIZE	("burst_size")
+#define ML_QUEUE_PAIRS	("queue_pairs")
+#define ML_QUEUE_SIZE	("queue_size")
+#define ML_TOLERANCE	("tolerance")
+#define ML_STATS	("stats")
+#define ML_DEBUG	("debug")
+#define ML_HELP		("help")
 
 struct ml_filelist {
 	char model[PATH_MAX];
@@ -46,6 +47,7 @@ struct ml_options {
 	float tolerance;
 	bool stats;
 	bool debug;
+	bool quantized_io;
 };
 
 void ml_options_default(struct ml_options *opt);
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index 846f71abb1..36629210ee 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -777,14 +777,22 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
 	}
 
 	t->model[fid].inp_dsize = 0;
-	for (i = 0; i < t->model[fid].info.nb_inputs; i++)
-		t->model[fid].inp_dsize +=
-			t->model[fid].info.input_info[i].nb_elements * sizeof(float);
+	for (i = 0; i < t->model[fid].info.nb_inputs; i++) {
+		if (opt->quantized_io)
+			t->model[fid].inp_dsize += t->model[fid].info.input_info[i].size;
+		else
+			t->model[fid].inp_dsize +=
+				t->model[fid].info.input_info[i].nb_elements * sizeof(float);
+	}
 
 	t->model[fid].out_dsize = 0;
-	for (i = 0; i < t->model[fid].info.nb_outputs; i++)
-		t->model[fid].out_dsize +=
-			t->model[fid].info.output_info[i].nb_elements * sizeof(float);
+	for (i = 0; i < t->model[fid].info.nb_outputs; i++) {
+		if (opt->quantized_io)
+			t->model[fid].out_dsize += t->model[fid].info.output_info[i].size;
+		else
+			t->model[fid].out_dsize +=
+				t->model[fid].info.output_info[i].nb_elements * sizeof(float);
+	}
 
 	/* allocate buffer for user data */
 	mz_size = t->model[fid].inp_dsize + t->model[fid].out_dsize;
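
For reference, a simplified standalone sketch of the sizing rule applied in
the hunk above (using placeholder types, not the DPDK mldev structures
themselves): pre-quantized buffers are sized from the per-tensor byte size
reported by the model, while dequantized buffers hold one 32-bit float per
element.

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

struct tensor_info {
	uint64_t nb_elements;	/* number of elements in the tensor */
	uint64_t size;		/* bytes in the model's native (quantized) type */
};

static uint64_t
buffer_size(const struct tensor_info *info, uint32_t nb_tensors, bool quantized_io)
{
	uint64_t dsize = 0;
	uint32_t i;

	for (i = 0; i < nb_tensors; i++) {
		if (quantized_io)
			dsize += info[i].size;	/* data is already quantized */
		else
			dsize += info[i].nb_elements * sizeof(float);	/* one float32 per element */
	}

	return dsize;
}

int
main(void)
{
	/* e.g. a single int8 tensor of 1024 elements */
	struct tensor_info in = { .nb_elements = 1024, .size = 1024 };

	printf("quantized: %" PRIu64 " bytes, dequantized: %" PRIu64 " bytes\n",
	       buffer_size(&in, 1, true), buffer_size(&in, 1, false));
	return 0;
}
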
diff --git a/doc/guides/tools/testmldev.rst b/doc/guides/tools/testmldev.rst
index 9b1565a457..55e26eed08 100644
--- a/doc/guides/tools/testmldev.rst
+++ b/doc/guides/tools/testmldev.rst
@@ -89,6 +89,9 @@ The following are the command-line options supported by the test application.
   A suffix ``.q`` is appended to quantized output filename.
   Maximum number of filelist entries supported by the test is ``8``.
 
+``--quantized_io``
+  Disable IO quantization and dequantization.
+
 ``--repetitions <n>``
   Set the number of inference repetitions to be executed in the test per each model.
   Default value is ``1``.
-- 
2.42.0


Thread overview: 5+ messages
2023-10-02 10:02 [PATCH v1 " Srikanth Yalavarthi
2023-10-03  6:01 ` Shivah Shankar Shankar Narayan Rao
2023-10-26 12:49 ` Srikanth Yalavarthi [this message]
2023-10-30  5:15   ` [PATCH v2 " Shivah Shankar Shankar Narayan Rao
2023-11-14 14:08     ` Thomas Monjalon
