DPDK patches and discussions
From: Shivah Shankar Shankar Narayan Rao <sshankarnara@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>, Anup Prabhu <aprabhu@marvell.com>,
	Prince Takkar <ptakkar@marvell.com>
Subject: RE: [PATCH v1 1/1] app/mldev: enable support for pre-quantized I/O
Date: Tue, 3 Oct 2023 06:01:57 +0000	[thread overview]
Message-ID: <PH0PR18MB4766170D9D8BBA3373642584DBC4A@PH0PR18MB4766.namprd18.prod.outlook.com> (raw)
In-Reply-To: <20231002100217.12456-1-syalavarthi@marvell.com>

> -----Original Message-----
> From: Srikanth Yalavarthi <syalavarthi@marvell.com>
> Sent: Monday, October 2, 2023 3:32 PM
> To: Srikanth Yalavarthi <syalavarthi@marvell.com>
> Cc: dev@dpdk.org; Shivah Shankar Shankar Narayan Rao
> <sshankarnara@marvell.com>; Anup Prabhu <aprabhu@marvell.com>;
> Prince Takkar <ptakkar@marvell.com>
> Subject: [PATCH v1 1/1] app/mldev: enable support for pre-quantized I/O
> 
> From: Anup Prabhu <aprabhu@marvell.com>
> 
> Enable support for pre-quantized input and output in the ML test application.
> 
> Signed-off-by: Anup Prabhu <aprabhu@marvell.com>
> ---
> Depends-on: series-29710 ("Spec changes to support multi I/O models")
> 
>  app/test-mldev/ml_options.c            |  8 ++++++++
>  app/test-mldev/ml_options.h            | 28 ++++++++++++++------------
>  app/test-mldev/test_inference_common.c | 20 ++++++++++++------
>  doc/guides/tools/testmldev.rst         |  3 +++
>  4 files changed, 40 insertions(+), 19 deletions(-)
> 
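For anyone trying this out, a minimal invocation sketch (the core mask, device address and file names below are placeholders, and the filelist layout follows the app's existing --filelist <model,input,output[,reference]> convention):

    dpdk-test-mldev -c 0xf -a 0000:00:01.0 -- \
        --test=inference_ordered \
        --filelist=model.bin,input.q.bin,output.q.bin \
        --quantized_io

With --quantized_io the input file is expected to already be in the model's quantized format, and buffer sizes are derived from the per-I/O size reported in the model info rather than from nb_elements * sizeof(float).
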
> diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
> index eeaffec399..7d24f7e2f0 100644
> --- a/app/test-mldev/ml_options.c
> +++ b/app/test-mldev/ml_options.c
> @@ -24,6 +24,7 @@ ml_options_default(struct ml_options *opt)
>  	opt->dev_id = 0;
>  	opt->socket_id = SOCKET_ID_ANY;
>  	opt->nb_filelist = 0;
> +	opt->quantized_io = false;
>  	opt->repetitions = 1;
>  	opt->burst_size = 1;
>  	opt->queue_pairs = 1;
> @@ -269,6 +270,7 @@ static struct option lgopts[] = {
>  	{ML_SOCKET_ID, 1, 0, 0},
>  	{ML_MODELS, 1, 0, 0},
>  	{ML_FILELIST, 1, 0, 0},
> +	{ML_QUANTIZED_IO, 0, 0, 0},
>  	{ML_REPETITIONS, 1, 0, 0},
>  	{ML_BURST_SIZE, 1, 0, 0},
>  	{ML_QUEUE_PAIRS, 1, 0, 0},
> @@ -316,6 +318,11 @@ ml_options_parse(struct ml_options *opt, int argc, char **argv)
>  	while ((opts = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
>  		switch (opts) {
>  		case 0: /* parse long options */
> +			if (!strcmp(lgopts[opt_idx].name, "quantized_io")) {
> +				opt->quantized_io = true;
> +				break;
> +			}
> +
>  			if (!strcmp(lgopts[opt_idx].name, "stats")) {
>  				opt->stats = true;
>  				break;
> @@ -360,4 +367,5 @@ ml_options_dump(struct ml_options *opt)
>  		ml_dump("socket_id", "%d", opt->socket_id);
> 
>  	ml_dump("debug", "%s", (opt->debug ? "true" : "false"));
> +	ml_dump("quantized_io", "%s", (opt->quantized_io ? "true" : "false"));
>  }
> diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
> index 90e22adeac..edb9dba8f7 100644
> --- a/app/test-mldev/ml_options.h
> +++ b/app/test-mldev/ml_options.h
> @@ -12,19 +12,20 @@
>  #define ML_TEST_MAX_MODELS   8
> 
>  /* Options names */
> -#define ML_TEST	       ("test")
> -#define ML_DEVICE_ID   ("dev_id")
> -#define ML_SOCKET_ID   ("socket_id")
> -#define ML_MODELS      ("models")
> -#define ML_FILELIST    ("filelist")
> -#define ML_REPETITIONS ("repetitions")
> -#define ML_BURST_SIZE  ("burst_size")
> -#define ML_QUEUE_PAIRS ("queue_pairs")
> -#define ML_QUEUE_SIZE  ("queue_size")
> -#define ML_TOLERANCE   ("tolerance")
> -#define ML_STATS       ("stats")
> -#define ML_DEBUG       ("debug")
> -#define ML_HELP	       ("help")
> +#define ML_TEST		("test")
> +#define ML_DEVICE_ID	("dev_id")
> +#define ML_SOCKET_ID	("socket_id")
> +#define ML_MODELS	("models")
> +#define ML_FILELIST	("filelist")
> +#define ML_QUANTIZED_IO ("quantized_io")
> +#define ML_REPETITIONS	("repetitions")
> +#define ML_BURST_SIZE	("burst_size")
> +#define ML_QUEUE_PAIRS	("queue_pairs")
> +#define ML_QUEUE_SIZE	("queue_size")
> +#define ML_TOLERANCE	("tolerance")
> +#define ML_STATS	("stats")
> +#define ML_DEBUG	("debug")
> +#define ML_HELP		("help")
> 
>  struct ml_filelist {
>  	char model[PATH_MAX];
> @@ -46,6 +47,7 @@ struct ml_options {
>  	float tolerance;
>  	bool stats;
>  	bool debug;
> +	bool quantized_io;
>  };
> 
>  void ml_options_default(struct ml_options *opt);
> diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
> index 846f71abb1..36629210ee 100644
> --- a/app/test-mldev/test_inference_common.c
> +++ b/app/test-mldev/test_inference_common.c
> @@ -777,14 +777,22 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
>  	}
> 
>  	t->model[fid].inp_dsize = 0;
> -	for (i = 0; i < t->model[fid].info.nb_inputs; i++)
> -		t->model[fid].inp_dsize +=
> -			t->model[fid].info.input_info[i].nb_elements * sizeof(float);
> +	for (i = 0; i < t->model[fid].info.nb_inputs; i++) {
> +		if (opt->quantized_io)
> +			t->model[fid].inp_dsize += t->model[fid].info.input_info[i].size;
> +		else
> +			t->model[fid].inp_dsize +=
> +				t->model[fid].info.input_info[i].nb_elements * sizeof(float);
> +	}
> 
>  	t->model[fid].out_dsize = 0;
> -	for (i = 0; i < t->model[fid].info.nb_outputs; i++)
> -		t->model[fid].out_dsize +=
> -			t->model[fid].info.output_info[i].nb_elements * sizeof(float);
> +	for (i = 0; i < t->model[fid].info.nb_outputs; i++) {
> +		if (opt->quantized_io)
> +			t->model[fid].out_dsize += t->model[fid].info.output_info[i].size;
> +		else
> +			t->model[fid].out_dsize +=
> +				t->model[fid].info.output_info[i].nb_elements * sizeof(float);
> +	}
> 
>  	/* allocate buffer for user data */
>  	mz_size = t->model[fid].inp_dsize + t->model[fid].out_dsize;
> diff --git a/doc/guides/tools/testmldev.rst b/doc/guides/tools/testmldev.rst
> index 9b1565a457..55e26eed08 100644
> --- a/doc/guides/tools/testmldev.rst
> +++ b/doc/guides/tools/testmldev.rst
> @@ -89,6 +89,9 @@ The following are the command-line options supported by the test application.
>    A suffix ``.q`` is appended to quantized output filename.
>    Maximum number of filelist entries supported by the test is ``8``.
> 
> +``--quantized_io``
> +  Disable IO quantization and dequantization.
> +
>  ``--repetitions <n>``
>    Set the number of inference repetitions to be executed in the test per each model.
>    Default value is ``1``.
> --
> 2.41.0
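
For readers skimming the archive, the sizing this patch ends up with boils down to the loop below; this is only a consolidated restatement of the test_inference_common.c hunk above, not additional code:

	/* Input buffer size: the model's native quantized size when
	 * --quantized_io is given, otherwise one float32 per element. */
	t->model[fid].inp_dsize = 0;
	for (i = 0; i < t->model[fid].info.nb_inputs; i++) {
		if (opt->quantized_io)
			t->model[fid].inp_dsize += t->model[fid].info.input_info[i].size;
		else
			t->model[fid].inp_dsize += t->model[fid].info.input_info[i].nb_elements * sizeof(float);
	}

The output loop is identical with out_dsize and output_info[] substituted.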

Acked-by: Shivah Shankar S <sshankarnara@marvell.com>


Thread overview: 5+ messages
2023-10-02 10:02 Srikanth Yalavarthi
2023-10-03  6:01 ` Shivah Shankar Shankar Narayan Rao [this message]
2023-10-26 12:49 ` [PATCH v2 " Srikanth Yalavarthi
2023-10-30  5:15   ` Shivah Shankar Shankar Narayan Rao
2023-11-14 14:08     ` Thomas Monjalon
