DPDK patches and discussions
From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <jerinj@marvell.com>
Subject: [PATCH v2 08/12] app/mldev: enable support for queue pairs and size
Date: Tue, 29 Nov 2022 00:21:05 -0800
Message-ID: <20221129082109.6809-8-syalavarthi@marvell.com>
In-Reply-To: <20221129082109.6809-1-syalavarthi@marvell.com>

Added support to create multiple queue-pairs per device for
enqueuing and dequeuing inference requests. The number of queue
pairs to create can be specified through the "--queue_pairs"
option, and the number of descriptors per queue pair through
the "--queue_size" option. Inference requests for a model are
distributed across all available queue-pairs.
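
For illustration only (not part of the patch), a minimal sketch of the
request distribution added to ml_inference_launch_cores(), using
hypothetical example values. Each queue pair is driven by one enqueue
lcore and one dequeue lcore, hence the minimum lcore count of
(queue_pairs * 2 + 1) checked in this patch.

    #include <stdio.h>

    int main(void)
    {
            unsigned int repetitions = 100; /* --repetitions */
            unsigned int queue_pairs = 3;   /* --queue_pairs */
            unsigned int nb_reqs = repetitions / queue_pairs;       /* 33 */
            unsigned int rem = repetitions - nb_reqs * queue_pairs; /* 1 */

            /* Queue pair 0 absorbs the remainder; the rest get nb_reqs each. */
            printf("qp 0: %u requests\n", nb_reqs + rem);                    /* 34 */
            printf("qp 1-%u: %u requests each\n", queue_pairs - 1, nb_reqs); /* 33 */

            return 0;
    }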

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 app/test-mldev/ml_options.c            | 40 ++++++++++---
 app/test-mldev/ml_options.h            |  4 ++
 app/test-mldev/test_common.c           |  2 +-
 app/test-mldev/test_inference_common.c | 79 +++++++++++++++++++++-----
 app/test-mldev/test_inference_common.h |  1 +
 5 files changed, 102 insertions(+), 24 deletions(-)
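
As a reviewer aid (not part of the patch), below is a minimal standalone
sketch of the setup sequence the diff moves the app to: configure the
device for the requested number of queue pairs, then create each queue
pair with --queue_size descriptors. It mirrors the rte_mldev calls used
in the diff; the header name, device id type and error handling are
assumptions.

    #include <errno.h>
    #include <stdint.h>
    #include <rte_mldev.h>

    static int
    setup_queue_pairs(int16_t dev_id, int socket_id, uint16_t queue_pairs,
                      uint16_t queue_size)
    {
            struct rte_ml_dev_info dev_info;
            struct rte_ml_dev_config dev_config;
            struct rte_ml_dev_qp_conf qp_conf;
            uint16_t qp_id;
            int ret;

            /* Reject requests beyond the device limits. */
            rte_ml_dev_info_get(dev_id, &dev_info);
            if (queue_pairs > dev_info.max_queue_pairs ||
                queue_size > dev_info.max_desc)
                    return -EINVAL;

            /* Configure the device with the requested number of queue pairs. */
            dev_config.socket_id = socket_id;
            dev_config.nb_models = dev_info.max_models;
            dev_config.nb_queue_pairs = queue_pairs;
            ret = rte_ml_dev_configure(dev_id, &dev_config);
            if (ret != 0)
                    return ret;

            /* Create each queue pair with queue_size descriptors. */
            for (qp_id = 0; qp_id < queue_pairs; qp_id++) {
                    qp_conf.nb_desc = queue_size;
                    qp_conf.cb = NULL;
                    ret = rte_ml_dev_queue_pair_setup(dev_id, qp_id, &qp_conf,
                                                      socket_id);
                    if (ret != 0)
                            return ret;
            }

            return 0;
    }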

diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 1990939200..d5182a1701 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -31,6 +31,8 @@ ml_options_default(struct ml_options *opt)
 	opt->nb_filelist = 0;
 	opt->repetitions = 1;
 	opt->burst_size = 1;
+	opt->queue_pairs = 1;
+	opt->queue_size = 1;
 	opt->debug = false;
 }
 
@@ -158,11 +160,30 @@ ml_parse_burst_size(struct ml_options *opt, const char *arg)
 	return parser_read_uint16(&opt->burst_size, arg);
 }
 
+static int
+ml_parse_queue_pairs(struct ml_options *opt, const char *arg)
+{
+	int ret;
+
+	ret = parser_read_uint16(&opt->queue_pairs, arg);
+
+	return ret;
+}
+
+static int
+ml_parse_queue_size(struct ml_options *opt, const char *arg)
+{
+	return parser_read_uint16(&opt->queue_size, arg);
+}
+
 static void
 ml_dump_test_options(const char *testname)
 {
-	if (strcmp(testname, "device_ops") == 0)
+	if (strcmp(testname, "device_ops") == 0) {
+		printf("\t\t--queue_pairs      : number of queue pairs to create\n"
+		       "\t\t--queue_size       : size of queue-pair\n");
 		printf("\n");
+	}
 
 	if (strcmp(testname, "model_ops") == 0) {
 		printf("\t\t--models           : comma separated list of models\n");
@@ -173,7 +194,9 @@ ml_dump_test_options(const char *testname)
 	    (strcmp(testname, "inference_interleave") == 0)) {
 		printf("\t\t--filelist         : comma separated list of model, input and output\n"
 		       "\t\t--repetitions      : number of inference repetitions\n"
-		       "\t\t--burst_size       : inference burst size\n");
+		       "\t\t--burst_size       : inference burst size\n"
+		       "\t\t--queue_pairs      : number of queue pairs to create\n"
+		       "\t\t--queue_size       : size of queue-pair\n");
 		printf("\n");
 	}
 }
@@ -193,11 +216,11 @@ print_usage(char *program)
 	ml_test_dump_names(ml_dump_test_options);
 }
 
-static struct option lgopts[] = {{ML_TEST, 1, 0, 0},	   {ML_DEVICE_ID, 1, 0, 0},
-				 {ML_SOCKET_ID, 1, 0, 0},  {ML_MODELS, 1, 0, 0},
-				 {ML_FILELIST, 1, 0, 0},   {ML_REPETITIONS, 1, 0, 0},
-				 {ML_BURST_SIZE, 1, 0, 0}, {ML_DEBUG, 0, 0, 0},
-				 {ML_HELP, 0, 0, 0},	   {NULL, 0, 0, 0}};
+static struct option lgopts[] = {
+	{ML_TEST, 1, 0, 0},	  {ML_DEVICE_ID, 1, 0, 0},   {ML_SOCKET_ID, 1, 0, 0},
+	{ML_MODELS, 1, 0, 0},	  {ML_FILELIST, 1, 0, 0},    {ML_REPETITIONS, 1, 0, 0},
+	{ML_BURST_SIZE, 1, 0, 0}, {ML_QUEUE_PAIRS, 1, 0, 0}, {ML_QUEUE_SIZE, 1, 0, 0},
+	{ML_DEBUG, 0, 0, 0},	  {ML_HELP, 0, 0, 0},	     {NULL, 0, 0, 0}};
 
 static int
 ml_opts_parse_long(int opt_idx, struct ml_options *opt)
@@ -208,7 +231,8 @@ ml_opts_parse_long(int opt_idx, struct ml_options *opt)
 		{ML_TEST, ml_parse_test_name},	      {ML_DEVICE_ID, ml_parse_dev_id},
 		{ML_SOCKET_ID, ml_parse_socket_id},   {ML_MODELS, ml_parse_models},
 		{ML_FILELIST, ml_parse_filelist},     {ML_REPETITIONS, ml_parse_repetitions},
-		{ML_BURST_SIZE, ml_parse_burst_size},
+		{ML_BURST_SIZE, ml_parse_burst_size}, {ML_QUEUE_PAIRS, ml_parse_queue_pairs},
+		{ML_QUEUE_SIZE, ml_parse_queue_size},
 	};
 
 	for (i = 0; i < RTE_DIM(parsermap); i++) {
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index 305b39629a..6bfef1b979 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -20,6 +20,8 @@
 #define ML_FILELIST    ("filelist")
 #define ML_REPETITIONS ("repetitions")
 #define ML_BURST_SIZE  ("burst_size")
+#define ML_QUEUE_PAIRS ("queue_pairs")
+#define ML_QUEUE_SIZE  ("queue_size")
 #define ML_DEBUG       ("debug")
 #define ML_HELP	       ("help")
 
@@ -37,6 +39,8 @@ struct ml_options {
 	uint8_t nb_filelist;
 	uint64_t repetitions;
 	uint16_t burst_size;
+	uint16_t queue_pairs;
+	uint16_t queue_size;
 	bool debug;
 };
 
diff --git a/app/test-mldev/test_common.c b/app/test-mldev/test_common.c
index b6b32904e4..22e6acb3b6 100644
--- a/app/test-mldev/test_common.c
+++ b/app/test-mldev/test_common.c
@@ -78,7 +78,7 @@ ml_test_device_configure(struct ml_test *test, struct ml_options *opt)
 	/* configure device */
 	dev_config.socket_id = opt->socket_id;
 	dev_config.nb_models = t->dev_info.max_models;
-	dev_config.nb_queue_pairs = t->dev_info.max_queue_pairs;
+	dev_config.nb_queue_pairs = opt->queue_pairs;
 	ret = rte_ml_dev_configure(opt->dev_id, &dev_config);
 	if (ret != 0) {
 		ml_err("Failed to configure ml device, dev_id = %d\n", opt->dev_id);
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index 252d77616c..d680d68f3d 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -72,7 +72,7 @@ ml_enqueue_single(void *arg)
 	req->fid = fid;
 
 enqueue_req:
-	burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, 0, &op, 1);
+	burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, args->qp_id, &op, 1);
 	if (burst_enq == 0)
 		goto enqueue_req;
 
@@ -109,7 +109,7 @@ ml_dequeue_single(void *arg)
 		return 0;
 
 dequeue_req:
-	burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, 0, &op, 1);
+	burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, args->qp_id, &op, 1);
 
 	if (likely(burst_deq == 1)) {
 		total_deq += burst_deq;
@@ -188,7 +188,8 @@ ml_enqueue_burst(void *arg)
 	pending = ops_count;
 
 enqueue_reqs:
-	burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, 0, &args->enq_ops[idx], pending);
+	burst_enq =
+		rte_ml_enqueue_burst(t->cmn.opt->dev_id, args->qp_id, &args->enq_ops[idx], pending);
 	pending = pending - burst_enq;
 
 	if (pending > 0) {
@@ -229,8 +230,8 @@ ml_dequeue_burst(void *arg)
 		return 0;
 
 dequeue_burst:
-	burst_deq =
-		rte_ml_dequeue_burst(t->cmn.opt->dev_id, 0, args->deq_ops, t->cmn.opt->burst_size);
+	burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, args->qp_id, args->deq_ops,
+					 t->cmn.opt->burst_size);
 
 	if (likely(burst_deq > 0)) {
 		total_deq += burst_deq;
@@ -263,6 +264,19 @@ test_inference_cap_check(struct ml_options *opt)
 		return false;
 
 	rte_ml_dev_info_get(opt->dev_id, &dev_info);
+
+	if (opt->queue_pairs > dev_info.max_queue_pairs) {
+		ml_err("Insufficient capabilities: queue_pairs = %u, max_queue_pairs = %u",
+		       opt->queue_pairs, dev_info.max_queue_pairs);
+		return false;
+	}
+
+	if (opt->queue_size > dev_info.max_desc) {
+		ml_err("Insufficient capabilities: queue_size = %u, max_desc = %u", opt->queue_size,
+		       dev_info.max_desc);
+		return false;
+	}
+
 	if (opt->nb_filelist > dev_info.max_models) {
 		ml_err("Insufficient capabilities:  Filelist count exceeded device limit, count = %u (max limit = %u)",
 		       opt->nb_filelist, dev_info.max_models);
@@ -314,10 +328,21 @@ test_inference_opt_check(struct ml_options *opt)
 		return -EINVAL;
 	}
 
+	if (opt->queue_pairs == 0) {
+		ml_err("Invalid option, queue_pairs = %u\n", opt->queue_pairs);
+		return -EINVAL;
+	}
+
+	if (opt->queue_size == 0) {
+		ml_err("Invalid option, queue_size = %u\n", opt->queue_size);
+		return -EINVAL;
+	}
+
 	/* check number of available lcores. */
-	if (rte_lcore_count() < 3) {
+	if (rte_lcore_count() < (uint32_t)(opt->queue_pairs * 2 + 1)) {
 		ml_err("Insufficient lcores = %u\n", rte_lcore_count());
-		ml_err("Minimum lcores required to create %u queue-pairs = %u\n", 1, 3);
+		ml_err("Minimum lcores required to create %u queue-pairs = %u\n", opt->queue_pairs,
+		       (opt->queue_pairs * 2 + 1));
 		return -EINVAL;
 	}
 
@@ -335,6 +360,8 @@ test_inference_opt_dump(struct ml_options *opt)
 	/* dump test opts */
 	ml_dump("repetitions", "%" PRIu64, opt->repetitions);
 	ml_dump("burst_size", "%u", opt->burst_size);
+	ml_dump("queue_pairs", "%u", opt->queue_pairs);
+	ml_dump("queue_size", "%u", opt->queue_size);
 
 	ml_dump_begin("filelist");
 	for (i = 0; i < opt->nb_filelist; i++) {
@@ -425,23 +452,31 @@ ml_inference_mldev_setup(struct ml_test *test, struct ml_options *opt)
 {
 	struct rte_ml_dev_qp_conf qp_conf;
 	struct test_inference *t;
+	uint16_t qp_id;
 	int ret;
 
 	t = ml_test_priv(test);
 
+	RTE_SET_USED(t);
+
 	ret = ml_test_device_configure(test, opt);
 	if (ret != 0)
 		return ret;
 
 	/* setup queue pairs */
-	qp_conf.nb_desc = t->cmn.dev_info.max_desc;
+	qp_conf.nb_desc = opt->queue_size;
 	qp_conf.cb = NULL;
 
-	ret = rte_ml_dev_queue_pair_setup(opt->dev_id, 0, &qp_conf, opt->socket_id);
-	if (ret != 0) {
-		ml_err("Failed to setup ml device queue-pair, dev_id = %d, qp_id = %u\n",
-		       opt->dev_id, 0);
-		goto error;
+	for (qp_id = 0; qp_id < opt->queue_pairs; qp_id++) {
+		qp_conf.nb_desc = opt->queue_size;
+		qp_conf.cb = NULL;
+
+		ret = rte_ml_dev_queue_pair_setup(opt->dev_id, qp_id, &qp_conf, opt->socket_id);
+		if (ret != 0) {
+			ml_err("Failed to setup ml device queue-pair, dev_id = %d, qp_id = %u\n",
+			       opt->dev_id, qp_id);
+			return ret;
+		}
 	}
 
 	ret = ml_test_device_start(test, opt);
@@ -697,14 +732,28 @@ ml_inference_launch_cores(struct ml_test *test, struct ml_options *opt, int16_t
 {
 	struct test_inference *t = ml_test_priv(test);
 	uint32_t lcore_id;
+	uint32_t nb_reqs;
 	uint32_t id = 0;
+	uint32_t qp_id;
+
+	nb_reqs = opt->repetitions / opt->queue_pairs;
 
 	RTE_LCORE_FOREACH_WORKER(lcore_id)
 	{
-		if (id == 2)
+		if (id >= opt->queue_pairs * 2)
 			break;
 
-		t->args[lcore_id].nb_reqs = opt->repetitions;
+		qp_id = id / 2;
+		t->args[lcore_id].qp_id = qp_id;
+		t->args[lcore_id].nb_reqs = nb_reqs;
+		if (qp_id == 0)
+			t->args[lcore_id].nb_reqs += opt->repetitions - nb_reqs * opt->queue_pairs;
+
+		if (t->args[lcore_id].nb_reqs == 0) {
+			id++;
+			break;
+		}
+
 		t->args[lcore_id].start_fid = start_fid;
 		t->args[lcore_id].end_fid = end_fid;
 
diff --git a/app/test-mldev/test_inference_common.h b/app/test-mldev/test_inference_common.h
index 75d588308b..1bac2dcfa0 100644
--- a/app/test-mldev/test_inference_common.h
+++ b/app/test-mldev/test_inference_common.h
@@ -27,6 +27,7 @@ struct ml_core_args {
 	uint64_t nb_reqs;
 	int16_t start_fid;
 	int16_t end_fid;
+	uint32_t qp_id;
 
 	struct rte_ml_op **enq_ops;
 	struct rte_ml_op **deq_ops;
-- 
2.17.1


Thread overview: 122+ messages
2022-11-29  7:07 [PATCH v1 00/12] implement mldev test application Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2022-11-29  8:20   ` [PATCH v2 " Srikanth Yalavarthi
2022-11-29  8:20     ` [PATCH v2 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-11-29  8:21     ` [PATCH v2 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-11-29  8:21     ` [PATCH v2 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-11-29  8:21     ` [PATCH v2 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-11-29  8:21     ` [PATCH v2 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-11-29  8:21     ` [PATCH v2 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-11-29  8:21     ` Srikanth Yalavarthi [this message]
2022-11-29  8:21     ` [PATCH v2 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-11-29  8:21     ` [PATCH v2 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-11-29  8:21     ` [PATCH v2 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2022-11-29  8:21     ` [PATCH v2 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2022-12-08 19:29     ` [PATCH v3 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-12-08 19:29       ` [PATCH v3 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-02-03  9:49         ` Anup Prabhu
2022-12-08 19:29       ` [PATCH v3 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-02 12:39         ` Anup Prabhu
2022-11-29  7:07 ` [PATCH v1 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2022-11-29  7:07 ` [PATCH v1 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-07 15:49 ` [PATCH v4 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-02-07 15:49   ` [PATCH v4 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-02-14  4:55     ` Shivah Shankar Shankar Narayan Rao
2023-03-03  8:15     ` Anup Prabhu
2023-02-07 15:49   ` [PATCH v4 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-02-23  9:03     ` Anup Prabhu
2023-02-07 15:49   ` [PATCH v4 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-01  5:35     ` Anup Prabhu
2023-02-07 15:49   ` [PATCH v4 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-02  2:58     ` Anup Prabhu
2023-03-09 18:42     ` Thomas Monjalon
2023-03-10  2:55       ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49   ` [PATCH v4 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-02-27  6:11     ` Anup Prabhu
2023-03-09 20:06     ` Thomas Monjalon
2023-03-10  8:13       ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49   ` [PATCH v4 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-02-20  6:31     ` Anup Prabhu
2023-03-09 20:15     ` Thomas Monjalon
2023-03-10  8:14       ` [EXT] " Srikanth Yalavarthi
2023-02-07 15:49   ` [PATCH v4 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-02-20 10:11     ` Anup Prabhu
2023-02-07 15:49   ` [PATCH v4 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-02  8:15     ` Anup Prabhu
2023-02-07 15:49   ` [PATCH v4 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-02-27  3:46     ` Anup Prabhu
2023-02-07 15:49   ` [PATCH v4 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-02-16 12:23     ` Anup Prabhu
2023-02-07 15:49   ` [PATCH v4 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-02-16  4:21     ` Anup Prabhu
2023-02-07 15:49   ` [PATCH v4 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-02-15 12:26     ` Shivah Shankar Shankar Narayan Rao
2023-03-03  6:07     ` Anup Prabhu
2023-03-10  8:09 ` [PATCH v5 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-10  8:09   ` [PATCH v5 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-03-11 15:08 ` [PATCH v6 00/12] Implementation of mldev test application Srikanth Yalavarthi
2023-03-11 15:08   ` [PATCH v6 01/12] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-11 15:08   ` [PATCH v6 02/12] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-11 15:08   ` [PATCH v6 03/12] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-11 15:08   ` [PATCH v6 04/12] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-11 15:08   ` [PATCH v6 05/12] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-16 17:45     ` Thomas Monjalon
2023-03-16 17:47       ` [EXT] " Srikanth Yalavarthi
2023-03-16 18:01         ` Thomas Monjalon
2023-03-16 21:31           ` Srikanth Yalavarthi
2023-03-11 15:08   ` [PATCH v6 06/12] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-11 15:09   ` [PATCH v6 07/12] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-11 15:09   ` [PATCH v6 08/12] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-11 15:09   ` [PATCH v6 09/12] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-16 17:47     ` Thomas Monjalon
2023-03-16 17:52       ` [EXT] " Srikanth Yalavarthi
2023-03-11 15:09   ` [PATCH v6 10/12] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-11 15:09   ` [PATCH v6 11/12] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-11 15:09   ` [PATCH v6 12/12] app/mldev: add documentation for mldev test cases Srikanth Yalavarthi
2023-03-16 17:50     ` Thomas Monjalon
2023-03-16 17:56       ` [EXT] " Srikanth Yalavarthi
2023-03-16 18:03         ` Thomas Monjalon
2023-03-16 18:07           ` Srikanth Yalavarthi
2023-03-16 21:32             ` Srikanth Yalavarthi
2023-03-16 21:14 ` [PATCH v7 00/11] Implementation of mldev test application Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 01/11] app/mldev: implement test framework for mldev Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 02/11] app/mldev: add common test functions Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 03/11] app/mldev: add test case to validate device ops Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 04/11] app/mldev: add test case to validate model ops Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 05/11] app/mldev: add ordered inference test case Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 06/11] app/mldev: add test case to interleave inferences Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 07/11] app/mldev: enable support for burst inferences Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 08/11] app/mldev: enable support for queue pairs and size Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 09/11] app/mldev: enable support for inference batches Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 10/11] app/mldev: enable support for inference validation Srikanth Yalavarthi
2023-03-16 21:14   ` [PATCH v7 11/11] app/mldev: enable reporting stats in mldev app Srikanth Yalavarthi
2023-03-19 22:08   ` [PATCH v7 00/11] Implementation of mldev test application Thomas Monjalon
