From: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com>
To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
	Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com>
Subject: [dpdk-dev] [PATCH v2] app/eventdev: add option to set global dequeue tmo
Date: Wed, 27 Mar 2019 17:07:30 +0000	[thread overview]
Message-ID: <20190327170640.7096-1-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20190319061845.12343-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add an option to provide a global dequeue timeout that is used when
creating the eventdev.
The dequeue timeout provided is common across all the worker ports.
If the eventdev hardware supports power management through the dequeue
timeout, this option can be used to verify the power demand at various
packet rates.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 v2 Changes:
 - Update documentation and fix indentation.
 - Add a check to verify that the provided dequeue timeout is within the
   supported range and adjust it as required.
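
 As a side note, the sketch below (not part of the patch; the helper name and
 parameters are illustrative) shows how the clamping described above maps onto
 the eventdev API: the requested timeout is bounded by the limits reported by
 rte_event_dev_info_get() and then applied device-wide at configure time, so
 the same timeout governs waiting dequeues on all worker ports.

 #include <rte_eventdev.h>

 /* Illustrative helper, not part of the patch. */
 static int
 configure_with_global_deq_tmo(uint8_t dev_id, uint32_t deq_tmo_nsec,
			       uint8_t nb_queues, uint8_t nb_ports,
			       uint32_t nb_flows)
 {
	struct rte_event_dev_info info;

	rte_event_dev_info_get(dev_id, &info);

	/* Clamp the requested timeout to the device-supported range. */
	if (deq_tmo_nsec) {
		if (deq_tmo_nsec < info.min_dequeue_timeout_ns)
			deq_tmo_nsec = info.min_dequeue_timeout_ns;
		if (deq_tmo_nsec > info.max_dequeue_timeout_ns)
			deq_tmo_nsec = info.max_dequeue_timeout_ns;
	}

	const struct rte_event_dev_config config = {
		/* 0 selects the device's default dequeue timeout. */
		.dequeue_timeout_ns = deq_tmo_nsec,
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_events_limit = info.max_num_events,
		.nb_event_queue_flows = nb_flows,
		.nb_event_port_dequeue_depth =
			info.max_event_port_dequeue_depth,
		.nb_event_port_enqueue_depth =
			info.max_event_port_enqueue_depth,
	};

	return rte_event_dev_configure(dev_id, &config);
 }

 With a non-zero global timeout configured this way, an illustrative run such
 as "--test=perf_queue --plcores 1 --wlcores 2,3 --deq_tmo_nsec=10000" lets
 the workers wait in the dequeue call instead of busy polling on hardware that
 supports power management through the dequeue timeout, which is what makes
 the power measurements described above meaningful.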

 app/test-eventdev/evt_options.c         | 13 ++++++++
 app/test-eventdev/evt_options.h         |  2 ++
 app/test-eventdev/test_order_atq.c      | 25 ++++++++++++---
 app/test-eventdev/test_order_queue.c    | 25 ++++++++++++---
 app/test-eventdev/test_perf_atq.c       | 14 +++++++++
 app/test-eventdev/test_perf_queue.c     | 14 +++++++++
 app/test-eventdev/test_pipeline_atq.c   | 14 +++++++++
 app/test-eventdev/test_pipeline_queue.c | 15 +++++++++
 doc/guides/tools/testeventdev.rst       | 41 ++++++++++++++++---------
 9 files changed, 141 insertions(+), 22 deletions(-)

diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c
index cfa43a165..af5c08bac 100644
--- a/app/test-eventdev/evt_options.c
+++ b/app/test-eventdev/evt_options.c
@@ -84,6 +84,16 @@ evt_parse_queue_priority(struct evt_options *opt, const char *arg __rte_unused)
 	return 0;
 }

+static int
+evt_parse_deq_tmo_nsec(struct evt_options *opt, const char *arg)
+{
+	int ret;
+
+	ret = parser_read_uint32(&(opt->deq_tmo_nsec), arg);
+
+	return ret;
+}
+
 static int
 evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused)
 {
@@ -240,6 +250,7 @@ usage(char *program)
 		"\t--worker_deq_depth : dequeue depth of the worker\n"
 		"\t--fwd_latency      : perform fwd_latency measurement\n"
 		"\t--queue_priority   : enable queue priority\n"
+		"\t--deq_tmo_nsec     : global dequeue timeout\n"
 		"\t--prod_type_ethdev : use ethernet device as producer.\n"
 		"\t--prod_type_timerdev : use event timer device as producer.\n"
 		"\t                     expity_nsec would be the timeout\n"
@@ -311,6 +322,7 @@ static struct option lgopts[] = {
 	{ EVT_SCHED_TYPE_LIST,     1, 0, 0 },
 	{ EVT_FWD_LATENCY,         0, 0, 0 },
 	{ EVT_QUEUE_PRIORITY,      0, 0, 0 },
+	{ EVT_DEQ_TMO_NSEC,        1, 0, 0 },
 	{ EVT_PROD_ETHDEV,         0, 0, 0 },
 	{ EVT_PROD_TIMERDEV,       0, 0, 0 },
 	{ EVT_PROD_TIMERDEV_BURST, 0, 0, 0 },
@@ -342,6 +354,7 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt)
 		{ EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list},
 		{ EVT_FWD_LATENCY, evt_parse_fwd_latency},
 		{ EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
+		{ EVT_DEQ_TMO_NSEC, evt_parse_deq_tmo_nsec},
 		{ EVT_PROD_ETHDEV, evt_parse_eth_prod_type},
 		{ EVT_PROD_TIMERDEV, evt_parse_timer_prod_type},
 		{ EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst},
diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h
index f3de48a17..c486c4185 100644
--- a/app/test-eventdev/evt_options.h
+++ b/app/test-eventdev/evt_options.h
@@ -31,6 +31,7 @@
 #define EVT_SCHED_TYPE_LIST      ("stlist")
 #define EVT_FWD_LATENCY          ("fwd_latency")
 #define EVT_QUEUE_PRIORITY       ("queue_priority")
+#define EVT_DEQ_TMO_NSEC         ("deq_tmo_nsec")
 #define EVT_PROD_ETHDEV          ("prod_type_ethdev")
 #define EVT_PROD_TIMERDEV        ("prod_type_timerdev")
 #define EVT_PROD_TIMERDEV_BURST  ("prod_type_timerdev_burst")
@@ -71,6 +72,7 @@ struct evt_options {
 	uint8_t dev_id;
 	uint32_t fwd_latency:1;
 	uint32_t q_priority:1;
+	uint32_t deq_tmo_nsec;
 	enum evt_prod_type prod_type;
 	uint8_t timdev_use_burst;
 	uint8_t timdev_cnt;
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 4a8546e7f..0e98cc07e 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -113,18 +113,35 @@ static int
 order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 {
 	int ret;
-
 	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
 	/* number of active worker cores + 1 producer */
 	const uint8_t nb_ports = nb_workers + 1;
+	struct rte_event_dev_info info;
+
+	rte_event_dev_info_get(opt->dev_id, &info);
+	if (opt->deq_tmo_nsec) {
+		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too low, using %d",
+					opt->deq_tmo_nsec);
+		}
+		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too high, using %d",
+					opt->deq_tmo_nsec);
+		}
+	}

 	const struct rte_event_dev_config config = {
+			.dequeue_timeout_ns = opt->deq_tmo_nsec,
 			.nb_event_queues = NB_QUEUES,/* one all types queue */
 			.nb_event_ports = nb_ports,
-			.nb_events_limit  = 4096,
+			.nb_events_limit  = info.max_num_events,
 			.nb_event_queue_flows = opt->nb_flows,
-			.nb_event_port_dequeue_depth = 128,
-			.nb_event_port_enqueue_depth = 128,
+			.nb_event_port_dequeue_depth =
+				info.max_event_port_dequeue_depth,
+			.nb_event_port_enqueue_depth =
+				info.max_event_port_enqueue_depth,
 	};

 	ret = rte_event_dev_configure(opt->dev_id, &config);
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index a272c7a57..aba824a96 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -113,18 +113,35 @@ static int
 order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 {
 	int ret;
-
 	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
 	/* number of active worker cores + 1 producer */
 	const uint8_t nb_ports = nb_workers + 1;
+	struct rte_event_dev_info info;
+
+	rte_event_dev_info_get(opt->dev_id, &info);
+	if (opt->deq_tmo_nsec) {
+		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too low, using %d",
+					opt->deq_tmo_nsec);
+		}
+		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too high, using %d",
+					opt->deq_tmo_nsec);
+		}
+	}

 	const struct rte_event_dev_config config = {
+			.dequeue_timeout_ns = opt->deq_tmo_nsec,
 			.nb_event_queues = NB_QUEUES,/* q0 ordered, q1 atomic */
 			.nb_event_ports = nb_ports,
-			.nb_events_limit  = 4096,
+			.nb_events_limit  = info.max_num_events,
 			.nb_event_queue_flows = opt->nb_flows,
-			.nb_event_port_dequeue_depth = 128,
-			.nb_event_port_enqueue_depth = 128,
+			.nb_event_port_dequeue_depth =
+				info.max_event_port_dequeue_depth,
+			.nb_event_port_enqueue_depth =
+				info.max_event_port_enqueue_depth,
 	};

 	ret = rte_event_dev_configure(opt->dev_id, &config);
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 73f31e564..a762370f5 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -175,7 +175,21 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		return ret;
 	}

+	if (opt->deq_tmo_nsec) {
+		if (opt->deq_tmo_nsec < dev_info.min_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = dev_info.min_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too low, using %d",
+					opt->deq_tmo_nsec);
+		}
+		if (opt->deq_tmo_nsec > dev_info.max_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = dev_info.max_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too high, using %d",
+					opt->deq_tmo_nsec);
+		}
+	}
+
 	const struct rte_event_dev_config config = {
+			.dequeue_timeout_ns = opt->deq_tmo_nsec,
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
 			.nb_events_limit  = dev_info.max_num_events,
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index d89491364..5d5f8cada 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -176,7 +176,21 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		return ret;
 	}

+	if (opt->deq_tmo_nsec) {
+		if (opt->deq_tmo_nsec < dev_info.min_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = dev_info.min_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too low, using %d",
+					opt->deq_tmo_nsec);
+		}
+		if (opt->deq_tmo_nsec > dev_info.max_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = dev_info.max_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too high, using %d",
+					opt->deq_tmo_nsec);
+		}
+	}
+
 	const struct rte_event_dev_config config = {
+			.dequeue_timeout_ns = opt->deq_tmo_nsec,
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
 			.nb_events_limit  = dev_info.max_num_events,
diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index 998a56cfd..a05170353 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -314,7 +314,21 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)

 	rte_event_dev_info_get(opt->dev_id, &info);

+	if (opt->deq_tmo_nsec) {
+		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too low, using %d",
+					opt->deq_tmo_nsec);
+		}
+		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too high, using %d",
+					opt->deq_tmo_nsec);
+		}
+	}
+
 	const struct rte_event_dev_config config = {
+		.dequeue_timeout_ns = opt->deq_tmo_nsec,
 		.nb_event_queues = nb_queues,
 		.nb_event_ports = nb_ports,
 		.nb_events_limit  = info.max_num_events,
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index 7da89dad5..c1b190777 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -334,7 +334,22 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 	memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);

 	rte_event_dev_info_get(opt->dev_id, &info);
+
+	if (opt->deq_tmo_nsec) {
+		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too low, using %d",
+					opt->deq_tmo_nsec);
+		}
+		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too high, using %d",
+					opt->deq_tmo_nsec);
+		}
+	}
+
 	const struct rte_event_dev_config config = {
+			.dequeue_timeout_ns = opt->deq_tmo_nsec,
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
 			.nb_events_limit  = info.max_num_events,
diff --git a/doc/guides/tools/testeventdev.rst b/doc/guides/tools/testeventdev.rst
index cddba3be0..db5c4378b 100644
--- a/doc/guides/tools/testeventdev.rst
+++ b/doc/guides/tools/testeventdev.rst
@@ -129,31 +129,38 @@ The following are the application command-line options:

         Use event timer adapter as producer.

- * ``--prod_type_timerdev_burst``
+* ``--prod_type_timerdev_burst``

-        Use burst mode event timer adapter as producer.
+       Use burst mode event timer adapter as producer.

- * ``--timer_tick_nsec``
+* ``--timer_tick_nsec``

-        Used to dictate number of nano seconds between bucket traversal of the
-        event timer adapter. Refer `rte_event_timer_adapter_conf`.
+       Used to dictate number of nano seconds between bucket traversal of the
+       event timer adapter. Refer `rte_event_timer_adapter_conf`.

- * ``--max_tmo_nsec``
+* ``--max_tmo_nsec``

-        Used to configure event timer adapter max arm timeout in nano seconds.
+       Used to configure event timer adapter max arm timeout in nano seconds.

- * ``--expiry_nsec``
+* ``--expiry_nsec``

-        Dictate the number of nano seconds after which the event timer expires.
+       Dictate the number of nano seconds after which the event timer expires.

- * ``--nb_timers``
+* ``--nb_timers``

-        Number of event timers each producer core will generate.
+       Number of event timers each producer core will generate.

- * ``--nb_timer_adptrs``
+* ``--nb_timer_adptrs``
+
+       Number of event timer adapters to be used. Each adapter is used in
+       round robin manner by the producer cores.
+
+* ``--deq_tmo_nsec``
+
+       Global dequeue timeout for all the event ports. If the provided dequeue
+       timeout is out of the supported range of the event device, it is
+       adjusted to the lowest/highest supported dequeue timeout.

-        Number of event timer adapters to be used. Each adapter is used in
-        round robin manner by the producer cores.

 Eventdev Tests
 --------------
@@ -225,6 +232,7 @@ Supported application command line options are following::
    --nb_flows
    --nb_pkts
    --worker_deq_depth
+   --deq_tmo_nsec

 Example
 ^^^^^^^
@@ -287,6 +295,7 @@ Supported application command line options are following::
    --nb_flows
    --nb_pkts
    --worker_deq_depth
+   --deq_tmo_nsec

 Example
 ^^^^^^^
@@ -386,6 +395,7 @@ Supported application command line options are following::
         --expiry_nsec
         --nb_timers
         --nb_timer_adptrs
+        --deq_tmo_nsec

 Example
 ^^^^^^^
@@ -485,6 +495,7 @@ Supported application command line options are following::
         --expiry_nsec
         --nb_timers
         --nb_timer_adptrs
+        --deq_tmo_nsec

 Example
 ^^^^^^^
@@ -598,6 +609,7 @@ Supported application command line options are following::
         --stlist
         --worker_deq_depth
         --prod_type_ethdev
+        --deq_tmo_nsec


 .. Note::
@@ -689,6 +701,7 @@ Supported application command line options are following::
         --stlist
         --worker_deq_depth
         --prod_type_ethdev
+        --deq_tmo_nsec


 .. Note::
--
2.21.0

