From: Harry van Haaren <harry.van.haaren@intel.com>
To: dev@dpdk.org
Cc: david.marchand@redhat.com, mattias.ronnblom@ericsson.com,
	jerinj@marvell.com, Harry van Haaren <harry.van.haaren@intel.com>
Subject: [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details
Date: Wed,  3 Mar 2021 10:56:43 +0000	[thread overview]
Message-ID: <20210303105643.2552378-1-harry.van.haaren@intel.com> (raw)
In-Reply-To: <20210212165814.2189305-1-harry.van.haaren@intel.com>

Today it is difficult to know whether the SW Eventdev PMD is making
forward progress when it runs an iteration of its service. This
commit adds two xstats to give the application better visibility.

The new xstats report which Eventdev ports received work in the
last iteration of scheduling, and whether the scheduler made any
forward progress at all.

This patch implements an xstat for the SW PMD that exposes a
bitmask of the ports that were scheduled to. In the unlikely case
that the SW PMD instance has 64 or more ports, the xstat reports
UINT64_MAX.

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>

---

v3:
- Simplify: keep all metrics within the Event SW PMD

v2:
- Fixup printf() %ld to PRIu64

Note that most of the changes here are unit-test updates required
when adding a statistic to the PMD. The actual "useful code" is a
mere handful of lines amongst the noise.
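
For reference, below is a rough usage sketch (not part of this
patch) of how an application or monitoring thread might poll the
two new device-level xstats. It assumes the eventdev ID is known
and uses the "dev_"-prefixed xstat names as exposed through the
xstats API; illustrative only:

#include <stdio.h>
#include <rte_eventdev.h>

static void
check_sw_sched_progress(uint8_t dev_id)
{
	unsigned int id; /* xstat id, could be looked up once and cached */
	uint64_t progress = rte_event_dev_xstats_by_name_get(dev_id,
			"dev_sched_progress_last_iter", &id);
	uint64_t bitmask = rte_event_dev_xstats_by_name_get(dev_id,
			"dev_sched_last_iter_bitmask", &id);

	if (!progress)
		printf("evdev %u: no forward progress last iteration\n",
				dev_id);

	/* Bit i set => port i had events scheduled to its CQ in the
	 * last iteration. UINT64_MAX is reported when the instance
	 * has 64 or more ports.
	 */
	for (unsigned int i = 0; i < 64; i++)
		if (bitmask & (1ULL << i))
			printf("evdev %u: port %u received work\n",
					dev_id, i);
}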

---
 drivers/event/sw/sw_evdev.h           |  2 ++
 drivers/event/sw/sw_evdev_scheduler.c | 15 ++++++++++++++
 drivers/event/sw/sw_evdev_selftest.c  | 28 ++++++++++++++-------------
 drivers/event/sw/sw_evdev_xstats.c    |  9 ++++++++-
 4 files changed, 40 insertions(+), 14 deletions(-)

diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 5ab6465c83..33645bd1df 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -259,6 +259,8 @@ struct sw_evdev {
 	uint64_t sched_no_iq_enqueues;
 	uint64_t sched_no_cq_enqueues;
 	uint64_t sched_cq_qid_called;
+	uint64_t sched_last_iter_bitmask;
+	uint8_t sched_progress_last_iter;
 
 	uint8_t started;
 	uint32_t credit_update_quanta;
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index f747b3c6d4..d3a6bd5cda 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -559,6 +559,11 @@ sw_event_schedule(struct rte_eventdev *dev)
 	sw->sched_no_iq_enqueues += (in_pkts_total == 0);
 	sw->sched_no_cq_enqueues += (out_pkts_total == 0);
 
+	uint64_t work_done = (in_pkts_total + out_pkts_total) != 0;
+	sw->sched_progress_last_iter = work_done;
+
+	uint64_t cqs_scheds_last_iter = 0;
+
 	/* push all the internal buffered QEs in port->cq_ring to the
 	 * worker cores: aka, do the ring transfers batched.
 	 */
@@ -578,6 +583,7 @@ sw_event_schedule(struct rte_eventdev *dev)
 					&sw->cq_ring_space[i]);
 			port->cq_buf_count = 0;
 			no_enq = 0;
+			cqs_scheds_last_iter |= (1ULL << i);
 		} else {
 			sw->cq_ring_space[i] =
 					rte_event_ring_free_count(worker) -
@@ -597,4 +603,13 @@ sw_event_schedule(struct rte_eventdev *dev)
 			sw->sched_min_burst = sw->sched_min_burst_size;
 	}
 
+	/* Provide stats on what eventdev ports were scheduled to this
+	 * iteration. If more than 64 ports are active, always report that
+	 * all Eventdev ports have been scheduled events.
+	 */
+	if (likely(sw->port_count < 64)) {
+		sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
+	} else {
+		sw->sched_last_iter_bitmask = UINT64_MAX;
+	}
 }
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index e4bfb3a0f1..d53e903129 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -873,15 +873,15 @@ xstats_tests(struct test *t)
 	int ret = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, xstats_names, ids, XSTATS_MAX);
-	if (ret != 6) {
-		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+	if (ret != 8) {
+		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
 		return -1;
 	}
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, ret);
-	if (ret != 6) {
-		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+	if (ret != 8) {
+		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
 		return -1;
 	}
 
@@ -959,7 +959,7 @@ xstats_tests(struct test *t)
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, num_stats);
-	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
+	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
 	for (i = 0; (signed int)i < ret; i++) {
 		if (expected[i] != values[i]) {
 			printf(
@@ -975,7 +975,7 @@ xstats_tests(struct test *t)
 					0, NULL, 0);
 
 	/* ensure reset statistics are zero-ed */
-	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, num_stats);
@@ -1460,7 +1460,7 @@ xstats_id_reset_tests(struct test *t)
 	for (i = 0; i < XSTATS_MAX; i++)
 		ids[i] = i;
 
-#define NUM_DEV_STATS 6
+#define NUM_DEV_STATS 8
 	/* Device names / values */
 	int num_stats = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
@@ -1504,8 +1504,10 @@ xstats_id_reset_tests(struct test *t)
 	static const char * const dev_names[] = {
 		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
 		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+		"dev_sched_last_iter_bitmask",
+		"dev_sched_progress_last_iter"
 	};
-	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
 	for (i = 0; (int)i < ret; i++) {
 		unsigned int id;
 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
@@ -1518,8 +1520,8 @@ xstats_id_reset_tests(struct test *t)
 		}
 		if (val != dev_expected[i]) {
 			printf("%d: %s value incorrect, expected %"
-				PRIu64" got %d\n", __LINE__, dev_names[i],
-				dev_expected[i], id);
+				PRIu64" got %"PRIu64"\n", __LINE__,
+				dev_names[i], dev_expected[i], val);
 			goto fail;
 		}
 		/* reset to zero */
@@ -1542,11 +1544,11 @@ xstats_id_reset_tests(struct test *t)
 		}
 	};
 
-/* 48 is stat offset from start of the devices whole xstats.
+/* 50 is stat offset from start of the devices whole xstats.
  * This WILL break every time we add a statistic to a port
  * or the device, but there is no other way to test
  */
-#define PORT_OFF 48
+#define PORT_OFF 50
 /* num stats for the tested port. CQ size adds more stats to a port */
 #define NUM_PORT_STATS 21
 /* the port to test. */
@@ -1670,7 +1672,7 @@ xstats_id_reset_tests(struct test *t)
 /* queue offset from start of the devices whole xstats.
  * This will break every time we add a statistic to a device/port/queue
  */
-#define QUEUE_OFF 90
+#define QUEUE_OFF 92
 	const uint32_t queue = 0;
 	num_stats = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 02f7874180..c2647d7da2 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -17,6 +17,8 @@ enum xstats_type {
 	/* device instance specific */
 	no_iq_enq,
 	no_cq_enq,
+	sched_last_iter_bitmask,
+	sched_progress_last_iter,
 	/* port_specific */
 	rx_used,
 	rx_free,
@@ -57,6 +59,9 @@ get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
 	case calls: return sw->sched_called;
 	case no_iq_enq: return sw->sched_no_iq_enqueues;
 	case no_cq_enq: return sw->sched_no_cq_enqueues;
+	case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
+	case sched_progress_last_iter: return sw->sched_progress_last_iter;
+
 	default: return -1;
 	}
 }
@@ -177,9 +182,11 @@ sw_xstats_init(struct sw_evdev *sw)
 	 */
 	static const char * const dev_stats[] = { "rx", "tx", "drop",
 			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+			"sched_last_iter_bitmask", "sched_progress_last_iter",
 	};
 	static const enum xstats_type dev_types[] = { rx, tx, dropped,
-			calls, no_iq_enq, no_cq_enq,
+			calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
+			sched_progress_last_iter,
 	};
 	/* all device stats are allowed to be reset */
 
-- 
2.25.1



Thread overview: 8+ messages
2021-02-12 16:58 [dpdk-dev] [PATCH v3 1/3] service: add component useful work attribute Harry van Haaren
2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 2/3] event/sw: add useful work done attribute Harry van Haaren
2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 3/3] event/sw: add xstat for work done in last iteration Harry van Haaren
2021-03-03 10:56 ` Harry van Haaren [this message]
2021-03-04 11:02   ` [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details Van Haaren, Harry
2021-03-08  7:52     ` Jerin Jacob
2021-03-08  9:22   ` [dpdk-dev] [PATCH v5] " Harry van Haaren
2021-03-08 16:54     ` Jerin Jacob
