DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH v3 1/3] service: add component useful work attribute
@ 2021-02-12 16:58 Harry van Haaren
  2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 2/3] event/sw: add useful work done attribute Harry van Haaren
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Harry van Haaren @ 2021-02-12 16:58 UTC (permalink / raw)
  To: dev
  Cc: david.marchand, mattias.ronnblom, honnappa.nagarahalli, Harry van Haaren

This commit adds a new attribute which allows the service to indicate
if the previous iteration of work was "useful". Useful work here implies
forward progress was made.

Exposing this information via an attribute to the application allows
tracking of CPU cycles as being useful or not-useful, and a CPU load
estimate can be deduced from that information.

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
---
 lib/librte_eal/common/rte_service.c           | 19 +++++++++++++++++++
 lib/librte_eal/include/rte_service.h          |  5 +++++
 .../include/rte_service_component.h           | 13 +++++++++++++
 lib/librte_eal/version.map                    |  3 +++
 4 files changed, 40 insertions(+)

diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index bd8fb72e78..8595555fc3 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -58,6 +58,7 @@ struct rte_service_spec_impl {
 	uint32_t num_mapped_cores;
 	uint64_t calls;
 	uint64_t cycles_spent;
+	uint8_t useful_work_last_iter;
 } __rte_cache_aligned;
 
 /* the internal values of a service core */
@@ -294,6 +295,21 @@ rte_service_component_unregister(uint32_t id)
 	return 0;
 }
 
+int32_t
+rte_service_component_attr_set(uint32_t id, uint32_t attr, uint64_t value)
+{
+	struct rte_service_spec_impl *s;
+	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+	switch (attr) {
+	case RTE_SERVICE_ATTR_USEFUL_WORK_LAST_ITER:
+		s->useful_work_last_iter = value;
+		return 0;
+	default:
+		return -EINVAL;
+	};
+}
+
 int32_t
 rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
 {
@@ -799,6 +815,9 @@ rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
 		return -EINVAL;
 
 	switch (attr_id) {
+	case RTE_SERVICE_ATTR_USEFUL_WORK_LAST_ITER:
+		*attr_value = s->useful_work_last_iter;
+		return 0;
 	case RTE_SERVICE_ATTR_CYCLES:
 		*attr_value = s->cycles_spent;
 		return 0;
diff --git a/lib/librte_eal/include/rte_service.h b/lib/librte_eal/include/rte_service.h
index ca9950d091..d50b5c8d7a 100644
--- a/lib/librte_eal/include/rte_service.h
+++ b/lib/librte_eal/include/rte_service.h
@@ -390,6 +390,11 @@ int32_t rte_service_dump(FILE *f, uint32_t id);
  */
 #define RTE_SERVICE_ATTR_CALL_COUNT 1
 
+/**
+ * Returns if the last iteration of the service resulted in useful work done.
+ */
+#define RTE_SERVICE_ATTR_USEFUL_WORK_LAST_ITER 2
+
 /**
  * Get an attribute from a service.
  *
diff --git a/lib/librte_eal/include/rte_service_component.h b/lib/librte_eal/include/rte_service_component.h
index 9e66ee7e29..534f41f531 100644
--- a/lib/librte_eal/include/rte_service_component.h
+++ b/lib/librte_eal/include/rte_service_component.h
@@ -87,6 +87,19 @@ int32_t rte_service_component_register(const struct rte_service_spec *spec,
  */
 int32_t rte_service_component_unregister(uint32_t id);
 
+/**
+ * Set an attribute for this service.
+ *
+ * Note this API is to be called by the service implementation, to make the
+ * statistic available via the usual attr_get() service APIs.
+ *
+ * @retval 0 Success
+ * @retval -EINVAL Invalid service id or attribute provided
+ */
+__rte_experimental
+int32_t rte_service_component_attr_set(uint32_t id, uint32_t attr,
+		uint64_t value);
+
 /**
  * Private function to allow EAL to initialized default mappings.
  *
diff --git a/lib/librte_eal/version.map b/lib/librte_eal/version.map
index fce90a112f..e60eaa3dd9 100644
--- a/lib/librte_eal/version.map
+++ b/lib/librte_eal/version.map
@@ -412,6 +412,9 @@ EXPERIMENTAL {
 	rte_thread_tls_key_delete;
 	rte_thread_tls_value_get;
 	rte_thread_tls_value_set;
+
+	# added in 21.05
+	rte_service_component_attr_set;
 };
 
 INTERNAL {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH v3 2/3] event/sw: add useful work done attribute
  2021-02-12 16:58 [dpdk-dev] [PATCH v3 1/3] service: add component useful work attribute Harry van Haaren
@ 2021-02-12 16:58 ` Harry van Haaren
  2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 3/3] event/sw: add xstat for work done in last iteration Harry van Haaren
  2021-03-03 10:56 ` [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details Harry van Haaren
  2 siblings, 0 replies; 8+ messages in thread
From: Harry van Haaren @ 2021-02-12 16:58 UTC (permalink / raw)
  To: dev
  Cc: david.marchand, mattias.ronnblom, honnappa.nagarahalli, Harry van Haaren

This commit exposes if useful work is done to the service
instance. The normal service_attr_get() API can be used to
retrieve the value of the attribute.

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
---
 drivers/event/sw/sw_evdev_scheduler.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index f747b3c6d4..c78f687446 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -5,6 +5,9 @@
 #include <rte_ring.h>
 #include <rte_hash_crc.h>
 #include <rte_event_ring.h>
+
+#include <rte_service_component.h>
+
 #include "sw_evdev.h"
 #include "iq_chunk.h"
 #include "event_ring.h"
@@ -559,6 +562,10 @@ sw_event_schedule(struct rte_eventdev *dev)
 	sw->sched_no_iq_enqueues += (in_pkts_total == 0);
 	sw->sched_no_cq_enqueues += (out_pkts_total == 0);
 
+	uint64_t work_done = (in_pkts_total + out_pkts_total) != 0;
+	rte_service_component_attr_set(sw->service_id,
+			RTE_SERVICE_ATTR_USEFUL_WORK_LAST_ITER, work_done);
+
 	/* push all the internal buffered QEs in port->cq_ring to the
 	 * worker cores: aka, do the ring transfers batched.
 	 */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH v3 3/3] event/sw: add xstat for work done in last iteration
  2021-02-12 16:58 [dpdk-dev] [PATCH v3 1/3] service: add component useful work attribute Harry van Haaren
  2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 2/3] event/sw: add useful work done attribute Harry van Haaren
@ 2021-02-12 16:58 ` Harry van Haaren
  2021-03-03 10:56 ` [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details Harry van Haaren
  2 siblings, 0 replies; 8+ messages in thread
From: Harry van Haaren @ 2021-02-12 16:58 UTC (permalink / raw)
  To: dev
  Cc: david.marchand, mattias.ronnblom, honnappa.nagarahalli, Harry van Haaren

Today it is difficult to know what Eventdev ports received work
from the scheduling core. Sometimes it is useful to know where
work has been scheduled.

This patch implements an xstat for the SW PMD, which provides a
bitmask of ports that were scheduled to. If the SW PMD instance
has 64 or more ports, always report that a port got an event.

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>

---

Note most of the changes here are unit-test changes to add
a statistic to the PMD. The actual "useful code" is a mere
handful of lines in a lot of noise.. could split into 2 patches?
---
 drivers/event/sw/sw_evdev.h           |  1 +
 drivers/event/sw/sw_evdev_scheduler.c | 12 ++++++++++++
 drivers/event/sw/sw_evdev_selftest.c  | 27 ++++++++++++++-------------
 drivers/event/sw/sw_evdev_xstats.c    |  6 +++++-
 4 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 5ab6465c83..5dfa4508b3 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -259,6 +259,7 @@ struct sw_evdev {
 	uint64_t sched_no_iq_enqueues;
 	uint64_t sched_no_cq_enqueues;
 	uint64_t sched_cq_qid_called;
+	uint64_t sched_last_iter_bitmask;
 
 	uint8_t started;
 	uint32_t credit_update_quanta;
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index c78f687446..3ee1188be0 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -566,6 +566,8 @@ sw_event_schedule(struct rte_eventdev *dev)
 	rte_service_component_attr_set(sw->service_id,
 			RTE_SERVICE_ATTR_USEFUL_WORK_LAST_ITER, work_done);
 
+	uint64_t cqs_scheds_last_iter = 0;
+
 	/* push all the internal buffered QEs in port->cq_ring to the
 	 * worker cores: aka, do the ring transfers batched.
 	 */
@@ -585,6 +587,7 @@ sw_event_schedule(struct rte_eventdev *dev)
 					&sw->cq_ring_space[i]);
 			port->cq_buf_count = 0;
 			no_enq = 0;
+			cqs_scheds_last_iter |= (1ULL << i);
 		} else {
 			sw->cq_ring_space[i] =
 					rte_event_ring_free_count(worker) -
@@ -604,4 +607,13 @@ sw_event_schedule(struct rte_eventdev *dev)
 			sw->sched_min_burst = sw->sched_min_burst_size;
 	}
 
+	/* Provide stats on what eventdev ports were scheduled to this
+	 * iteration. If more than 64 ports are active, always report that
+	 * all Eventdev ports have been scheduled events.
+	 */
+	if (likely(sw->port_count < 64)) {
+		sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
+	} else {
+		sw->sched_last_iter_bitmask = UINT64_MAX;
+	}
 }
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index e4bfb3a0f1..7dd35cb22e 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -873,15 +873,15 @@ xstats_tests(struct test *t)
 	int ret = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, xstats_names, ids, XSTATS_MAX);
-	if (ret != 6) {
-		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+	if (ret != 7) {
+		printf("%d: expected 7 stats, got return %d\n", __LINE__, ret);
 		return -1;
 	}
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, ret);
-	if (ret != 6) {
-		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+	if (ret != 7) {
+		printf("%d: expected 7 stats, got return %d\n", __LINE__, ret);
 		return -1;
 	}
 
@@ -959,7 +959,7 @@ xstats_tests(struct test *t)
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, num_stats);
-	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
+	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4};
 	for (i = 0; (signed int)i < ret; i++) {
 		if (expected[i] != values[i]) {
 			printf(
@@ -975,7 +975,7 @@ xstats_tests(struct test *t)
 					0, NULL, 0);
 
 	/* ensure reset statistics are zero-ed */
-	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0};
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, num_stats);
@@ -1460,7 +1460,7 @@ xstats_id_reset_tests(struct test *t)
 	for (i = 0; i < XSTATS_MAX; i++)
 		ids[i] = i;
 
-#define NUM_DEV_STATS 6
+#define NUM_DEV_STATS 7
 	/* Device names / values */
 	int num_stats = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
@@ -1504,8 +1504,9 @@ xstats_id_reset_tests(struct test *t)
 	static const char * const dev_names[] = {
 		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
 		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+		"dev_sched_last_iter_bitmask",
 	};
-	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4};
 	for (i = 0; (int)i < ret; i++) {
 		unsigned int id;
 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
@@ -1518,8 +1519,8 @@ xstats_id_reset_tests(struct test *t)
 		}
 		if (val != dev_expected[i]) {
 			printf("%d: %s value incorrect, expected %"
-				PRIu64" got %d\n", __LINE__, dev_names[i],
-				dev_expected[i], id);
+				PRIu64" got %ld\n", __LINE__, dev_names[i],
+				dev_expected[i], val);
 			goto fail;
 		}
 		/* reset to zero */
@@ -1542,11 +1543,11 @@ xstats_id_reset_tests(struct test *t)
 		}
 	};
 
-/* 48 is stat offset from start of the devices whole xstats.
+/* 49 is stat offset from start of the devices whole xstats.
  * This WILL break every time we add a statistic to a port
  * or the device, but there is no other way to test
  */
-#define PORT_OFF 48
+#define PORT_OFF 49
 /* num stats for the tested port. CQ size adds more stats to a port */
 #define NUM_PORT_STATS 21
 /* the port to test. */
@@ -1670,7 +1671,7 @@ xstats_id_reset_tests(struct test *t)
 /* queue offset from start of the devices whole xstats.
  * This will break every time we add a statistic to a device/port/queue
  */
-#define QUEUE_OFF 90
+#define QUEUE_OFF 91
 	const uint32_t queue = 0;
 	num_stats = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 02f7874180..1b0ad68414 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -17,6 +17,7 @@ enum xstats_type {
 	/* device instance specific */
 	no_iq_enq,
 	no_cq_enq,
+	sched_last_iter_bitmask,
 	/* port_specific */
 	rx_used,
 	rx_free,
@@ -57,6 +58,8 @@ get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
 	case calls: return sw->sched_called;
 	case no_iq_enq: return sw->sched_no_iq_enqueues;
 	case no_cq_enq: return sw->sched_no_cq_enqueues;
+	case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
+
 	default: return -1;
 	}
 }
@@ -177,9 +180,10 @@ sw_xstats_init(struct sw_evdev *sw)
 	 */
 	static const char * const dev_stats[] = { "rx", "tx", "drop",
 			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+			"sched_last_iter_bitmask",
 	};
 	static const enum xstats_type dev_types[] = { rx, tx, dropped,
-			calls, no_iq_enq, no_cq_enq,
+			calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
 	};
 	/* all device stats are allowed to be reset */
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details
  2021-02-12 16:58 [dpdk-dev] [PATCH v3 1/3] service: add component useful work attribute Harry van Haaren
  2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 2/3] event/sw: add useful work done attribute Harry van Haaren
  2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 3/3] event/sw: add xstat for work done in last iteration Harry van Haaren
@ 2021-03-03 10:56 ` Harry van Haaren
  2021-03-04 11:02   ` Van Haaren, Harry
  2021-03-08  9:22   ` [dpdk-dev] [PATCH v5] " Harry van Haaren
  2 siblings, 2 replies; 8+ messages in thread
From: Harry van Haaren @ 2021-03-03 10:56 UTC (permalink / raw)
  To: dev; +Cc: david.marchand, mattias.ronnblom, jerinj, Harry van Haaren

Today it is difficult to know if the SW Eventdev PMD is making
forward progress when it runs an iteration of its service. This
commit adds two xstats to give better visibility to the application.

The new xstats provide an application with which Eventdev ports
recieved work in the last iteration of scheduling, as well if
forward progress was made by the scheduler.

This patch implements an xstat for the SW PMD that exposes a
bitmask of ports that were scheduled to. In the unlikely case
that the SW PMD instance has 64 or more ports, return UINT64_MAX.

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>

---

v3:
- Simplify all metrics to Event SW PMD

v2:
- Fixup printf() %ld to PRIu64

Note most of the changes here are unit-test changes to add
a statistic to the PMD. The actual "useful code" is a mere
handful of lines in a lot of noise.

---
 drivers/event/sw/sw_evdev.h           |  2 ++
 drivers/event/sw/sw_evdev_scheduler.c | 15 ++++++++++++++
 drivers/event/sw/sw_evdev_selftest.c  | 28 ++++++++++++++-------------
 drivers/event/sw/sw_evdev_xstats.c    |  9 ++++++++-
 4 files changed, 40 insertions(+), 14 deletions(-)

diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 5ab6465c83..33645bd1df 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -259,6 +259,8 @@ struct sw_evdev {
 	uint64_t sched_no_iq_enqueues;
 	uint64_t sched_no_cq_enqueues;
 	uint64_t sched_cq_qid_called;
+	uint64_t sched_last_iter_bitmask;
+	uint8_t sched_progress_last_iter;
 
 	uint8_t started;
 	uint32_t credit_update_quanta;
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index f747b3c6d4..d3a6bd5cda 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -559,6 +559,11 @@ sw_event_schedule(struct rte_eventdev *dev)
 	sw->sched_no_iq_enqueues += (in_pkts_total == 0);
 	sw->sched_no_cq_enqueues += (out_pkts_total == 0);
 
+	uint64_t work_done = (in_pkts_total + out_pkts_total) != 0;
+	sw->sched_progress_last_iter = work_done;
+
+	uint64_t cqs_scheds_last_iter = 0;
+
 	/* push all the internal buffered QEs in port->cq_ring to the
 	 * worker cores: aka, do the ring transfers batched.
 	 */
@@ -578,6 +583,7 @@ sw_event_schedule(struct rte_eventdev *dev)
 					&sw->cq_ring_space[i]);
 			port->cq_buf_count = 0;
 			no_enq = 0;
+			cqs_scheds_last_iter |= (1ULL << i);
 		} else {
 			sw->cq_ring_space[i] =
 					rte_event_ring_free_count(worker) -
@@ -597,4 +603,13 @@ sw_event_schedule(struct rte_eventdev *dev)
 			sw->sched_min_burst = sw->sched_min_burst_size;
 	}
 
+	/* Provide stats on what eventdev ports were scheduled to this
+	 * iteration. If more than 64 ports are active, always report that
+	 * all Eventdev ports have been scheduled events.
+	 */
+	if (likely(sw->port_count < 64)) {
+		sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
+	} else {
+		sw->sched_last_iter_bitmask = UINT64_MAX;
+	}
 }
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index e4bfb3a0f1..d53e903129 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -873,15 +873,15 @@ xstats_tests(struct test *t)
 	int ret = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, xstats_names, ids, XSTATS_MAX);
-	if (ret != 6) {
-		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+	if (ret != 8) {
+		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
 		return -1;
 	}
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, ret);
-	if (ret != 6) {
-		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+	if (ret != 8) {
+		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
 		return -1;
 	}
 
@@ -959,7 +959,7 @@ xstats_tests(struct test *t)
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, num_stats);
-	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
+	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
 	for (i = 0; (signed int)i < ret; i++) {
 		if (expected[i] != values[i]) {
 			printf(
@@ -975,7 +975,7 @@ xstats_tests(struct test *t)
 					0, NULL, 0);
 
 	/* ensure reset statistics are zero-ed */
-	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, num_stats);
@@ -1460,7 +1460,7 @@ xstats_id_reset_tests(struct test *t)
 	for (i = 0; i < XSTATS_MAX; i++)
 		ids[i] = i;
 
-#define NUM_DEV_STATS 6
+#define NUM_DEV_STATS 8
 	/* Device names / values */
 	int num_stats = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
@@ -1504,8 +1504,10 @@ xstats_id_reset_tests(struct test *t)
 	static const char * const dev_names[] = {
 		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
 		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+		"dev_sched_last_iter_bitmask",
+		"dev_sched_progress_last_iter"
 	};
-	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
 	for (i = 0; (int)i < ret; i++) {
 		unsigned int id;
 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
@@ -1518,8 +1520,8 @@ xstats_id_reset_tests(struct test *t)
 		}
 		if (val != dev_expected[i]) {
 			printf("%d: %s value incorrect, expected %"
-				PRIu64" got %d\n", __LINE__, dev_names[i],
-				dev_expected[i], id);
+				PRIu64" got %"PRIu64"\n", __LINE__,
+				dev_names[i], dev_expected[i], val);
 			goto fail;
 		}
 		/* reset to zero */
@@ -1542,11 +1544,11 @@ xstats_id_reset_tests(struct test *t)
 		}
 	};
 
-/* 48 is stat offset from start of the devices whole xstats.
+/* 49 is stat offset from start of the devices whole xstats.
  * This WILL break every time we add a statistic to a port
  * or the device, but there is no other way to test
  */
-#define PORT_OFF 48
+#define PORT_OFF 50
 /* num stats for the tested port. CQ size adds more stats to a port */
 #define NUM_PORT_STATS 21
 /* the port to test. */
@@ -1670,7 +1672,7 @@ xstats_id_reset_tests(struct test *t)
 /* queue offset from start of the devices whole xstats.
  * This will break every time we add a statistic to a device/port/queue
  */
-#define QUEUE_OFF 90
+#define QUEUE_OFF 92
 	const uint32_t queue = 0;
 	num_stats = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 02f7874180..c2647d7da2 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -17,6 +17,8 @@ enum xstats_type {
 	/* device instance specific */
 	no_iq_enq,
 	no_cq_enq,
+	sched_last_iter_bitmask,
+	sched_progress_last_iter,
 	/* port_specific */
 	rx_used,
 	rx_free,
@@ -57,6 +59,9 @@ get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
 	case calls: return sw->sched_called;
 	case no_iq_enq: return sw->sched_no_iq_enqueues;
 	case no_cq_enq: return sw->sched_no_cq_enqueues;
+	case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
+	case sched_progress_last_iter: return sw->sched_progress_last_iter;
+
 	default: return -1;
 	}
 }
@@ -177,9 +182,11 @@ sw_xstats_init(struct sw_evdev *sw)
 	 */
 	static const char * const dev_stats[] = { "rx", "tx", "drop",
 			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+			"sched_last_iter_bitmask", "sched_progress_last_iter",
 	};
 	static const enum xstats_type dev_types[] = { rx, tx, dropped,
-			calls, no_iq_enq, no_cq_enq,
+			calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
+			sched_progress_last_iter,
 	};
 	/* all device stats are allowed to be reset */
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details
  2021-03-03 10:56 ` [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details Harry van Haaren
@ 2021-03-04 11:02   ` Van Haaren, Harry
  2021-03-08  7:52     ` Jerin Jacob
  2021-03-08  9:22   ` [dpdk-dev] [PATCH v5] " Harry van Haaren
  1 sibling, 1 reply; 8+ messages in thread
From: Van Haaren, Harry @ 2021-03-04 11:02 UTC (permalink / raw)
  To: dev; +Cc: david.marchand, mattias.ronnblom, jerinj

Fix typo in Mattias' email, apologies for noise.

> -----Original Message-----
> From: Van Haaren, Harry <harry.van.haaren@intel.com>
> Sent: Wednesday, March 3, 2021 10:57 AM
> To: dev@dpdk.org
> Cc: david.marchand@redhat.com; mattias.ronnblom@ericcson.com;
> jerinj@marvell.com; Van Haaren, Harry <harry.van.haaren@intel.com>
> Subject: [PATCH v4] event/sw: add xstats to expose progress details
> 
> Today it is difficult to know if the SW Eventdev PMD is making
> forward progress when it runs an iteration of its service. This
> commit adds two xstats to give better visibility to the application.
> 
> The new xstats provide an application with which Eventdev ports
> recieved work in the last iteration of scheduling, as well if
> forward progress was made by the scheduler.
> 
> This patch implements an xstat for the SW PMD that exposes a
> bitmask of ports that were scheduled to. In the unlikely case
> that the SW PMD instance has 64 or more ports, return UINT64_MAX.
> 
> Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
> 
> ---
> 
> v3:
> - Simplify all metrics to Event SW PMD
> 
> v2:
> - Fixup printf() %ld to PRIu64
> 
> Note most of the changes here are unit-test changes to add
> a statistic to the PMD. The actual "useful code" is a mere
> handful of lines in a lot of noise.
> 
> ---
>  drivers/event/sw/sw_evdev.h           |  2 ++
>  drivers/event/sw/sw_evdev_scheduler.c | 15 ++++++++++++++
>  drivers/event/sw/sw_evdev_selftest.c  | 28 ++++++++++++++-------------
>  drivers/event/sw/sw_evdev_xstats.c    |  9 ++++++++-
>  4 files changed, 40 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
> index 5ab6465c83..33645bd1df 100644
> --- a/drivers/event/sw/sw_evdev.h
> +++ b/drivers/event/sw/sw_evdev.h
> @@ -259,6 +259,8 @@ struct sw_evdev {
>  	uint64_t sched_no_iq_enqueues;
>  	uint64_t sched_no_cq_enqueues;
>  	uint64_t sched_cq_qid_called;
> +	uint64_t sched_last_iter_bitmask;
> +	uint8_t sched_progress_last_iter;
> 
>  	uint8_t started;
>  	uint32_t credit_update_quanta;
> diff --git a/drivers/event/sw/sw_evdev_scheduler.c
> b/drivers/event/sw/sw_evdev_scheduler.c
> index f747b3c6d4..d3a6bd5cda 100644
> --- a/drivers/event/sw/sw_evdev_scheduler.c
> +++ b/drivers/event/sw/sw_evdev_scheduler.c
> @@ -559,6 +559,11 @@ sw_event_schedule(struct rte_eventdev *dev)
>  	sw->sched_no_iq_enqueues += (in_pkts_total == 0);
>  	sw->sched_no_cq_enqueues += (out_pkts_total == 0);
> 
> +	uint64_t work_done = (in_pkts_total + out_pkts_total) != 0;
> +	sw->sched_progress_last_iter = work_done;
> +
> +	uint64_t cqs_scheds_last_iter = 0;
> +
>  	/* push all the internal buffered QEs in port->cq_ring to the
>  	 * worker cores: aka, do the ring transfers batched.
>  	 */
> @@ -578,6 +583,7 @@ sw_event_schedule(struct rte_eventdev *dev)
>  					&sw->cq_ring_space[i]);
>  			port->cq_buf_count = 0;
>  			no_enq = 0;
> +			cqs_scheds_last_iter |= (1ULL << i);
>  		} else {
>  			sw->cq_ring_space[i] =
>  					rte_event_ring_free_count(worker) -
> @@ -597,4 +603,13 @@ sw_event_schedule(struct rte_eventdev *dev)
>  			sw->sched_min_burst = sw->sched_min_burst_size;
>  	}
> 
> +	/* Provide stats on what eventdev ports were scheduled to this
> +	 * iteration. If more than 64 ports are active, always report that
> +	 * all Eventdev ports have been scheduled events.
> +	 */
> +	if (likely(sw->port_count < 64)) {
> +		sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
> +	} else {
> +		sw->sched_last_iter_bitmask = UINT64_MAX;
> +	}
>  }
> diff --git a/drivers/event/sw/sw_evdev_selftest.c
> b/drivers/event/sw/sw_evdev_selftest.c
> index e4bfb3a0f1..d53e903129 100644
> --- a/drivers/event/sw/sw_evdev_selftest.c
> +++ b/drivers/event/sw/sw_evdev_selftest.c
> @@ -873,15 +873,15 @@ xstats_tests(struct test *t)
>  	int ret = rte_event_dev_xstats_names_get(evdev,
>  					RTE_EVENT_DEV_XSTATS_DEVICE,
>  					0, xstats_names, ids, XSTATS_MAX);
> -	if (ret != 6) {
> -		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
> +	if (ret != 8) {
> +		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
>  		return -1;
>  	}
>  	ret = rte_event_dev_xstats_get(evdev,
>  					RTE_EVENT_DEV_XSTATS_DEVICE,
>  					0, ids, values, ret);
> -	if (ret != 6) {
> -		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
> +	if (ret != 8) {
> +		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
>  		return -1;
>  	}
> 
> @@ -959,7 +959,7 @@ xstats_tests(struct test *t)
>  	ret = rte_event_dev_xstats_get(evdev,
>  					RTE_EVENT_DEV_XSTATS_DEVICE,
>  					0, ids, values, num_stats);
> -	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
> +	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
>  	for (i = 0; (signed int)i < ret; i++) {
>  		if (expected[i] != values[i]) {
>  			printf(
> @@ -975,7 +975,7 @@ xstats_tests(struct test *t)
>  					0, NULL, 0);
> 
>  	/* ensure reset statistics are zero-ed */
> -	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
> +	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
>  	ret = rte_event_dev_xstats_get(evdev,
>  					RTE_EVENT_DEV_XSTATS_DEVICE,
>  					0, ids, values, num_stats);
> @@ -1460,7 +1460,7 @@ xstats_id_reset_tests(struct test *t)
>  	for (i = 0; i < XSTATS_MAX; i++)
>  		ids[i] = i;
> 
> -#define NUM_DEV_STATS 6
> +#define NUM_DEV_STATS 8
>  	/* Device names / values */
>  	int num_stats = rte_event_dev_xstats_names_get(evdev,
>  					RTE_EVENT_DEV_XSTATS_DEVICE,
> @@ -1504,8 +1504,10 @@ xstats_id_reset_tests(struct test *t)
>  	static const char * const dev_names[] = {
>  		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
>  		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
> +		"dev_sched_last_iter_bitmask",
> +		"dev_sched_progress_last_iter"
>  	};
> -	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
> +	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
>  	for (i = 0; (int)i < ret; i++) {
>  		unsigned int id;
>  		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
> @@ -1518,8 +1520,8 @@ xstats_id_reset_tests(struct test *t)
>  		}
>  		if (val != dev_expected[i]) {
>  			printf("%d: %s value incorrect, expected %"
> -				PRIu64" got %d\n", __LINE__, dev_names[i],
> -				dev_expected[i], id);
> +				PRIu64" got %"PRIu64"\n", __LINE__,
> +				dev_names[i], dev_expected[i], val);
>  			goto fail;
>  		}
>  		/* reset to zero */
> @@ -1542,11 +1544,11 @@ xstats_id_reset_tests(struct test *t)
>  		}
>  	};
> 
> -/* 48 is stat offset from start of the devices whole xstats.
> +/* 49 is stat offset from start of the devices whole xstats.
>   * This WILL break every time we add a statistic to a port
>   * or the device, but there is no other way to test
>   */
> -#define PORT_OFF 48
> +#define PORT_OFF 50
>  /* num stats for the tested port. CQ size adds more stats to a port */
>  #define NUM_PORT_STATS 21
>  /* the port to test. */
> @@ -1670,7 +1672,7 @@ xstats_id_reset_tests(struct test *t)
>  /* queue offset from start of the devices whole xstats.
>   * This will break every time we add a statistic to a device/port/queue
>   */
> -#define QUEUE_OFF 90
> +#define QUEUE_OFF 92
>  	const uint32_t queue = 0;
>  	num_stats = rte_event_dev_xstats_names_get(evdev,
>  					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
> diff --git a/drivers/event/sw/sw_evdev_xstats.c
> b/drivers/event/sw/sw_evdev_xstats.c
> index 02f7874180..c2647d7da2 100644
> --- a/drivers/event/sw/sw_evdev_xstats.c
> +++ b/drivers/event/sw/sw_evdev_xstats.c
> @@ -17,6 +17,8 @@ enum xstats_type {
>  	/* device instance specific */
>  	no_iq_enq,
>  	no_cq_enq,
> +	sched_last_iter_bitmask,
> +	sched_progress_last_iter,
>  	/* port_specific */
>  	rx_used,
>  	rx_free,
> @@ -57,6 +59,9 @@ get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx
> __rte_unused,
>  	case calls: return sw->sched_called;
>  	case no_iq_enq: return sw->sched_no_iq_enqueues;
>  	case no_cq_enq: return sw->sched_no_cq_enqueues;
> +	case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
> +	case sched_progress_last_iter: return sw->sched_progress_last_iter;
> +
>  	default: return -1;
>  	}
>  }
> @@ -177,9 +182,11 @@ sw_xstats_init(struct sw_evdev *sw)
>  	 */
>  	static const char * const dev_stats[] = { "rx", "tx", "drop",
>  			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
> +			"sched_last_iter_bitmask", "sched_progress_last_iter",
>  	};
>  	static const enum xstats_type dev_types[] = { rx, tx, dropped,
> -			calls, no_iq_enq, no_cq_enq,
> +			calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
> +			sched_progress_last_iter,
>  	};
>  	/* all device stats are allowed to be reset */
> 
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details
  2021-03-04 11:02   ` Van Haaren, Harry
@ 2021-03-08  7:52     ` Jerin Jacob
  0 siblings, 0 replies; 8+ messages in thread
From: Jerin Jacob @ 2021-03-08  7:52 UTC (permalink / raw)
  To: Van Haaren, Harry; +Cc: dev, david.marchand, mattias.ronnblom, jerinj

On Thu, Mar 4, 2021 at 4:33 PM Van Haaren, Harry
<harry.van.haaren@intel.com> wrote:
>
> Fix typo in Mattias' email, apologies for noise.
>
> > -----Original Message-----
> > From: Van Haaren, Harry <harry.van.haaren@intel.com>
> > Sent: Wednesday, March 3, 2021 10:57 AM
> > To: dev@dpdk.org
> > Cc: david.marchand@redhat.com; mattias.ronnblom@ericcson.com;
> > jerinj@marvell.com; Van Haaren, Harry <harry.van.haaren@intel.com>
> > Subject: [PATCH v4] event/sw: add xstats to expose progress details
> >
> > Today it is difficult to know if the SW Eventdev PMD is making
> > forward progress when it runs an iteration of its service. This
> > commit adds two xstats to give better visibility to the application.
> >
> > The new xstats provide an application with which Eventdev ports
> > recieved work in the last iteration of scheduling, as well if
> > forward progress was made by the scheduler.
> >
> > This patch implements an xstat for the SW PMD that exposes a
> > bitmask of ports that were scheduled to. In the unlikely case
> > that the SW PMD instance has 64 or more ports, return UINT64_MAX.
> >
> > Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>


Please fix the following checkpatch issue as needed.

### event/sw: add xstats to expose progress details

WARNING:TYPO_SPELLING: 'recieved' may be misspelled - perhaps 'received'?
#11:
recieved work in the last iteration of scheduling, as well if
^^^^^^^^

WARNING:BRACES: braces {} are not necessary for any arm of this statement
#65: FILE: drivers/event/sw/sw_evdev_scheduler.c:610:
+       if (likely(sw->port_count < 64)) {
[...]
+       } else {
[...]

total: 0 errors, 2 warnings, 153 lines checked



> >
> > ---
> >
> > v3:
> > - Simplify all metrics to Event SW PMD
> >
> > v2:
> > - Fixup printf() %ld to PRIu64
> >
> > Note most of the changes here are unit-test changes to add
> > a statistic to the PMD. The actual "useful code" is a mere
> > handful of lines in a lot of noise.
> >
> > ---
> >  drivers/event/sw/sw_evdev.h           |  2 ++
> >  drivers/event/sw/sw_evdev_scheduler.c | 15 ++++++++++++++
> >  drivers/event/sw/sw_evdev_selftest.c  | 28 ++++++++++++++-------------
> >  drivers/event/sw/sw_evdev_xstats.c    |  9 ++++++++-
> >  4 files changed, 40 insertions(+), 14 deletions(-)
> >
> > diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
> > index 5ab6465c83..33645bd1df 100644
> > --- a/drivers/event/sw/sw_evdev.h
> > +++ b/drivers/event/sw/sw_evdev.h
> > @@ -259,6 +259,8 @@ struct sw_evdev {
> >       uint64_t sched_no_iq_enqueues;
> >       uint64_t sched_no_cq_enqueues;
> >       uint64_t sched_cq_qid_called;
> > +     uint64_t sched_last_iter_bitmask;
> > +     uint8_t sched_progress_last_iter;
> >
> >       uint8_t started;
> >       uint32_t credit_update_quanta;
> > diff --git a/drivers/event/sw/sw_evdev_scheduler.c
> > b/drivers/event/sw/sw_evdev_scheduler.c
> > index f747b3c6d4..d3a6bd5cda 100644
> > --- a/drivers/event/sw/sw_evdev_scheduler.c
> > +++ b/drivers/event/sw/sw_evdev_scheduler.c
> > @@ -559,6 +559,11 @@ sw_event_schedule(struct rte_eventdev *dev)
> >       sw->sched_no_iq_enqueues += (in_pkts_total == 0);
> >       sw->sched_no_cq_enqueues += (out_pkts_total == 0);
> >
> > +     uint64_t work_done = (in_pkts_total + out_pkts_total) != 0;
> > +     sw->sched_progress_last_iter = work_done;
> > +
> > +     uint64_t cqs_scheds_last_iter = 0;
> > +
> >       /* push all the internal buffered QEs in port->cq_ring to the
> >        * worker cores: aka, do the ring transfers batched.
> >        */
> > @@ -578,6 +583,7 @@ sw_event_schedule(struct rte_eventdev *dev)
> >                                       &sw->cq_ring_space[i]);
> >                       port->cq_buf_count = 0;
> >                       no_enq = 0;
> > +                     cqs_scheds_last_iter |= (1ULL << i);
> >               } else {
> >                       sw->cq_ring_space[i] =
> >                                       rte_event_ring_free_count(worker) -
> > @@ -597,4 +603,13 @@ sw_event_schedule(struct rte_eventdev *dev)
> >                       sw->sched_min_burst = sw->sched_min_burst_size;
> >       }
> >
> > +     /* Provide stats on what eventdev ports were scheduled to this
> > +      * iteration. If more than 64 ports are active, always report that
> > +      * all Eventdev ports have been scheduled events.
> > +      */
> > +     if (likely(sw->port_count < 64)) {
> > +             sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
> > +     } else {
> > +             sw->sched_last_iter_bitmask = UINT64_MAX;
> > +     }
> >  }
> > diff --git a/drivers/event/sw/sw_evdev_selftest.c
> > b/drivers/event/sw/sw_evdev_selftest.c
> > index e4bfb3a0f1..d53e903129 100644
> > --- a/drivers/event/sw/sw_evdev_selftest.c
> > +++ b/drivers/event/sw/sw_evdev_selftest.c
> > @@ -873,15 +873,15 @@ xstats_tests(struct test *t)
> >       int ret = rte_event_dev_xstats_names_get(evdev,
> >                                       RTE_EVENT_DEV_XSTATS_DEVICE,
> >                                       0, xstats_names, ids, XSTATS_MAX);
> > -     if (ret != 6) {
> > -             printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
> > +     if (ret != 8) {
> > +             printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
> >               return -1;
> >       }
> >       ret = rte_event_dev_xstats_get(evdev,
> >                                       RTE_EVENT_DEV_XSTATS_DEVICE,
> >                                       0, ids, values, ret);
> > -     if (ret != 6) {
> > -             printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
> > +     if (ret != 8) {
> > +             printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
> >               return -1;
> >       }
> >
> > @@ -959,7 +959,7 @@ xstats_tests(struct test *t)
> >       ret = rte_event_dev_xstats_get(evdev,
> >                                       RTE_EVENT_DEV_XSTATS_DEVICE,
> >                                       0, ids, values, num_stats);
> > -     static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
> > +     static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
> >       for (i = 0; (signed int)i < ret; i++) {
> >               if (expected[i] != values[i]) {
> >                       printf(
> > @@ -975,7 +975,7 @@ xstats_tests(struct test *t)
> >                                       0, NULL, 0);
> >
> >       /* ensure reset statistics are zero-ed */
> > -     static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
> > +     static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
> >       ret = rte_event_dev_xstats_get(evdev,
> >                                       RTE_EVENT_DEV_XSTATS_DEVICE,
> >                                       0, ids, values, num_stats);
> > @@ -1460,7 +1460,7 @@ xstats_id_reset_tests(struct test *t)
> >       for (i = 0; i < XSTATS_MAX; i++)
> >               ids[i] = i;
> >
> > -#define NUM_DEV_STATS 6
> > +#define NUM_DEV_STATS 8
> >       /* Device names / values */
> >       int num_stats = rte_event_dev_xstats_names_get(evdev,
> >                                       RTE_EVENT_DEV_XSTATS_DEVICE,
> > @@ -1504,8 +1504,10 @@ xstats_id_reset_tests(struct test *t)
> >       static const char * const dev_names[] = {
> >               "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
> >               "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
> > +             "dev_sched_last_iter_bitmask",
> > +             "dev_sched_progress_last_iter"
> >       };
> > -     uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
> > +     uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
> >       for (i = 0; (int)i < ret; i++) {
> >               unsigned int id;
> >               uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
> > @@ -1518,8 +1520,8 @@ xstats_id_reset_tests(struct test *t)
> >               }
> >               if (val != dev_expected[i]) {
> >                       printf("%d: %s value incorrect, expected %"
> > -                             PRIu64" got %d\n", __LINE__, dev_names[i],
> > -                             dev_expected[i], id);
> > +                             PRIu64" got %"PRIu64"\n", __LINE__,
> > +                             dev_names[i], dev_expected[i], val);
> >                       goto fail;
> >               }
> >               /* reset to zero */
> > @@ -1542,11 +1544,11 @@ xstats_id_reset_tests(struct test *t)
> >               }
> >       };
> >
> > -/* 48 is stat offset from start of the devices whole xstats.
> > +/* 49 is stat offset from start of the devices whole xstats.
> >   * This WILL break every time we add a statistic to a port
> >   * or the device, but there is no other way to test
> >   */
> > -#define PORT_OFF 48
> > +#define PORT_OFF 50
> >  /* num stats for the tested port. CQ size adds more stats to a port */
> >  #define NUM_PORT_STATS 21
> >  /* the port to test. */
> > @@ -1670,7 +1672,7 @@ xstats_id_reset_tests(struct test *t)
> >  /* queue offset from start of the devices whole xstats.
> >   * This will break every time we add a statistic to a device/port/queue
> >   */
> > -#define QUEUE_OFF 90
> > +#define QUEUE_OFF 92
> >       const uint32_t queue = 0;
> >       num_stats = rte_event_dev_xstats_names_get(evdev,
> >                                       RTE_EVENT_DEV_XSTATS_QUEUE, queue,
> > diff --git a/drivers/event/sw/sw_evdev_xstats.c
> > b/drivers/event/sw/sw_evdev_xstats.c
> > index 02f7874180..c2647d7da2 100644
> > --- a/drivers/event/sw/sw_evdev_xstats.c
> > +++ b/drivers/event/sw/sw_evdev_xstats.c
> > @@ -17,6 +17,8 @@ enum xstats_type {
> >       /* device instance specific */
> >       no_iq_enq,
> >       no_cq_enq,
> > +     sched_last_iter_bitmask,
> > +     sched_progress_last_iter,
> >       /* port_specific */
> >       rx_used,
> >       rx_free,
> > @@ -57,6 +59,9 @@ get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx
> > __rte_unused,
> >       case calls: return sw->sched_called;
> >       case no_iq_enq: return sw->sched_no_iq_enqueues;
> >       case no_cq_enq: return sw->sched_no_cq_enqueues;
> > +     case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
> > +     case sched_progress_last_iter: return sw->sched_progress_last_iter;
> > +
> >       default: return -1;
> >       }
> >  }
> > @@ -177,9 +182,11 @@ sw_xstats_init(struct sw_evdev *sw)
> >        */
> >       static const char * const dev_stats[] = { "rx", "tx", "drop",
> >                       "sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
> > +                     "sched_last_iter_bitmask", "sched_progress_last_iter",
> >       };
> >       static const enum xstats_type dev_types[] = { rx, tx, dropped,
> > -                     calls, no_iq_enq, no_cq_enq,
> > +                     calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
> > +                     sched_progress_last_iter,
> >       };
> >       /* all device stats are allowed to be reset */
> >
> > --
> > 2.25.1
>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH v5] event/sw: add xstats to expose progress details
  2021-03-03 10:56 ` [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details Harry van Haaren
  2021-03-04 11:02   ` Van Haaren, Harry
@ 2021-03-08  9:22   ` Harry van Haaren
  2021-03-08 16:54     ` Jerin Jacob
  1 sibling, 1 reply; 8+ messages in thread
From: Harry van Haaren @ 2021-03-08  9:22 UTC (permalink / raw)
  To: dev; +Cc: david.marchand, mattias.ronnblom, jerinj, Harry van Haaren

Today it is difficult to know if the SW Eventdev PMD is making
forward progress when it runs an iteration of its service. This
commit adds two xstats to give better visibility to the application.

The new xstats provide an application with visibility into which
Eventdev ports received work in the last iteration of scheduling,
as well as whether forward progress was made by the scheduler.

This patch implements an xstat for the SW PMD that exposes a
bitmask of ports that were scheduled to. In the unlikely case
that the SW PMD instance has 64 or more ports, return UINT64_MAX.

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>

---

Thanks for review Jerin!


v5:
- Fixup if/else {} brackets by refactor and removing (Jerin/checkpatch)
- Fixup recieved typo in commit message (Jerin/checkpatch)

v4:
- Simplify all metrics to Event SW PMD

---
 drivers/event/sw/sw_evdev.h           |  2 ++
 drivers/event/sw/sw_evdev_scheduler.c | 13 +++++++++++++
 drivers/event/sw/sw_evdev_selftest.c  | 28 ++++++++++++++-------------
 drivers/event/sw/sw_evdev_xstats.c    |  9 ++++++++-
 4 files changed, 38 insertions(+), 14 deletions(-)

diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 5ab6465c83..33645bd1df 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -259,6 +259,8 @@ struct sw_evdev {
 	uint64_t sched_no_iq_enqueues;
 	uint64_t sched_no_cq_enqueues;
 	uint64_t sched_cq_qid_called;
+	uint64_t sched_last_iter_bitmask;
+	uint8_t sched_progress_last_iter;
 
 	uint8_t started;
 	uint32_t credit_update_quanta;
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index f747b3c6d4..809a54d731 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -559,6 +559,11 @@ sw_event_schedule(struct rte_eventdev *dev)
 	sw->sched_no_iq_enqueues += (in_pkts_total == 0);
 	sw->sched_no_cq_enqueues += (out_pkts_total == 0);
 
+	uint64_t work_done = (in_pkts_total + out_pkts_total) != 0;
+	sw->sched_progress_last_iter = work_done;
+
+	uint64_t cqs_scheds_last_iter = 0;
+
 	/* push all the internal buffered QEs in port->cq_ring to the
 	 * worker cores: aka, do the ring transfers batched.
 	 */
@@ -578,6 +583,7 @@ sw_event_schedule(struct rte_eventdev *dev)
 					&sw->cq_ring_space[i]);
 			port->cq_buf_count = 0;
 			no_enq = 0;
+			cqs_scheds_last_iter |= (1ULL << i);
 		} else {
 			sw->cq_ring_space[i] =
 					rte_event_ring_free_count(worker) -
@@ -597,4 +603,11 @@ sw_event_schedule(struct rte_eventdev *dev)
 			sw->sched_min_burst = sw->sched_min_burst_size;
 	}
 
+	/* Provide stats on what eventdev ports were scheduled to this
+	 * iteration. If more than 64 ports are active, always report that
+	 * all Eventdev ports have been scheduled events.
+	 */
+	sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
+	if (unlikely(sw->port_count >= 64))
+		sw->sched_last_iter_bitmask = UINT64_MAX;
 }
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index e4bfb3a0f1..d53e903129 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -873,15 +873,15 @@ xstats_tests(struct test *t)
 	int ret = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, xstats_names, ids, XSTATS_MAX);
-	if (ret != 6) {
-		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+	if (ret != 8) {
+		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
 		return -1;
 	}
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, ret);
-	if (ret != 6) {
-		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+	if (ret != 8) {
+		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
 		return -1;
 	}
 
@@ -959,7 +959,7 @@ xstats_tests(struct test *t)
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, num_stats);
-	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
+	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
 	for (i = 0; (signed int)i < ret; i++) {
 		if (expected[i] != values[i]) {
 			printf(
@@ -975,7 +975,7 @@ xstats_tests(struct test *t)
 					0, NULL, 0);
 
 	/* ensure reset statistics are zero-ed */
-	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
 	ret = rte_event_dev_xstats_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
 					0, ids, values, num_stats);
@@ -1460,7 +1460,7 @@ xstats_id_reset_tests(struct test *t)
 	for (i = 0; i < XSTATS_MAX; i++)
 		ids[i] = i;
 
-#define NUM_DEV_STATS 6
+#define NUM_DEV_STATS 8
 	/* Device names / values */
 	int num_stats = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_DEVICE,
@@ -1504,8 +1504,10 @@ xstats_id_reset_tests(struct test *t)
 	static const char * const dev_names[] = {
 		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
 		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+		"dev_sched_last_iter_bitmask",
+		"dev_sched_progress_last_iter"
 	};
-	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
 	for (i = 0; (int)i < ret; i++) {
 		unsigned int id;
 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
@@ -1518,8 +1520,8 @@ xstats_id_reset_tests(struct test *t)
 		}
 		if (val != dev_expected[i]) {
 			printf("%d: %s value incorrect, expected %"
-				PRIu64" got %d\n", __LINE__, dev_names[i],
-				dev_expected[i], id);
+				PRIu64" got %"PRIu64"\n", __LINE__,
+				dev_names[i], dev_expected[i], val);
 			goto fail;
 		}
 		/* reset to zero */
@@ -1542,11 +1544,11 @@ xstats_id_reset_tests(struct test *t)
 		}
 	};
 
-/* 48 is stat offset from start of the devices whole xstats.
+/* 49 is stat offset from start of the devices whole xstats.
  * This WILL break every time we add a statistic to a port
  * or the device, but there is no other way to test
  */
-#define PORT_OFF 48
+#define PORT_OFF 50
 /* num stats for the tested port. CQ size adds more stats to a port */
 #define NUM_PORT_STATS 21
 /* the port to test. */
@@ -1670,7 +1672,7 @@ xstats_id_reset_tests(struct test *t)
 /* queue offset from start of the devices whole xstats.
  * This will break every time we add a statistic to a device/port/queue
  */
-#define QUEUE_OFF 90
+#define QUEUE_OFF 92
 	const uint32_t queue = 0;
 	num_stats = rte_event_dev_xstats_names_get(evdev,
 					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 02f7874180..c2647d7da2 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -17,6 +17,8 @@ enum xstats_type {
 	/* device instance specific */
 	no_iq_enq,
 	no_cq_enq,
+	sched_last_iter_bitmask,
+	sched_progress_last_iter,
 	/* port_specific */
 	rx_used,
 	rx_free,
@@ -57,6 +59,9 @@ get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
 	case calls: return sw->sched_called;
 	case no_iq_enq: return sw->sched_no_iq_enqueues;
 	case no_cq_enq: return sw->sched_no_cq_enqueues;
+	case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
+	case sched_progress_last_iter: return sw->sched_progress_last_iter;
+
 	default: return -1;
 	}
 }
@@ -177,9 +182,11 @@ sw_xstats_init(struct sw_evdev *sw)
 	 */
 	static const char * const dev_stats[] = { "rx", "tx", "drop",
 			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+			"sched_last_iter_bitmask", "sched_progress_last_iter",
 	};
 	static const enum xstats_type dev_types[] = { rx, tx, dropped,
-			calls, no_iq_enq, no_cq_enq,
+			calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
+			sched_progress_last_iter,
 	};
 	/* all device stats are allowed to be reset */
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-dev] [PATCH v5] event/sw: add xstats to expose progress details
  2021-03-08  9:22   ` [dpdk-dev] [PATCH v5] " Harry van Haaren
@ 2021-03-08 16:54     ` Jerin Jacob
  0 siblings, 0 replies; 8+ messages in thread
From: Jerin Jacob @ 2021-03-08 16:54 UTC (permalink / raw)
  To: Harry van Haaren; +Cc: dpdk-dev, David Marchand, mattias.ronnblom, Jerin Jacob

On Mon, Mar 8, 2021 at 2:52 PM Harry van Haaren
<harry.van.haaren@intel.com> wrote:
>
> Today it is difficult to know if the SW Eventdev PMD is making
> forward progress when it runs an iteration of its service. This
> commit adds two xstats to give better visibility to the application.
>
> The new xstats provide an application with which Eventdev ports
> received work in the last iteration of scheduling, as well if
> forward progress was made by the scheduler.
>
> This patch implements an xstat for the SW PMD that exposes a
> bitmask of ports that were scheduled to. In the unlikely case
> that the SW PMD instance has 64 or more ports, return UINT64_MAX.
>
> Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>


Applied to dpdk-next-net-eventdev/for-main. Thanks


>
> ---
>
> Thanks for review Jerin!
>
>
> v5:
> - Fixup if/else {} brackets by refactor and removing (Jerin/checkpatch)
> - Fixup recieved typo in commit message (Jerin/checkpatch)
>
> v4:
> - Simplify all metrics to Event SW PMD
>
> ---
>  drivers/event/sw/sw_evdev.h           |  2 ++
>  drivers/event/sw/sw_evdev_scheduler.c | 13 +++++++++++++
>  drivers/event/sw/sw_evdev_selftest.c  | 28 ++++++++++++++-------------
>  drivers/event/sw/sw_evdev_xstats.c    |  9 ++++++++-
>  4 files changed, 38 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
> index 5ab6465c83..33645bd1df 100644
> --- a/drivers/event/sw/sw_evdev.h
> +++ b/drivers/event/sw/sw_evdev.h
> @@ -259,6 +259,8 @@ struct sw_evdev {
>         uint64_t sched_no_iq_enqueues;
>         uint64_t sched_no_cq_enqueues;
>         uint64_t sched_cq_qid_called;
> +       uint64_t sched_last_iter_bitmask;
> +       uint8_t sched_progress_last_iter;
>
>         uint8_t started;
>         uint32_t credit_update_quanta;
> diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
> index f747b3c6d4..809a54d731 100644
> --- a/drivers/event/sw/sw_evdev_scheduler.c
> +++ b/drivers/event/sw/sw_evdev_scheduler.c
> @@ -559,6 +559,11 @@ sw_event_schedule(struct rte_eventdev *dev)
>         sw->sched_no_iq_enqueues += (in_pkts_total == 0);
>         sw->sched_no_cq_enqueues += (out_pkts_total == 0);
>
> +       uint64_t work_done = (in_pkts_total + out_pkts_total) != 0;
> +       sw->sched_progress_last_iter = work_done;
> +
> +       uint64_t cqs_scheds_last_iter = 0;
> +
>         /* push all the internal buffered QEs in port->cq_ring to the
>          * worker cores: aka, do the ring transfers batched.
>          */
> @@ -578,6 +583,7 @@ sw_event_schedule(struct rte_eventdev *dev)
>                                         &sw->cq_ring_space[i]);
>                         port->cq_buf_count = 0;
>                         no_enq = 0;
> +                       cqs_scheds_last_iter |= (1ULL << i);
>                 } else {
>                         sw->cq_ring_space[i] =
>                                         rte_event_ring_free_count(worker) -
> @@ -597,4 +603,11 @@ sw_event_schedule(struct rte_eventdev *dev)
>                         sw->sched_min_burst = sw->sched_min_burst_size;
>         }
>
> +       /* Provide stats on what eventdev ports were scheduled to this
> +        * iteration. If more than 64 ports are active, always report that
> +        * all Eventdev ports have been scheduled events.
> +        */
> +       sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
> +       if (unlikely(sw->port_count >= 64))
> +               sw->sched_last_iter_bitmask = UINT64_MAX;
>  }
> diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
> index e4bfb3a0f1..d53e903129 100644
> --- a/drivers/event/sw/sw_evdev_selftest.c
> +++ b/drivers/event/sw/sw_evdev_selftest.c
> @@ -873,15 +873,15 @@ xstats_tests(struct test *t)
>         int ret = rte_event_dev_xstats_names_get(evdev,
>                                         RTE_EVENT_DEV_XSTATS_DEVICE,
>                                         0, xstats_names, ids, XSTATS_MAX);
> -       if (ret != 6) {
> -               printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
> +       if (ret != 8) {
> +               printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
>                 return -1;
>         }
>         ret = rte_event_dev_xstats_get(evdev,
>                                         RTE_EVENT_DEV_XSTATS_DEVICE,
>                                         0, ids, values, ret);
> -       if (ret != 6) {
> -               printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
> +       if (ret != 8) {
> +               printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
>                 return -1;
>         }
>
> @@ -959,7 +959,7 @@ xstats_tests(struct test *t)
>         ret = rte_event_dev_xstats_get(evdev,
>                                         RTE_EVENT_DEV_XSTATS_DEVICE,
>                                         0, ids, values, num_stats);
> -       static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
> +       static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
>         for (i = 0; (signed int)i < ret; i++) {
>                 if (expected[i] != values[i]) {
>                         printf(
> @@ -975,7 +975,7 @@ xstats_tests(struct test *t)
>                                         0, NULL, 0);
>
>         /* ensure reset statistics are zero-ed */
> -       static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
> +       static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
>         ret = rte_event_dev_xstats_get(evdev,
>                                         RTE_EVENT_DEV_XSTATS_DEVICE,
>                                         0, ids, values, num_stats);
> @@ -1460,7 +1460,7 @@ xstats_id_reset_tests(struct test *t)
>         for (i = 0; i < XSTATS_MAX; i++)
>                 ids[i] = i;
>
> -#define NUM_DEV_STATS 6
> +#define NUM_DEV_STATS 8
>         /* Device names / values */
>         int num_stats = rte_event_dev_xstats_names_get(evdev,
>                                         RTE_EVENT_DEV_XSTATS_DEVICE,
> @@ -1504,8 +1504,10 @@ xstats_id_reset_tests(struct test *t)
>         static const char * const dev_names[] = {
>                 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
>                 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
> +               "dev_sched_last_iter_bitmask",
> +               "dev_sched_progress_last_iter"
>         };
> -       uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
> +       uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
>         for (i = 0; (int)i < ret; i++) {
>                 unsigned int id;
>                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
> @@ -1518,8 +1520,8 @@ xstats_id_reset_tests(struct test *t)
>                 }
>                 if (val != dev_expected[i]) {
>                         printf("%d: %s value incorrect, expected %"
> -                               PRIu64" got %d\n", __LINE__, dev_names[i],
> -                               dev_expected[i], id);
> +                               PRIu64" got %"PRIu64"\n", __LINE__,
> +                               dev_names[i], dev_expected[i], val);
>                         goto fail;
>                 }
>                 /* reset to zero */
> @@ -1542,11 +1544,11 @@ xstats_id_reset_tests(struct test *t)
>                 }
>         };
>
> -/* 48 is stat offset from start of the devices whole xstats.
> +/* 49 is stat offset from start of the devices whole xstats.
>   * This WILL break every time we add a statistic to a port
>   * or the device, but there is no other way to test
>   */
> -#define PORT_OFF 48
> +#define PORT_OFF 50
>  /* num stats for the tested port. CQ size adds more stats to a port */
>  #define NUM_PORT_STATS 21
>  /* the port to test. */
> @@ -1670,7 +1672,7 @@ xstats_id_reset_tests(struct test *t)
>  /* queue offset from start of the devices whole xstats.
>   * This will break every time we add a statistic to a device/port/queue
>   */
> -#define QUEUE_OFF 90
> +#define QUEUE_OFF 92
>         const uint32_t queue = 0;
>         num_stats = rte_event_dev_xstats_names_get(evdev,
>                                         RTE_EVENT_DEV_XSTATS_QUEUE, queue,
> diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
> index 02f7874180..c2647d7da2 100644
> --- a/drivers/event/sw/sw_evdev_xstats.c
> +++ b/drivers/event/sw/sw_evdev_xstats.c
> @@ -17,6 +17,8 @@ enum xstats_type {
>         /* device instance specific */
>         no_iq_enq,
>         no_cq_enq,
> +       sched_last_iter_bitmask,
> +       sched_progress_last_iter,
>         /* port_specific */
>         rx_used,
>         rx_free,
> @@ -57,6 +59,9 @@ get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
>         case calls: return sw->sched_called;
>         case no_iq_enq: return sw->sched_no_iq_enqueues;
>         case no_cq_enq: return sw->sched_no_cq_enqueues;
> +       case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
> +       case sched_progress_last_iter: return sw->sched_progress_last_iter;
> +
>         default: return -1;
>         }
>  }
> @@ -177,9 +182,11 @@ sw_xstats_init(struct sw_evdev *sw)
>          */
>         static const char * const dev_stats[] = { "rx", "tx", "drop",
>                         "sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
> +                       "sched_last_iter_bitmask", "sched_progress_last_iter",
>         };
>         static const enum xstats_type dev_types[] = { rx, tx, dropped,
> -                       calls, no_iq_enq, no_cq_enq,
> +                       calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
> +                       sched_progress_last_iter,
>         };
>         /* all device stats are allowed to be reset */
>
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2021-03-08 16:54 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-02-12 16:58 [dpdk-dev] [PATCH v3 1/3] service: add component useful work attribute Harry van Haaren
2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 2/3] event/sw: add useful work done attribute Harry van Haaren
2021-02-12 16:58 ` [dpdk-dev] [PATCH v3 3/3] event/sw: add xstat for work done in last iteration Harry van Haaren
2021-03-03 10:56 ` [dpdk-dev] [PATCH v4] event/sw: add xstats to expose progress details Harry van Haaren
2021-03-04 11:02   ` Van Haaren, Harry
2021-03-08  7:52     ` Jerin Jacob
2021-03-08  9:22   ` [dpdk-dev] [PATCH v5] " Harry van Haaren
2021-03-08 16:54     ` Jerin Jacob

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).