DPDK patches and discussions
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
	harry.van.haaren@intel.com, jerinj@marvell.com
Subject: [dpdk-dev] [PATCH v5 07/22] event/dlb: add xstats
Date: Sat, 17 Oct 2020 14:04:01 -0500
Message-ID: <1602961456-17392-8-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1602961456-17392-1-git-send-email-timothy.mcdaniel@intel.com>

Add support for DLB xstats. Perform initialization and add the
standard xstats entry points.
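
For reference, these counters are exposed through the standard eventdev
xstats API rather than a DLB-specific interface. A minimal sketch of how
an application might read the device-level stats (dev_id is assumed to be
an already-configured DLB eventdev; error handling elided):

	struct rte_event_dev_xstats_name names[64];
	unsigned int ids[64];
	uint64_t values[64];
	int n, i;

	n = rte_event_dev_xstats_names_get(dev_id,
			RTE_EVENT_DEV_XSTATS_DEVICE, 0,
			names, ids, RTE_DIM(names));
	if (n > 0 && n <= (int)RTE_DIM(names)) {
		n = rte_event_dev_xstats_get(dev_id,
				RTE_EVENT_DEV_XSTATS_DEVICE, 0,
				ids, values, n);
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
	}

A single counter can also be read with rte_event_dev_xstats_by_name_get(),
and counters marked resettable below can be cleared with
rte_event_dev_xstats_reset().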

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c        |   23 +
 drivers/event/dlb/dlb_xstats.c | 1222 ++++++++++++++++++++++++++++++++++++++++
 drivers/event/dlb/meson.build  |    1 +
 3 files changed, 1246 insertions(+)
 create mode 100644 drivers/event/dlb/dlb_xstats.c

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index fbec8f1..6cc3535 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -71,6 +71,17 @@ static struct rte_event_dev_info evdev_dlb_default_info = {
 struct process_local_port_data
 dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
 
+uint32_t
+dlb_get_queue_depth(struct dlb_eventdev *dlb,
+		    struct dlb_eventdev_queue *queue)
+{
+	/* Dummy for now so that the "xstats" patch compiles */
+	RTE_SET_USED(dlb);
+	RTE_SET_USED(queue);
+
+	return 0;
+}
+
 static int
 dlb_hw_query_resources(struct dlb_eventdev *dlb)
 {
@@ -298,6 +309,11 @@ void
 dlb_entry_points_init(struct rte_eventdev *dev)
 {
 	static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
+		.dump             = dlb_eventdev_dump,
+		.xstats_get       = dlb_eventdev_xstats_get,
+		.xstats_get_names = dlb_eventdev_xstats_get_names,
+		.xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
+		.xstats_reset	    = dlb_eventdev_xstats_reset,
 	};
 
 	/* Expose PMD's eventdev interface */
@@ -352,6 +368,13 @@ dlb_primary_eventdev_probe(struct rte_eventdev *dev,
 		return err;
 	}
 
+	/* Complete xstats runtime initialization */
+	err = dlb_xstats_init(dlb);
+	if (err) {
+		DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
+		return err;
+	}
+
 	rte_spinlock_init(&dlb->qm_instance.resource_lock);
 
 	dlb_iface_low_level_io_init(dlb);
diff --git a/drivers/event/dlb/dlb_xstats.c b/drivers/event/dlb/dlb_xstats.c
new file mode 100644
index 0000000..597c3d7
--- /dev/null
+++ b/drivers/event/dlb/dlb_xstats.c
@@ -0,0 +1,1222 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "dlb_priv.h"
+#include "dlb_inline_fns.h"
+
+enum dlb_xstats_type {
+	/* common to device and port */
+	rx_ok,				/**< Receive an event */
+	rx_drop,                        /**< Error bit set in received QE */
+	rx_interrupt_wait,		/**< Wait on an interrupt */
+	rx_umonitor_umwait,		/**< Block using umwait */
+	tx_ok,				/**< Transmit an event */
+	total_polls,			/**< Calls to dequeue_burst */
+	zero_polls,			/**< Calls to dequeue_burst that returned 0 */
+	tx_nospc_ldb_hw_credits,	/**< Insufficient LDB h/w credits */
+	tx_nospc_dir_hw_credits,	/**< Insufficient DIR h/w credits */
+	tx_nospc_inflight_max,		/**< Reach the new_event_threshold */
+	tx_nospc_new_event_limit,	/**< Insufficient s/w credits */
+	tx_nospc_inflight_credits,	/**< Port has too few s/w credits */
+	/* device specific */
+	nb_events_limit,		/**< Maximum num of events */
+	inflight_events,		/**< Current num events outstanding */
+	ldb_pool_size,			/**< Num load balanced credits */
+	dir_pool_size,			/**< Num directed credits */
+	/* port specific */
+	tx_new,				/**< Send an OP_NEW event */
+	tx_fwd,				/**< Send an OP_FORWARD event */
+	tx_rel,				/**< Send an OP_RELEASE event */
+	tx_implicit_rel,		/**< Issue an implicit event release */
+	tx_sched_ordered,		/**< Send a SCHED_TYPE_ORDERED event */
+	tx_sched_unordered,		/**< Send a SCHED_TYPE_PARALLEL event */
+	tx_sched_atomic,		/**< Send a SCHED_TYPE_ATOMIC event */
+	tx_sched_directed,		/**< Send a directed event */
+	tx_invalid,                     /**< Send an event with an invalid op */
+	outstanding_releases,		/**< # of releases a port owes */
+	max_outstanding_releases,	/**< max # of releases a port can owe */
+	rx_sched_ordered,		/**< Dequeue an ordered event */
+	rx_sched_unordered,		/**< Dequeue an unordered event */
+	rx_sched_atomic,		/**< Dequeue an atomic event */
+	rx_sched_directed,		/**< Dequeue a directed event */
+	rx_sched_invalid,               /**< Dequeue event sched type invalid */
+	/* common to port and queue */
+	is_configured,			/**< Port is configured */
+	is_load_balanced,		/**< Port is LDB */
+	hw_id,				/**< Hardware ID */
+	/* queue specific */
+	num_links,			/**< Number of ports linked */
+	sched_type,			/**< Queue sched type */
+	enq_ok,				/**< # events enqueued to the queue */
+	current_depth			/**< Current queue depth */
+};
+
+typedef uint64_t (*dlb_xstats_fn)(struct dlb_eventdev *dlb,
+		uint16_t obj_idx, /* port or queue id */
+		enum dlb_xstats_type stat, int extra_arg);
+
+enum dlb_xstats_fn_type {
+	DLB_XSTATS_FN_DEV,
+	DLB_XSTATS_FN_PORT,
+	DLB_XSTATS_FN_QUEUE
+};
+
+struct dlb_xstats_entry {
+	struct rte_event_dev_xstats_name name;
+	uint64_t reset_value; /* offset subtracted from the raw value to emulate resets */
+	enum dlb_xstats_fn_type fn_id;
+	enum dlb_xstats_type stat;
+	enum rte_event_dev_xstats_mode mode;
+	int extra_arg;
+	uint16_t obj_idx;
+	uint8_t reset_allowed; /* when set, this value can be reset */
+};
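+
+/*
+ * Note on reset semantics: a "reset" does not clear the underlying
+ * counters. It records the current raw value in reset_value, and every
+ * read reports (raw value - reset_value). For example, resetting rx_ok
+ * at a raw count of 110 sets reset_value to 110, so a later read at a
+ * raw count of 150 reports 40.
+ */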
+
+/* Some device stats are simply a summation of the corresponding port values */
+static uint64_t
+dlb_device_traffic_stat_get(struct dlb_eventdev *dlb, int which_stat)
+{
+	int i;
+	uint64_t val = 0;
+
+	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
+		struct dlb_eventdev_port *port = &dlb->ev_ports[i];
+
+		if (!port->setup_done)
+			continue;
+
+		switch (which_stat) {
+		case rx_ok:
+			val += port->stats.traffic.rx_ok;
+			break;
+		case rx_drop:
+			val += port->stats.traffic.rx_drop;
+			break;
+		case rx_interrupt_wait:
+			val += port->stats.traffic.rx_interrupt_wait;
+			break;
+		case rx_umonitor_umwait:
+			val += port->stats.traffic.rx_umonitor_umwait;
+			break;
+		case tx_ok:
+			val += port->stats.traffic.tx_ok;
+			break;
+		case total_polls:
+			val += port->stats.traffic.total_polls;
+			break;
+		case zero_polls:
+			val += port->stats.traffic.zero_polls;
+			break;
+		case tx_nospc_ldb_hw_credits:
+			val += port->stats.traffic.tx_nospc_ldb_hw_credits;
+			break;
+		case tx_nospc_dir_hw_credits:
+			val += port->stats.traffic.tx_nospc_dir_hw_credits;
+			break;
+		case tx_nospc_inflight_max:
+			val += port->stats.traffic.tx_nospc_inflight_max;
+			break;
+		case tx_nospc_new_event_limit:
+			val += port->stats.traffic.tx_nospc_new_event_limit;
+			break;
+		case tx_nospc_inflight_credits:
+			val += port->stats.traffic.tx_nospc_inflight_credits;
+			break;
+		default:
+			return -1;
+		}
+	}
+	return val;
+}
+
+static uint64_t
+get_dev_stat(struct dlb_eventdev *dlb, uint16_t obj_idx __rte_unused,
+	     enum dlb_xstats_type type, int extra_arg __rte_unused)
+{
+	switch (type) {
+	case rx_ok:
+	case rx_drop:
+	case rx_interrupt_wait:
+	case rx_umonitor_umwait:
+	case tx_ok:
+	case total_polls:
+	case zero_polls:
+	case tx_nospc_ldb_hw_credits:
+	case tx_nospc_dir_hw_credits:
+	case tx_nospc_inflight_max:
+	case tx_nospc_new_event_limit:
+	case tx_nospc_inflight_credits:
+		return dlb_device_traffic_stat_get(dlb, type);
+	case nb_events_limit:
+		return dlb->new_event_limit;
+	case inflight_events:
+		return __atomic_load_n(&dlb->inflights, __ATOMIC_SEQ_CST);
+	case ldb_pool_size:
+		return dlb->num_ldb_credits;
+	case dir_pool_size:
+		return dlb->num_dir_credits;
+	default: return -1;
+	}
+}
+
+static uint64_t
+get_port_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
+	      enum dlb_xstats_type type, int extra_arg __rte_unused)
+{
+	struct dlb_eventdev_port *ev_port = &dlb->ev_ports[obj_idx];
+
+	switch (type) {
+	case rx_ok: return ev_port->stats.traffic.rx_ok;
+
+	case rx_drop: return ev_port->stats.traffic.rx_drop;
+
+	case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;
+
+	case rx_umonitor_umwait:
+		return ev_port->stats.traffic.rx_umonitor_umwait;
+
+	case tx_ok: return ev_port->stats.traffic.tx_ok;
+
+	case total_polls: return ev_port->stats.traffic.total_polls;
+
+	case zero_polls: return ev_port->stats.traffic.zero_polls;
+
+	case tx_nospc_ldb_hw_credits:
+		return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;
+
+	case tx_nospc_dir_hw_credits:
+		return ev_port->stats.traffic.tx_nospc_dir_hw_credits;
+
+	case tx_nospc_inflight_max:
+		return ev_port->stats.traffic.tx_nospc_inflight_max;
+
+	case tx_nospc_new_event_limit:
+		return ev_port->stats.traffic.tx_nospc_new_event_limit;
+
+	case tx_nospc_inflight_credits:
+		return ev_port->stats.traffic.tx_nospc_inflight_credits;
+
+	case is_configured: return ev_port->setup_done;
+
+	case is_load_balanced: return !ev_port->qm_port.is_directed;
+
+	case hw_id: return ev_port->qm_port.id;
+
+	case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];
+
+	case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];
+
+	case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];
+
+	case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;
+
+	case tx_sched_ordered:
+		return ev_port->stats.tx_sched_cnt[DLB_SCHED_ORDERED];
+
+	case tx_sched_unordered:
+		return ev_port->stats.tx_sched_cnt[DLB_SCHED_UNORDERED];
+
+	case tx_sched_atomic:
+		return ev_port->stats.tx_sched_cnt[DLB_SCHED_ATOMIC];
+
+	case tx_sched_directed:
+		return ev_port->stats.tx_sched_cnt[DLB_SCHED_DIRECTED];
+
+	case tx_invalid: return ev_port->stats.tx_invalid;
+
+	case outstanding_releases: return ev_port->outstanding_releases;
+
+	case max_outstanding_releases:
+		return DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
+
+	case rx_sched_ordered:
+		return ev_port->stats.rx_sched_cnt[DLB_SCHED_ORDERED];
+
+	case rx_sched_unordered:
+		return ev_port->stats.rx_sched_cnt[DLB_SCHED_UNORDERED];
+
+	case rx_sched_atomic:
+		return ev_port->stats.rx_sched_cnt[DLB_SCHED_ATOMIC];
+
+	case rx_sched_directed:
+		return ev_port->stats.rx_sched_cnt[DLB_SCHED_DIRECTED];
+
+	case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;
+
+	default: return -1;
+	}
+}
+
+static uint64_t
+get_queue_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
+	       enum dlb_xstats_type type, int extra_arg __rte_unused)
+{
+	struct dlb_eventdev_queue *ev_queue = &dlb->ev_queues[obj_idx];
+
+	switch (type) {
+	case is_configured: return ev_queue->setup_done;
+
+	case is_load_balanced: return !ev_queue->qm_queue.is_directed;
+
+	case hw_id: return ev_queue->qm_queue.id;
+
+	case num_links: return ev_queue->num_links;
+
+	case sched_type: return ev_queue->qm_queue.sched_type;
+
+	case enq_ok:
+	{
+		int port_count = 0;
+		uint64_t enq_ok_tally = 0;
+
+		/* enq_ok is counted per port; sum the per-port tallies */
+		for (port_count = 0; port_count < DLB_MAX_NUM_PORTS;
+		     port_count++) {
+			struct dlb_eventdev_port *ev_port =
+				&dlb->ev_ports[port_count];
+			enq_ok_tally += ev_port->stats.enq_ok[ev_queue->id];
+		}
+		ev_queue->enq_ok = enq_ok_tally;
+		return ev_queue->enq_ok;
+	}
+
+	case current_depth: return dlb_get_queue_depth(dlb, ev_queue);
+
+	default: return -1;
+	}
+}
+
+int
+dlb_xstats_init(struct dlb_eventdev *dlb)
+{
+	/*
+	 * Define the stat names and types used to build up the device
+	 * xstats array. There are three sets of stats:
+	 *   - device-level
+	 *   - per-port
+	 *   - per-qid
+	 *
+	 * For each set there are three parallel arrays: one holding the
+	 * names, one holding the stat type parameter passed to the function
+	 * that retrieves the stat, and one indicating whether the stat can
+	 * be reset. For example, dev_stats[0] ("rx_ok"), dev_types[0]
+	 * (rx_ok) and dev_reset_allowed[0] (0) together define a
+	 * non-resettable "dev_rx_ok" xstat. All three arrays must be kept
+	 * in sync.
+	 */
+	static const char * const dev_stats[] = {
+		"rx_ok",
+		"rx_drop",
+		"rx_interrupt_wait",
+		"rx_umonitor_umwait",
+		"tx_ok",
+		"total_polls",
+		"zero_polls",
+		"tx_nospc_ldb_hw_credits",
+		"tx_nospc_dir_hw_credits",
+		"tx_nospc_inflight_max",
+		"tx_nospc_new_event_limit",
+		"tx_nospc_inflight_credits",
+		"nb_events_limit",
+		"inflight_events",
+		"ldb_pool_size",
+		"dir_pool_size",
+	};
+	static const enum dlb_xstats_type dev_types[] = {
+		rx_ok,
+		rx_drop,
+		rx_interrupt_wait,
+		rx_umonitor_umwait,
+		tx_ok,
+		total_polls,
+		zero_polls,
+		tx_nospc_ldb_hw_credits,
+		tx_nospc_dir_hw_credits,
+		tx_nospc_inflight_max,
+		tx_nospc_new_event_limit,
+		tx_nospc_inflight_credits,
+		nb_events_limit,
+		inflight_events,
+		ldb_pool_size,
+		dir_pool_size,
+	};
+	/* Note: generated device stats are not allowed to be reset. */
+	static const uint8_t dev_reset_allowed[] = {
+		0, /* rx_ok */
+		0, /* rx_drop */
+		0, /* rx_interrupt_wait */
+		0, /* rx_umonitor_umwait */
+		0, /* tx_ok */
+		0, /* total_polls */
+		0, /* zero_polls */
+		0, /* tx_nospc_ldb_hw_credits */
+		0, /* tx_nospc_dir_hw_credits */
+		0, /* tx_nospc_inflight_max */
+		0, /* tx_nospc_new_event_limit */
+		0, /* tx_nospc_inflight_credits */
+		0, /* nb_events_limit */
+		0, /* inflight_events */
+		0, /* ldb_pool_size */
+		0, /* dir_pool_size */
+	};
+	static const char * const port_stats[] = {
+		"is_configured",
+		"is_load_balanced",
+		"hw_id",
+		"rx_ok",
+		"rx_drop",
+		"rx_interrupt_wait",
+		"rx_umonitor_umwait",
+		"tx_ok",
+		"total_polls",
+		"zero_polls",
+		"tx_nospc_ldb_hw_credits",
+		"tx_nospc_dir_hw_credits",
+		"tx_nospc_inflight_max",
+		"tx_nospc_new_event_limit",
+		"tx_nospc_inflight_credits",
+		"tx_new",
+		"tx_fwd",
+		"tx_rel",
+		"tx_implicit_rel",
+		"tx_sched_ordered",
+		"tx_sched_unordered",
+		"tx_sched_atomic",
+		"tx_sched_directed",
+		"tx_invalid",
+		"outstanding_releases",
+		"max_outstanding_releases",
+		"rx_sched_ordered",
+		"rx_sched_unordered",
+		"rx_sched_atomic",
+		"rx_sched_directed",
+		"rx_sched_invalid"
+	};
+	static const enum dlb_xstats_type port_types[] = {
+		is_configured,
+		is_load_balanced,
+		hw_id,
+		rx_ok,
+		rx_drop,
+		rx_interrupt_wait,
+		rx_umonitor_umwait,
+		tx_ok,
+		total_polls,
+		zero_polls,
+		tx_nospc_ldb_hw_credits,
+		tx_nospc_dir_hw_credits,
+		tx_nospc_inflight_max,
+		tx_nospc_new_event_limit,
+		tx_nospc_inflight_credits,
+		tx_new,
+		tx_fwd,
+		tx_rel,
+		tx_implicit_rel,
+		tx_sched_ordered,
+		tx_sched_unordered,
+		tx_sched_atomic,
+		tx_sched_directed,
+		tx_invalid,
+		outstanding_releases,
+		max_outstanding_releases,
+		rx_sched_ordered,
+		rx_sched_unordered,
+		rx_sched_atomic,
+		rx_sched_directed,
+		rx_sched_invalid
+	};
+	static const uint8_t port_reset_allowed[] = {
+		0, /* is_configured */
+		0, /* is_load_balanced */
+		0, /* hw_id */
+		1, /* rx_ok */
+		1, /* rx_drop */
+		1, /* rx_interrupt_wait */
+		1, /* rx_umonitor_umwait */
+		1, /* tx_ok */
+		1, /* total_polls */
+		1, /* zero_polls */
+		1, /* tx_nospc_ldb_hw_credits */
+		1, /* tx_nospc_dir_hw_credits */
+		1, /* tx_nospc_inflight_max */
+		1, /* tx_nospc_new_event_limit */
+		1, /* tx_nospc_inflight_credits */
+		1, /* tx_new */
+		1, /* tx_fwd */
+		1, /* tx_rel */
+		1, /* tx_implicit_rel */
+		1, /* tx_sched_ordered */
+		1, /* tx_sched_unordered */
+		1, /* tx_sched_atomic */
+		1, /* tx_sched_directed */
+		1, /* tx_invalid */
+		0, /* outstanding_releases */
+		0, /* max_outstanding_releases */
+		1, /* rx_sched_ordered */
+		1, /* rx_sched_unordered */
+		1, /* rx_sched_atomic */
+		1, /* rx_sched_directed */
+		1  /* rx_sched_invalid */
+	};
+
+	/* QID specific stats */
+	static const char * const qid_stats[] = {
+		"is_configured",
+		"is_load_balanced",
+		"hw_id",
+		"num_links",
+		"sched_type",
+		"enq_ok",
+		"current_depth",
+	};
+	static const enum dlb_xstats_type qid_types[] = {
+		is_configured,
+		is_load_balanced,
+		hw_id,
+		num_links,
+		sched_type,
+		enq_ok,
+		current_depth,
+	};
+	static const uint8_t qid_reset_allowed[] = {
+		0, /* is_configured */
+		0, /* is_load_balanced */
+		0, /* hw_id */
+		0, /* num_links */
+		0, /* sched_type */
+		1, /* enq_ok */
+		0, /* current_depth */
+	};
+
+	/* ---- end of stat definitions ---- */
+
+	/* check sizes, since a missed comma can lead to strings being
+	 * joined by the compiler.
+	 */
+	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
+	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
+	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
+
+	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
+	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
+	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
+
+	/* other vars */
+	const unsigned int count = RTE_DIM(dev_stats) +
+			DLB_MAX_NUM_PORTS * RTE_DIM(port_stats) +
+			DLB_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
+	unsigned int i, port, qid, stat_id = 0;
+
+	dlb->xstats = rte_zmalloc_socket(NULL,
+					 sizeof(dlb->xstats[0]) * count, 0,
+					 dlb->qm_instance.info.socket_id);
+	if (dlb->xstats == NULL)
+		return -ENOMEM;
+
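+	/*
+	 * The xstats entries are laid out as one flat array: all device
+	 * stats first, then a block of port stats for each port, then a
+	 * block of queue stats for each queue. The per-object offsets and
+	 * counts recorded below are what the get-names and reset paths use
+	 * to index into it. The sname macro below expands to the name
+	 * buffer of the entry currently being initialized.
+	 */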
+#define sname dlb->xstats[stat_id].name.name
+	for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
+		dlb->xstats[stat_id] = (struct dlb_xstats_entry) {
+			.fn_id = DLB_XSTATS_FN_DEV,
+			.stat = dev_types[i],
+			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
+			.reset_allowed = dev_reset_allowed[i],
+		};
+		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
+	}
+	dlb->xstats_count_mode_dev = stat_id;
+
+	for (port = 0; port < DLB_MAX_NUM_PORTS; port++) {
+		uint32_t count_offset = stat_id;
+
+		dlb->xstats_offset_for_port[port] = stat_id;
+
+		for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
+			dlb->xstats[stat_id] = (struct dlb_xstats_entry){
+				.fn_id = DLB_XSTATS_FN_PORT,
+				.obj_idx = port,
+				.stat = port_types[i],
+				.mode = RTE_EVENT_DEV_XSTATS_PORT,
+				.reset_allowed = port_reset_allowed[i],
+			};
+			snprintf(sname, sizeof(sname), "port_%u_%s",
+				 port, port_stats[i]);
+		}
+
+		dlb->xstats_count_per_port[port] = stat_id - count_offset;
+	}
+
+	dlb->xstats_count_mode_port = stat_id - dlb->xstats_count_mode_dev;
+
+	for (qid = 0; qid < DLB_MAX_NUM_QUEUES; qid++) {
+		uint32_t count_offset = stat_id;
+
+		dlb->xstats_offset_for_qid[qid] = stat_id;
+
+		for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
+			dlb->xstats[stat_id] = (struct dlb_xstats_entry){
+				.fn_id = DLB_XSTATS_FN_QUEUE,
+				.obj_idx = qid,
+				.stat = qid_types[i],
+				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+				.reset_allowed = qid_reset_allowed[i],
+			};
+			snprintf(sname, sizeof(sname), "qid_%u_%s",
+				 qid, qid_stats[i]);
+		}
+
+		dlb->xstats_count_per_qid[qid] = stat_id - count_offset;
+	}
+
+	dlb->xstats_count_mode_queue = stat_id -
+		(dlb->xstats_count_mode_dev + dlb->xstats_count_mode_port);
+#undef sname
+
+	dlb->xstats_count = stat_id;
+
+	return 0;
+}
+
+void
+dlb_xstats_uninit(struct dlb_eventdev *dlb)
+{
+	rte_free(dlb->xstats);
+	dlb->xstats_count = 0;
+}
+
+int
+dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		struct rte_event_dev_xstats_name *xstats_names,
+		unsigned int *ids, unsigned int size)
+{
+	const struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	unsigned int i;
+	unsigned int xidx = 0;
+	uint32_t xstats_mode_count = 0;
+	uint32_t start_offset = 0;
+
+	switch (mode) {
+	case RTE_EVENT_DEV_XSTATS_DEVICE:
+		xstats_mode_count = dlb->xstats_count_mode_dev;
+		break;
+	case RTE_EVENT_DEV_XSTATS_PORT:
+		if (queue_port_id >= DLB_MAX_NUM_PORTS)
+			break;
+		xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
+		start_offset = dlb->xstats_offset_for_port[queue_port_id];
+		break;
+	case RTE_EVENT_DEV_XSTATS_QUEUE:
+#if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
+		if (queue_port_id >= DLB_MAX_NUM_QUEUES)
+			break;
+#endif
+		xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
+		start_offset = dlb->xstats_offset_for_qid[queue_port_id];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
+		return xstats_mode_count;
+
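+	/*
+	 * The ids returned are global indices into the flat xstats array
+	 * and can be passed directly to the xstats get function.
+	 */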
+	for (i = 0; i < dlb->xstats_count && xidx < size; i++) {
+		if (dlb->xstats[i].mode != mode)
+			continue;
+
+		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+		    queue_port_id != dlb->xstats[i].obj_idx)
+			continue;
+
+		xstats_names[xidx] = dlb->xstats[i].name;
+		if (ids)
+			ids[xidx] = start_offset + xidx;
+		xidx++;
+	}
+	return xidx;
+}
+
+static int
+dlb_xstats_update(struct dlb_eventdev *dlb,
+		enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id, const unsigned int ids[],
+		uint64_t values[], unsigned int n, const uint32_t reset)
+{
+	unsigned int i;
+	unsigned int xidx = 0;
+	uint32_t xstats_mode_count = 0;
+
+	switch (mode) {
+	case RTE_EVENT_DEV_XSTATS_DEVICE:
+		xstats_mode_count = dlb->xstats_count_mode_dev;
+		break;
+	case RTE_EVENT_DEV_XSTATS_PORT:
+		if (queue_port_id >= DLB_MAX_NUM_PORTS)
+			goto invalid_value;
+		xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
+		break;
+	case RTE_EVENT_DEV_XSTATS_QUEUE:
+#if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
+		if (queue_port_id >= DLB_MAX_NUM_QUEUES)
+			goto invalid_value;
+#endif
+		xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
+		break;
+	default:
+		goto invalid_value;
+	}
+
+	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
+		struct dlb_xstats_entry *xs;
+		dlb_xstats_fn fn;
+
+		if (ids[i] >= dlb->xstats_count)
+			continue;
+
+		xs = &dlb->xstats[ids[i]];
+
+		if (xs->mode != mode)
+			continue;
+
+		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+		    queue_port_id != xs->obj_idx)
+			continue;
+
+		switch (xs->fn_id) {
+		case DLB_XSTATS_FN_DEV:
+			fn = get_dev_stat;
+			break;
+		case DLB_XSTATS_FN_PORT:
+			fn = get_port_stat;
+			break;
+		case DLB_XSTATS_FN_QUEUE:
+			fn = get_queue_stat;
+			break;
+		default:
+			DLB_LOG_ERR("Unexpected xstat fn_id %d\n",
+				     xs->fn_id);
+			return -EINVAL;
+		}
+
+		uint64_t val = fn(dlb, xs->obj_idx, xs->stat,
+				  xs->extra_arg) - xs->reset_value;
+
+		if (values)
+			values[xidx] = val;
+
+		if (xs->reset_allowed && reset)
+			xs->reset_value += val;
+
+		xidx++;
+	}
+
+	return xidx;
+
+invalid_value:
+	return -EINVAL;
+}
+
+int
+dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	const uint32_t reset = 0;
+
+	return dlb_xstats_update(dlb, mode, queue_port_id, ids, values, n,
+				  reset);
+}
+
+uint64_t
+dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
+				const char *name, unsigned int *id)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	unsigned int i;
+	dlb_xstats_fn fn;
+
+	for (i = 0; i < dlb->xstats_count; i++) {
+		struct dlb_xstats_entry *xs = &dlb->xstats[i];
+
+		if (strncmp(xs->name.name, name,
+			    RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
+			if (id != NULL)
+				*id = i;
+
+			switch (xs->fn_id) {
+			case DLB_XSTATS_FN_DEV:
+				fn = get_dev_stat;
+				break;
+			case DLB_XSTATS_FN_PORT:
+				fn = get_port_stat;
+				break;
+			case DLB_XSTATS_FN_QUEUE:
+				fn = get_queue_stat;
+				break;
+			default:
+				DLB_LOG_ERR("Unexpected xstat fn_id %d\n",
+					    xs->fn_id);
+				return (uint64_t)-1;
+			}
+
+			return fn(dlb, xs->obj_idx, xs->stat,
+				  xs->extra_arg) - xs->reset_value;
+		}
+	}
+	if (id != NULL)
+		*id = (uint32_t)-1;
+	return (uint64_t)-1;
+}
+
+static void
+dlb_xstats_reset_range(struct dlb_eventdev *dlb, uint32_t start,
+		       uint32_t num)
+{
+	uint32_t i;
+	dlb_xstats_fn fn;
+
+	for (i = start; i < start + num; i++) {
+		struct dlb_xstats_entry *xs = &dlb->xstats[i];
+
+		if (!xs->reset_allowed)
+			continue;
+
+		switch (xs->fn_id) {
+		case DLB_XSTATS_FN_DEV:
+			fn = get_dev_stat;
+			break;
+		case DLB_XSTATS_FN_PORT:
+			fn = get_port_stat;
+			break;
+		case DLB_XSTATS_FN_QUEUE:
+			fn = get_queue_stat;
+			break;
+		default:
+			DLB_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
+			return;
+		}
+
+		uint64_t val = fn(dlb, xs->obj_idx, xs->stat, xs->extra_arg);
+		xs->reset_value = val;
+	}
+}
+
+static int
+dlb_xstats_reset_queue(struct dlb_eventdev *dlb, uint8_t queue_id,
+		       const uint32_t ids[], uint32_t nb_ids)
+{
+	const uint32_t reset = 1;
+
+	if (ids) {
+		uint32_t nb_reset = dlb_xstats_update(dlb,
+					RTE_EVENT_DEV_XSTATS_QUEUE,
+					queue_id, ids, NULL, nb_ids,
+					reset);
+		return nb_reset == nb_ids ? 0 : -EINVAL;
+	}
+
+	dlb_xstats_reset_range(dlb,
+			       dlb->xstats_offset_for_qid[queue_id],
+			       dlb->xstats_count_per_qid[queue_id]);
+
+	return 0;
+}
+
+static int
+dlb_xstats_reset_port(struct dlb_eventdev *dlb, uint8_t port_id,
+		      const uint32_t ids[], uint32_t nb_ids)
+{
+	const uint32_t reset = 1;
+	int offset = dlb->xstats_offset_for_port[port_id];
+	int nb_stat = dlb->xstats_count_per_port[port_id];
+
+	if (ids) {
+		uint32_t nb_reset = dlb_xstats_update(dlb,
+					RTE_EVENT_DEV_XSTATS_PORT, port_id,
+					ids, NULL, nb_ids,
+					reset);
+		return nb_reset == nb_ids ? 0 : -EINVAL;
+	}
+
+	dlb_xstats_reset_range(dlb, offset, nb_stat);
+	return 0;
+}
+
+static int
+dlb_xstats_reset_dev(struct dlb_eventdev *dlb, const uint32_t ids[],
+		     uint32_t nb_ids)
+{
+	uint32_t i;
+
+	if (ids) {
+		for (i = 0; i < nb_ids; i++) {
+			uint32_t id = ids[i];
+
+			if (id >= dlb->xstats_count_mode_dev)
+				return -EINVAL;
+			dlb_xstats_reset_range(dlb, id, 1);
+		}
+	} else {
+		for (i = 0; i < dlb->xstats_count_mode_dev; i++)
+			dlb_xstats_reset_range(dlb, i, 1);
+	}
+
+	return 0;
+}
+
+int
+dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
+			  enum rte_event_dev_xstats_mode mode,
+			  int16_t queue_port_id,
+			  const uint32_t ids[],
+			  uint32_t nb_ids)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	uint32_t i;
+
+	/* handle -1 for queue_port_id here, looping over all ports/queues */
+	switch (mode) {
+	case RTE_EVENT_DEV_XSTATS_DEVICE:
+		if (dlb_xstats_reset_dev(dlb, ids, nb_ids))
+			return -EINVAL;
+		break;
+	case RTE_EVENT_DEV_XSTATS_PORT:
+		if (queue_port_id == -1) {
+			for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
+				if (dlb_xstats_reset_port(dlb, i, ids,
+							  nb_ids))
+					return -EINVAL;
+			}
+		} else if (queue_port_id < DLB_MAX_NUM_PORTS) {
+			if (dlb_xstats_reset_port(dlb, queue_port_id, ids,
+						  nb_ids))
+				return -EINVAL;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case RTE_EVENT_DEV_XSTATS_QUEUE:
+		if (queue_port_id == -1) {
+			for (i = 0; i < DLB_MAX_NUM_QUEUES; i++) {
+				if (dlb_xstats_reset_queue(dlb, i, ids,
+							   nb_ids))
+					return -EINVAL;
+			}
+		} else if (queue_port_id < DLB_MAX_NUM_QUEUES) {
+			if (dlb_xstats_reset_queue(dlb, queue_port_id, ids,
+						   nb_ids))
+				return -EINVAL;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+void
+dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+	struct dlb_eventdev *dlb;
+	struct dlb_hw_dev *handle;
+	int i;
+
+	if (f == NULL) {
+		printf("Invalid file pointer\n");
+		return;
+	}
+
+	if (dev == NULL) {
+		fprintf(f, "Invalid event device\n");
+		return;
+	}
+
+	dlb = dlb_pmd_priv(dev);
+
+	if (dlb == NULL) {
+		fprintf(f, "DLB Event device cannot be dumped!\n");
+		return;
+	}
+
+	if (!dlb->configured)
+		fprintf(f, "DLB Event device is not configured\n");
+
+	handle = &dlb->qm_instance;
+
+	fprintf(f, "================\n");
+	fprintf(f, "DLB Device Dump\n");
+	fprintf(f, "================\n");
+
+	fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
+		dlb->umwait_allowed ? "yes" : "no");
+
+	/* Generic top level device information */
+
+	fprintf(f, "device is configured and run state = ");
+	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+		fprintf(f, "STOPPED\n");
+	else if (dlb->run_state == DLB_RUN_STATE_STOPPING)
+		fprintf(f, "STOPPING\n");
+	else if (dlb->run_state == DLB_RUN_STATE_STARTING)
+		fprintf(f, "STARTING\n");
+	else if (dlb->run_state == DLB_RUN_STATE_STARTED)
+		fprintf(f, "STARTED\n");
+	else
+		fprintf(f, "UNEXPECTED\n");
+
+	fprintf(f,
+		"dev ID=%d, dom ID=%u, sock=%u, evdev=%p\n",
+		handle->device_id, handle->domain_id,
+		handle->info.socket_id, dlb->event_dev);
+
+	fprintf(f, "num dir ports=%u, num dir queues=%u\n",
+		dlb->num_dir_ports, dlb->num_dir_queues);
+
+	fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
+		dlb->num_ldb_ports, dlb->num_ldb_queues);
+
+	fprintf(f, "dir_credit_pool_id=%u, num_credits=%u\n",
+		handle->cfg.dir_credit_pool_id, handle->cfg.num_dir_credits);
+
+	fprintf(f, "ldb_credit_pool_id=%u, num_credits=%u\n",
+		handle->cfg.ldb_credit_pool_id, handle->cfg.num_ldb_credits);
+
+	fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
+		handle->cfg.resources.num_atomic_inflights,
+		handle->cfg.resources.num_hist_list_entries);
+
+	fprintf(f, "results from most recent hw resource query:\n");
+
+	fprintf(f, "\tnum_sched_domains = %u\n",
+		dlb->hw_rsrc_query_results.num_sched_domains);
+
+	fprintf(f, "\tnum_ldb_queues = %u\n",
+		dlb->hw_rsrc_query_results.num_ldb_queues);
+
+	fprintf(f, "\tnum_ldb_ports = %u\n",
+		dlb->hw_rsrc_query_results.num_ldb_ports);
+
+	fprintf(f, "\tnum_dir_ports = %u\n",
+		dlb->hw_rsrc_query_results.num_dir_ports);
+
+	fprintf(f, "\tnum_atomic_inflights = %u\n",
+		dlb->hw_rsrc_query_results.num_atomic_inflights);
+
+	fprintf(f, "\tmax_contiguous_atomic_inflights = %u\n",
+		dlb->hw_rsrc_query_results.max_contiguous_atomic_inflights);
+
+	fprintf(f, "\tnum_hist_list_entries = %u\n",
+		dlb->hw_rsrc_query_results.num_hist_list_entries);
+
+	fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
+		dlb->hw_rsrc_query_results.max_contiguous_hist_list_entries);
+
+	fprintf(f, "\tnum_ldb_credits = %u\n",
+		dlb->hw_rsrc_query_results.num_ldb_credits);
+
+	fprintf(f, "\tmax_contiguous_ldb_credits = %u\n",
+		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits);
+
+	fprintf(f, "\tnum_dir_credits = %u\n",
+		dlb->hw_rsrc_query_results.num_dir_credits);
+
+	fprintf(f, "\tmax_contiguous_dir_credits = %u\n",
+		dlb->hw_rsrc_query_results.max_contiguous_dir_credits);
+
+	fprintf(f, "\tnum_ldb_credit_pools = %u\n",
+		dlb->hw_rsrc_query_results.num_ldb_credit_pools);
+
+	fprintf(f, "\tnum_dir_credit_pools = %u\n",
+		dlb->hw_rsrc_query_results.num_dir_credit_pools);
+
+	/* Port level information */
+
+	for (i = 0; i < dlb->num_ports; i++) {
+		struct dlb_eventdev_port *p = &dlb->ev_ports[i];
+		int j;
+
+		if (!p->enq_configured)
+			fprintf(f, "Port_%d is not configured\n", i);
+
+		fprintf(f, "Port_%d\n", i);
+		fprintf(f, "=======\n");
+
+		fprintf(f, "\tevport_%u is configured, setup done=%d\n",
+			p->id, p->setup_done);
+
+		fprintf(f, "\tconfig state=%d, port state=%d\n",
+			p->qm_port.config_state, p->qm_port.state);
+
+		fprintf(f, "\tport is %s\n",
+			p->qm_port.is_directed ? "directed" : "load balanced");
+
+		fprintf(f, "\toutstanding releases=%u\n",
+			p->outstanding_releases);
+
+		fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
+			p->inflight_max, p->inflight_credits);
+
+		fprintf(f, "\tcredit update quanta=%u, implicit release=%u\n",
+			p->credit_update_quanta, p->implicit_release);
+
+		fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);
+
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+			if (p->link[j].valid)
+				fprintf(f, "id=%u prio=%u ",
+					p->link[j].queue_id,
+					p->link[j].priority);
+		}
+		fprintf(f, "\n");
+
+		fprintf(f, "\thardware port id=%u\n", p->qm_port.id);
+
+		fprintf(f, "\tcached_ldb_credits=%u\n",
+			p->qm_port.cached_ldb_credits);
+
+		fprintf(f, "\tldb_pushcount_at_credit_expiry = %u\n",
+			p->qm_port.ldb_pushcount_at_credit_expiry);
+
+		fprintf(f, "\tldb_credits = %u\n",
+			p->qm_port.ldb_credits);
+
+		fprintf(f, "\tcached_dir_credits = %u\n",
+			p->qm_port.cached_dir_credits);
+
+		fprintf(f, "\tdir_pushcount_at_credit_expiry=%u\n",
+			p->qm_port.dir_pushcount_at_credit_expiry);
+
+		fprintf(f, "\tdir_credits = %u\n",
+			p->qm_port.dir_credits);
+
+		fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
+			p->qm_port.gen_bit,
+			p->qm_port.cq_idx,
+			p->qm_port.cq_depth);
+
+		fprintf(f, "\tuse reserved token scheme=%d, cq_rsvd_token_deficit=%u\n",
+			p->qm_port.use_rsvd_token_scheme,
+			p->qm_port.cq_rsvd_token_deficit);
+
+		fprintf(f, "\tinterrupt armed=%d\n",
+			p->qm_port.int_armed);
+
+		fprintf(f, "\tPort statistics\n");
+
+		fprintf(f, "\t\trx_ok %" PRIu64 "\n",
+			p->stats.traffic.rx_ok);
+
+		fprintf(f, "\t\trx_drop %" PRIu64 "\n",
+			p->stats.traffic.rx_drop);
+
+		fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
+			p->stats.traffic.rx_interrupt_wait);
+
+		fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
+			p->stats.traffic.rx_umonitor_umwait);
+
+		fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
+			p->stats.traffic.tx_ok);
+
+		fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
+			p->stats.traffic.total_polls);
+
+		fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
+			p->stats.traffic.zero_polls);
+
+		fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
+			p->stats.traffic.tx_nospc_ldb_hw_credits);
+
+		fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
+			p->stats.traffic.tx_nospc_dir_hw_credits);
+
+		fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
+			p->stats.traffic.tx_nospc_inflight_max);
+
+		fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
+			p->stats.traffic.tx_nospc_new_event_limit);
+
+		fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
+			p->stats.traffic.tx_nospc_inflight_credits);
+
+		fprintf(f, "\t\ttx_new %" PRIu64 "\n",
+			p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);
+
+		fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
+			p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);
+
+		fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
+			p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);
+
+		fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
+			p->stats.tx_implicit_rel);
+
+		fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
+			p->stats.tx_sched_cnt[DLB_SCHED_ORDERED]);
+
+		fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
+			p->stats.tx_sched_cnt[DLB_SCHED_UNORDERED]);
+
+		fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
+			p->stats.tx_sched_cnt[DLB_SCHED_ATOMIC]);
+
+		fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
+			p->stats.tx_sched_cnt[DLB_SCHED_DIRECTED]);
+
+		fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
+			p->stats.tx_invalid);
+
+		fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
+			p->stats.rx_sched_cnt[DLB_SCHED_ORDERED]);
+
+		fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
+			p->stats.rx_sched_cnt[DLB_SCHED_UNORDERED]);
+
+		fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
+			p->stats.rx_sched_cnt[DLB_SCHED_ATOMIC]);
+
+		fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
+			p->stats.rx_sched_cnt[DLB_SCHED_DIRECTED]);
+
+		fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
+			p->stats.rx_sched_invalid);
+	}
+
+	/* Queue level information */
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		struct dlb_eventdev_queue *q = &dlb->ev_queues[i];
+		int j, k;
+
+		if (!q->setup_done)
+			fprintf(f, "Queue_%d is not configured\n", i);
+
+		fprintf(f, "Queue_%d\n", i);
+		fprintf(f, "========\n");
+
+		fprintf(f, "\tevqueue_%u is set up\n", q->id);
+
+		fprintf(f, "\tqueue is %s\n",
+			q->qm_queue.is_directed ? "directed" : "load balanced");
+
+		fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);
+
+		for (j = 0; j < dlb->num_ports; j++) {
+			struct dlb_eventdev_port *p = &dlb->ev_ports[j];
+
+			for (k = 0; k < DLB_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
+				if (p->link[k].valid &&
+				    p->link[k].queue_id == q->id)
+					fprintf(f, "id=%u prio=%u ",
+						p->id, p->link[k].priority);
+			}
+		}
+		fprintf(f, "\n");
+
+		fprintf(f, "\tcurrent depth: %u events\n",
+			dlb_get_queue_depth(dlb, q));
+
+		fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
+			q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
+	}
+}
diff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build
index 414b3ed..6f87274 100644
--- a/drivers/event/dlb/meson.build
+++ b/drivers/event/dlb/meson.build
@@ -3,6 +3,7 @@
 
 sources = files('dlb.c',
 		'dlb_iface.c',
+		'dlb_xstats.c',
 		'pf/dlb_main.c',
 		'pf/dlb_pf.c',
 		'pf/base/dlb_resource.c'
-- 
2.6.4


Thread overview: 95+ messages
     [not found] <1593232671-5690-0-git-send-email-timothy.mcdaniel@intel.com>
2020-07-30 19:49 ` [dpdk-dev] [PATCH 00/27] Add Intel DLM PMD to 20.11 McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 01/27] eventdev: dlb upstream prerequisites McDaniel, Timothy
2020-08-11 17:44     ` Jerin Jacob
2020-10-17 19:03     ` [dpdk-dev] [PATCH v5 00/22] Add DLB PMD Timothy McDaniel
2020-10-17 19:03       ` [dpdk-dev] [PATCH v5 01/22] event/dlb: add documentation and meson infrastructure Timothy McDaniel
2020-10-20 20:06         ` Eads, Gage
2020-10-17 19:03       ` [dpdk-dev] [PATCH v5 02/22] event/dlb: add dynamic logging Timothy McDaniel
2020-10-17 19:03       ` [dpdk-dev] [PATCH v5 03/22] event/dlb: add private data structures and constants Timothy McDaniel
2020-10-20 20:06         ` Eads, Gage
2020-10-17 19:03       ` [dpdk-dev] [PATCH v5 04/22] event/dlb: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-17 19:03       ` [dpdk-dev] [PATCH v5 05/22] event/dlb: add inline functions Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 06/22] event/dlb: add probe Timothy McDaniel
2020-10-18 12:49         ` Jerin Jacob
2020-10-20 20:06         ` Eads, Gage
2020-10-17 19:04       ` Timothy McDaniel [this message]
2020-10-20 20:06         ` [dpdk-dev] [PATCH v5 07/22] event/dlb: add xstats Eads, Gage
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 08/22] event/dlb: add infos get and configure Timothy McDaniel
2020-10-20 20:06         ` Eads, Gage
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 09/22] event/dlb: add queue and port default conf Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 10/22] event/dlb: add queue setup Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 11/22] event/dlb: add port setup Timothy McDaniel
2020-10-20 20:06         ` Eads, Gage
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 12/22] event/dlb: add port link Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 13/22] event/dlb: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 14/22] event/dlb: add eventdev start Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 15/22] event/dlb: add enqueue and its burst variants Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 16/22] event/dlb: add dequeue " Timothy McDaniel
2020-10-20 20:06         ` Eads, Gage
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 17/22] event/dlb: add eventdev stop and close Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 18/22] event/dlb: add PMD's token pop public interface Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 19/22] event/dlb: add PMD self-tests Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 20/22] event/dlb: add queue and port release Timothy McDaniel
2020-10-20 20:06         ` Eads, Gage
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 21/22] event/dlb: add timeout ticks entry point Timothy McDaniel
2020-10-17 19:04       ` [dpdk-dev] [PATCH v5 22/22] doc: Add new DLB eventdev driver to relnotes Timothy McDaniel
2020-10-20 20:06         ` Eads, Gage
2020-10-23 18:32     ` [dpdk-dev] [PATCH v6 00/23] Add DLB PMD Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 01/23] event/dlb: add documentation and meson infrastructure Timothy McDaniel
2020-10-24 13:05         ` Jerin Jacob
2020-10-26 16:02           ` McDaniel, Timothy
2020-10-24 14:05         ` Thomas Monjalon
2020-10-26 16:12           ` McDaniel, Timothy
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 02/23] event/dlb: add dynamic logging Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 03/23] event/dlb: add private data structures and constants Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 04/23] event/dlb: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 05/23] event/dlb: add inline functions Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 06/23] event/dlb: add eventdev probe Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 07/23] event/dlb: add flexible interface Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 08/23] event/dlb: add probe-time hardware init Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 09/23] event/dlb: add xstats Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 10/23] event/dlb: add infos get and configure Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 11/23] event/dlb: add queue and port default conf Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 12/23] event/dlb: add queue setup Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 13/23] event/dlb: add port setup Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 14/23] event/dlb: add port link Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 15/23] event/dlb: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 16/23] event/dlb: add eventdev start Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 17/23] event/dlb: add enqueue and its burst variants Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 18/23] event/dlb: add dequeue " Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 19/23] event/dlb: add eventdev stop and close Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 20/23] event/dlb: add PMD's token pop public interface Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 21/23] event/dlb: add PMD self-tests Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 22/23] event/dlb: add queue and port release Timothy McDaniel
2020-10-23 18:32       ` [dpdk-dev] [PATCH v6 23/23] event/dlb: add timeout ticks entry point Timothy McDaniel
2020-07-30 19:49   ` [dpdk-dev] [PATCH 02/27] eventdev: do not pass disable_implicit_release bit to trace macro McDaniel, Timothy
2020-08-11 17:48     ` Jerin Jacob
2020-07-30 19:49   ` [dpdk-dev] [PATCH 03/27] event/dlb: add shared code version 10.7.9 McDaniel, Timothy
2020-08-11 18:22     ` [dpdk-dev] [EXT] " Jerin Jacob Kollanukkaran
2020-07-30 19:49   ` [dpdk-dev] [PATCH 04/27] event/dlb: add make and meson build infrastructure McDaniel, Timothy
2020-08-11 18:24     ` Jerin Jacob
2020-07-30 19:49   ` [dpdk-dev] [PATCH 05/27] event/dlb: add DLB documentation McDaniel, Timothy
2020-08-11 18:26     ` Jerin Jacob
2020-07-30 19:49   ` [dpdk-dev] [PATCH 06/27] event/dlb: add dynamic logging McDaniel, Timothy
2020-08-11 18:27     ` Jerin Jacob
2020-07-30 19:49   ` [dpdk-dev] [PATCH 07/27] event/dlb: add private data structures and constants McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 08/27] event/dlb: add definitions shared with LKM or shared code McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 09/27] event/dlb: add inline functions used in multiple files McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 10/27] event/dlb: add PFPMD-specific interface layer to shared code McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 11/27] event/dlb: add flexible PMD to device interfaces McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 12/27] event/dlb: add the PMD's public interfaces McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 13/27] event/dlb: add xstats support McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 14/27] event/dlb: add PMD self-tests McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 15/27] event/dlb: add probe McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 16/27] event/dlb: add infos_get and configure McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 17/27] event/dlb: add queue_def_conf and port_def_conf McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 18/27] event/dlb: add queue setup McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 19/27] event/dlb: add port_setup McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 20/27] event/dlb: add port_link McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 21/27] event/dlb: add queue_release and port_release McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 22/27] event/dlb: add port_unlink and port_unlinks_in_progress McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 23/27] event/dlb: add eventdev_start McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 24/27] event/dlb: add timeout_ticks, dump, xstats, and selftest McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 25/27] event/dlb: add enqueue and its burst variants McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 26/27] event/dlb: add dequeue, dequeue_burst, and variants McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 27/27] event/dlb: add eventdev_stop and eventdev_close McDaniel, Timothy
