From: Tim McDaniel <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,
 harry.van.haaren@intel.com, "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 13/27] event/dlb: add xstats support
Date: Fri, 26 Jun 2020 23:37:37 -0500
Message-ID: <1593232671-5690-14-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com>
From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
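For reference, a minimal sketch of how an application might read one of
the counters registered by this patch through the standard eventdev
xstats API. It assumes the event device is already configured and
started; the device ID is illustrative.

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_eventdev.h>

    static void
    print_dev_rx_ok(uint8_t dev_id)
    {
        unsigned int id;
        uint64_t val;

        /* Look up the xstat by name; its numeric ID is returned in id */
        val = rte_event_dev_xstats_by_name_get(dev_id, "dev_rx_ok", &id);
        printf("dev_rx_ok (id %u) = %" PRIu64 "\n", id, val);
    }
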
drivers/event/dlb/Makefile | 1 +
drivers/event/dlb/dlb_xstats.c | 1249 ++++++++++++++++++++++++++++++++++++++++
drivers/event/dlb/meson.build | 3 +-
3 files changed, 1252 insertions(+), 1 deletion(-)
create mode 100644 drivers/event/dlb/dlb_xstats.c
diff --git a/drivers/event/dlb/Makefile b/drivers/event/dlb/Makefile
index 9bcb8ef..27aafdb 100644
--- a/drivers/event/dlb/Makefile
+++ b/drivers/event/dlb/Makefile
@@ -20,6 +20,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_dlb_event_version.map
# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += dlb_xstats.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += rte_pmd_dlb.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += dlb_iface.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += pf/dlb_pf.c
diff --git a/drivers/event/dlb/dlb_xstats.c b/drivers/event/dlb/dlb_xstats.c
new file mode 100644
index 0000000..85298b7
--- /dev/null
+++ b/drivers/event/dlb/dlb_xstats.c
@@ -0,0 +1,1249 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_dev.h>
+#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+
+#include "dlb_priv.h"
+#include "dlb_inline_fns.h"
+
+enum dlb_xstats_type {
+ /* common to device and port */
+ rx_ok, /**< Receive an event */
+ rx_drop, /**< Error bit set in received QE */
+ rx_interrupt_wait, /**< Wait on an interrupt */
+ rx_umonitor_umwait, /**< Block using umwait */
+ tx_ok, /**< Transmit an event */
+ total_polls, /**< Calls to dequeue_burst */
+ zero_polls, /**< Calls to dequeue_burst that returned 0 events */
+ tx_nospc_ldb_hw_credits, /**< Insufficient LDB h/w credits */
+ tx_nospc_dir_hw_credits, /**< Insufficient DIR h/w credits */
+ tx_nospc_inflight_max, /**< Reach the new_event_threshold */
+ tx_nospc_new_event_limit, /**< Insufficient s/w credits */
+ tx_nospc_inflight_credits, /**< Port has too few s/w credits */
+ /* device specific */
+ nb_events_limit, /**< Maximum num of events */
+ inflight_events, /**< Current num events outstanding */
+ ldb_pool_size, /**< Num load balanced credits */
+ dir_pool_size, /**< Num directed credits */
+ /* port specific */
+ tx_new, /**< Send an OP_NEW event */
+ tx_fwd, /**< Send an OP_FORWARD event */
+ tx_rel, /**< Send an OP_RELEASE event */
+ tx_implicit_rel, /**< Issue an implicit event release */
+ tx_sched_ordered, /**< Send a SCHED_TYPE_ORDERED event */
+ tx_sched_unordered, /**< Send a SCHED_TYPE_PARALLEL event */
+ tx_sched_atomic, /**< Send a SCHED_TYPE_ATOMIC event */
+ tx_sched_directed, /**< Send a directed event */
+ tx_invalid, /**< Send an event with an invalid op */
+ outstanding_releases, /**< # of releases a port owes */
+ max_outstanding_releases, /**< max # of releases a port can owe */
+ rx_sched_ordered, /**< Dequeue an ordered event */
+ rx_sched_unordered, /**< Dequeue an unordered event */
+ rx_sched_atomic, /**< Dequeue an atomic event */
+ rx_sched_directed, /**< Dequeue a directed event */
+ rx_sched_invalid, /**< Dequeue event sched type invalid */
+ /* common to port and queue */
+ is_configured, /**< Port/queue is configured */
+ is_load_balanced, /**< Port/queue is load balanced */
+ hw_id, /**< Hardware ID */
+ /* queue specific */
+ num_links, /**< Number of ports linked */
+ sched_type, /**< Queue sched type */
+ enq_ok, /**< # events enqueued to the queue */
+ current_depth /**< Current queue depth */
+};
+
+typedef uint64_t (*dlb_xstats_fn)(struct dlb_eventdev *dlb,
+ uint16_t obj_idx, /* port or queue id */
+ enum dlb_xstats_type stat, int extra_arg);
+
+enum dlb_xstats_fn_type {
+ DLB_XSTATS_FN_DEV,
+ DLB_XSTATS_FN_PORT,
+ DLB_XSTATS_FN_QUEUE
+};
+
+struct dlb_xstats_entry {
+ struct rte_event_dev_xstats_name name;
+ uint64_t reset_value; /* offset subtracted from the raw count to emulate resets */
+ enum dlb_xstats_fn_type fn_id;
+ enum dlb_xstats_type stat;
+ enum rte_event_dev_xstats_mode mode;
+ int extra_arg;
+ uint16_t obj_idx;
+ uint8_t reset_allowed; /* when set, this value can be reset */
+};
+
+/* Some device stats are simply a summation of the corresponding port values */
+static uint64_t
+dlb_device_traffic_stat_get(struct dlb_eventdev *dlb, int which_stat)
+{
+ int i;
+ uint64_t val = 0;
+
+ for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
+ struct dlb_eventdev_port *port = &dlb->ev_ports[i];
+
+ if (!port->setup_done)
+ continue;
+
+ switch (which_stat) {
+ case rx_ok:
+ val += port->stats.traffic.rx_ok;
+ break;
+ case rx_drop:
+ val += port->stats.traffic.rx_drop;
+ break;
+ case rx_interrupt_wait:
+ val += port->stats.traffic.rx_interrupt_wait;
+ break;
+ case rx_umonitor_umwait:
+ val += port->stats.traffic.rx_umonitor_umwait;
+ break;
+ case tx_ok:
+ val += port->stats.traffic.tx_ok;
+ break;
+ case total_polls:
+ val += port->stats.traffic.total_polls;
+ break;
+ case zero_polls:
+ val += port->stats.traffic.zero_polls;
+ break;
+ case tx_nospc_ldb_hw_credits:
+ val += port->stats.traffic.tx_nospc_ldb_hw_credits;
+ break;
+ case tx_nospc_dir_hw_credits:
+ val += port->stats.traffic.tx_nospc_dir_hw_credits;
+ break;
+ case tx_nospc_inflight_max:
+ val += port->stats.traffic.tx_nospc_inflight_max;
+ break;
+ case tx_nospc_new_event_limit:
+ val += port->stats.traffic.tx_nospc_new_event_limit;
+ break;
+ case tx_nospc_inflight_credits:
+ val += port->stats.traffic.tx_nospc_inflight_credits;
+ break;
+ default:
+ return -1;
+ }
+ }
+ return val;
+}
+
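+/*
+ * Fetch a device-scope stat. Traffic stats are summed across all
+ * configured ports; the remaining stats are read directly from the
+ * device structure.
+ */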
+static uint64_t
+get_dev_stat(struct dlb_eventdev *dlb, uint16_t obj_idx __rte_unused,
+ enum dlb_xstats_type type, int extra_arg __rte_unused)
+{
+ switch (type) {
+ case rx_ok:
+ case rx_drop:
+ case rx_interrupt_wait:
+ case rx_umonitor_umwait:
+ case tx_ok:
+ case total_polls:
+ case zero_polls:
+ case tx_nospc_ldb_hw_credits:
+ case tx_nospc_dir_hw_credits:
+ case tx_nospc_inflight_max:
+ case tx_nospc_new_event_limit:
+ case tx_nospc_inflight_credits:
+ return dlb_device_traffic_stat_get(dlb, type);
+ case nb_events_limit:
+ return dlb->new_event_limit;
+ case inflight_events:
+ return rte_atomic32_read(&dlb->inflights);
+ case ldb_pool_size:
+ return dlb->num_ldb_credits;
+ case dir_pool_size:
+ return dlb->num_dir_credits;
+ default: return -1;
+ }
+}
+
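+/* Fetch a port-scope stat for the event port identified by obj_idx. */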
+static uint64_t
+get_port_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
+ enum dlb_xstats_type type, int extra_arg __rte_unused)
+{
+ struct dlb_eventdev_port *ev_port = &dlb->ev_ports[obj_idx];
+
+ switch (type) {
+ case rx_ok: return ev_port->stats.traffic.rx_ok;
+
+ case rx_drop: return ev_port->stats.traffic.rx_drop;
+
+ case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;
+
+ case rx_umonitor_umwait:
+ return ev_port->stats.traffic.rx_umonitor_umwait;
+
+ case tx_ok: return ev_port->stats.traffic.tx_ok;
+
+ case total_polls: return ev_port->stats.traffic.total_polls;
+
+ case zero_polls: return ev_port->stats.traffic.zero_polls;
+
+ case tx_nospc_ldb_hw_credits:
+ return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;
+
+ case tx_nospc_dir_hw_credits:
+ return ev_port->stats.traffic.tx_nospc_dir_hw_credits;
+
+ case tx_nospc_inflight_max:
+ return ev_port->stats.traffic.tx_nospc_inflight_max;
+
+ case tx_nospc_new_event_limit:
+ return ev_port->stats.traffic.tx_nospc_new_event_limit;
+
+ case tx_nospc_inflight_credits:
+ return ev_port->stats.traffic.tx_nospc_inflight_credits;
+
+ case is_configured: return ev_port->setup_done;
+
+ case is_load_balanced: return !ev_port->qm_port.is_directed;
+
+ case hw_id: return ev_port->qm_port.id;
+
+ case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];
+
+ case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];
+
+ case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];
+
+ case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;
+
+ case tx_sched_ordered:
+ return ev_port->stats.tx_sched_cnt[DLB_SCHED_ORDERED];
+
+ case tx_sched_unordered:
+ return ev_port->stats.tx_sched_cnt[DLB_SCHED_UNORDERED];
+
+ case tx_sched_atomic:
+ return ev_port->stats.tx_sched_cnt[DLB_SCHED_ATOMIC];
+
+ case tx_sched_directed:
+ return ev_port->stats.tx_sched_cnt[DLB_SCHED_DIRECTED];
+
+ case tx_invalid: return ev_port->stats.tx_invalid;
+
+ case outstanding_releases: return ev_port->outstanding_releases;
+
+ case max_outstanding_releases:
+ return DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
+
+ case rx_sched_ordered:
+ return ev_port->stats.rx_sched_cnt[DLB_SCHED_ORDERED];
+
+ case rx_sched_unordered:
+ return ev_port->stats.rx_sched_cnt[DLB_SCHED_UNORDERED];
+
+ case rx_sched_atomic:
+ return ev_port->stats.rx_sched_cnt[DLB_SCHED_ATOMIC];
+
+ case rx_sched_directed:
+ return ev_port->stats.rx_sched_cnt[DLB_SCHED_DIRECTED];
+
+ case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;
+
+ default: return -1;
+ }
+}
+
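+/* Fetch a queue-scope stat for the event queue identified by obj_idx. */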
+static uint64_t
+get_queue_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
+ enum dlb_xstats_type type, int extra_arg __rte_unused)
+{
+ struct dlb_eventdev_queue *ev_queue =
+ &dlb->ev_queues[obj_idx];
+
+ switch (type) {
+ case is_configured: return ev_queue->setup_done;
+
+ case is_load_balanced: return !ev_queue->qm_queue.is_directed;
+
+ case hw_id: return ev_queue->qm_queue.id;
+
+ case num_links: return ev_queue->num_links;
+
+ case sched_type: return ev_queue->qm_queue.sched_type;
+
+ case enq_ok:
+ {
+ int port_count = 0;
+ uint64_t enq_ok_tally = 0;
+
+ ev_queue->enq_ok = 0;
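+ /*
+ * Enqueue counts are tracked per port, so the queue-level enq_ok
+ * is derived by summing each port's tally for this queue.
+ */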
+ for (port_count = 0; port_count < DLB_MAX_NUM_PORTS;
+ port_count++) {
+ struct dlb_eventdev_port *ev_port =
+ &dlb->ev_ports[port_count];
+ enq_ok_tally += ev_port->stats.enq_ok[ev_queue->id];
+ }
+ ev_queue->enq_ok = enq_ok_tally;
+ return ev_queue->enq_ok;
+ }
+
+ case current_depth: return dlb_get_queue_depth(dlb, ev_queue);
+
+ default: return -1;
+ }
+}
+
+int
+dlb_xstats_init(struct dlb_eventdev *dlb)
+{
+ /*
+ * Define the stat names and types used to build the device xstats
+ * array. There are three sets of stats:
+ * - device-level
+ * - per-port
+ * - per-qid
+ *
+ * Each set is described by three parallel arrays: one holds the stat
+ * names, one holds the stat-type argument passed to the getter
+ * function, and one records whether the stat may be reset. All three
+ * arrays must be kept in sync.
+ */
+ static const char * const dev_stats[] = {
+ "rx_ok",
+ "rx_drop",
+ "rx_interrupt_wait",
+ "rx_umonitor_umwait",
+ "tx_ok",
+ "total_polls",
+ "zero_polls",
+ "tx_nospc_ldb_hw_credits",
+ "tx_nospc_dir_hw_credits",
+ "tx_nospc_inflight_max",
+ "tx_nospc_new_event_limit",
+ "tx_nospc_inflight_credits",
+ "nb_events_limit",
+ "inflight_events",
+ "ldb_pool_size",
+ "dir_pool_size",
+ };
+ static const enum dlb_xstats_type dev_types[] = {
+ rx_ok,
+ rx_drop,
+ rx_interrupt_wait,
+ rx_umonitor_umwait,
+ tx_ok,
+ total_polls,
+ zero_polls,
+ tx_nospc_ldb_hw_credits,
+ tx_nospc_dir_hw_credits,
+ tx_nospc_inflight_max,
+ tx_nospc_new_event_limit,
+ tx_nospc_inflight_credits,
+ nb_events_limit,
+ inflight_events,
+ ldb_pool_size,
+ dir_pool_size,
+ };
+ /*
+ * Note: device-level stats are aggregated from port stats or reflect
+ * device state, so none of them can be reset.
+ */
+ static const uint8_t dev_reset_allowed[] = {
+ 0, /* rx_ok */
+ 0, /* rx_drop */
+ 0, /* rx_interrupt_wait */
+ 0, /* rx_umonitor_umwait */
+ 0, /* tx_ok */
+ 0, /* total_polls */
+ 0, /* zero_polls */
+ 0, /* tx_nospc_ldb_hw_credits */
+ 0, /* tx_nospc_dir_hw_credits */
+ 0, /* tx_nospc_inflight_max */
+ 0, /* tx_nospc_new_event_limit */
+ 0, /* tx_nospc_inflight_credits */
+ 0, /* nb_events_limit */
+ 0, /* inflight_events */
+ 0, /* ldb_pool_size */
+ 0, /* dir_pool_size */
+ };
+ static const char * const port_stats[] = {
+ "is_configured",
+ "is_load_balanced",
+ "hw_id",
+ "rx_ok",
+ "rx_drop",
+ "rx_interrupt_wait",
+ "rx_umonitor_umwait",
+ "tx_ok",
+ "total_polls",
+ "zero_polls",
+ "tx_nospc_ldb_hw_credits",
+ "tx_nospc_dir_hw_credits",
+ "tx_nospc_inflight_max",
+ "tx_nospc_new_event_limit",
+ "tx_nospc_inflight_credits",
+ "tx_new",
+ "tx_fwd",
+ "tx_rel",
+ "tx_implicit_rel",
+ "tx_sched_ordered",
+ "tx_sched_unordered",
+ "tx_sched_atomic",
+ "tx_sched_directed",
+ "tx_invalid",
+ "outstanding_releases",
+ "max_outstanding_releases",
+ "rx_sched_ordered",
+ "rx_sched_unordered",
+ "rx_sched_atomic",
+ "rx_sched_directed",
+ "rx_sched_invalid"
+ };
+ static const enum dlb_xstats_type port_types[] = {
+ is_configured,
+ is_load_balanced,
+ hw_id,
+ rx_ok,
+ rx_drop,
+ rx_interrupt_wait,
+ rx_umonitor_umwait,
+ tx_ok,
+ total_polls,
+ zero_polls,
+ tx_nospc_ldb_hw_credits,
+ tx_nospc_dir_hw_credits,
+ tx_nospc_inflight_max,
+ tx_nospc_new_event_limit,
+ tx_nospc_inflight_credits,
+ tx_new,
+ tx_fwd,
+ tx_rel,
+ tx_implicit_rel,
+ tx_sched_ordered,
+ tx_sched_unordered,
+ tx_sched_atomic,
+ tx_sched_directed,
+ tx_invalid,
+ outstanding_releases,
+ max_outstanding_releases,
+ rx_sched_ordered,
+ rx_sched_unordered,
+ rx_sched_atomic,
+ rx_sched_directed,
+ rx_sched_invalid
+ };
+ static const uint8_t port_reset_allowed[] = {
+ 0, /* is_configured */
+ 0, /* is_load_balanced */
+ 0, /* hw_id */
+ 1, /* rx_ok */
+ 1, /* rx_drop */
+ 1, /* rx_interrupt_wait */
+ 1, /* rx_umonitor_umwait */
+ 1, /* tx_ok */
+ 1, /* total_polls */
+ 1, /* zero_polls */
+ 1, /* tx_nospc_ldb_hw_credits */
+ 1, /* tx_nospc_dir_hw_credits */
+ 1, /* tx_nospc_inflight_max */
+ 1, /* tx_nospc_new_event_limit */
+ 1, /* tx_nospc_inflight_credits */
+ 1, /* tx_new */
+ 1, /* tx_fwd */
+ 1, /* tx_rel */
+ 1, /* tx_implicit_rel */
+ 1, /* tx_sched_ordered */
+ 1, /* tx_sched_unordered */
+ 1, /* tx_sched_atomic */
+ 1, /* tx_sched_directed */
+ 1, /* tx_invalid */
+ 0, /* outstanding_releases */
+ 0, /* max_outstanding_releases */
+ 1, /* rx_sched_ordered */
+ 1, /* rx_sched_unordered */
+ 1, /* rx_sched_atomic */
+ 1, /* rx_sched_directed */
+ 1 /* rx_sched_invalid */
+ };
+
+ /* QID specific stats */
+ static const char * const qid_stats[] = {
+ "is_configured",
+ "is_load_balanced",
+ "hw_id",
+ "num_links",
+ "sched_type",
+ "enq_ok",
+ "current_depth",
+ };
+ static const enum dlb_xstats_type qid_types[] = {
+ is_configured,
+ is_load_balanced,
+ hw_id,
+ num_links,
+ sched_type,
+ enq_ok,
+ current_depth,
+ };
+ static const uint8_t qid_reset_allowed[] = {
+ 0, /* is_configured */
+ 0, /* is_load_balanced */
+ 0, /* hw_id */
+ 0, /* num_links */
+ 0, /* sched_type */
+ 1, /* enq_ok */
+ 0, /* current_depth */
+ };
+
+ /* ---- end of stat definitions ---- */
+
+ /* check sizes, since a missed comma can lead to strings being
+ * joined by the compiler.
+ */
+ RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
+
+ RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
+ RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
+
+ /* other vars */
+ const unsigned int count = RTE_DIM(dev_stats) +
+ DLB_MAX_NUM_PORTS * RTE_DIM(port_stats) +
+ DLB_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
+ unsigned int i, port, qid, stat_id = 0;
+
+ dlb->xstats = rte_zmalloc_socket(NULL,
+ sizeof(dlb->xstats[0]) * count, 0,
+ dlb->qm_instance.info.socket_id);
+ if (dlb->xstats == NULL)
+ return -ENOMEM;
+
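+/* Shorthand for the name field of the xstats entry being initialized */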
+#define sname dlb->xstats[stat_id].name.name
+ for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
+ dlb->xstats[stat_id] = (struct dlb_xstats_entry) {
+ .fn_id = DLB_XSTATS_FN_DEV,
+ .stat = dev_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_DEVICE,
+ .reset_allowed = dev_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
+ }
+ dlb->xstats_count_mode_dev = stat_id;
+
+ for (port = 0; port < DLB_MAX_NUM_PORTS; port++) {
+ uint32_t count_offset = stat_id;
+
+ dlb->xstats_offset_for_port[port] = stat_id;
+
+ for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
+ dlb->xstats[stat_id] = (struct dlb_xstats_entry){
+ .fn_id = DLB_XSTATS_FN_PORT,
+ .obj_idx = port,
+ .stat = port_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_PORT,
+ .reset_allowed = port_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "port_%u_%s",
+ port, port_stats[i]);
+ }
+
+ dlb->xstats_count_per_port[port] = stat_id - count_offset;
+ }
+
+ dlb->xstats_count_mode_port = stat_id - dlb->xstats_count_mode_dev;
+
+ for (qid = 0; qid < DLB_MAX_NUM_QUEUES; qid++) {
+ uint32_t count_offset = stat_id;
+
+ dlb->xstats_offset_for_qid[qid] = stat_id;
+
+ for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
+ dlb->xstats[stat_id] = (struct dlb_xstats_entry){
+ .fn_id = DLB_XSTATS_FN_QUEUE,
+ .obj_idx = qid,
+ .stat = qid_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .reset_allowed = qid_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "qid_%u_%s",
+ qid, qid_stats[i]);
+ }
+
+ dlb->xstats_count_per_qid[qid] = stat_id - count_offset;
+ }
+
+ dlb->xstats_count_mode_queue = stat_id -
+ (dlb->xstats_count_mode_dev + dlb->xstats_count_mode_port);
+#undef sname
+
+ dlb->xstats_count = stat_id;
+
+ return 0;
+}
+
+void
+dlb_xstats_uninit(struct dlb_eventdev *dlb)
+{
+ rte_free(dlb->xstats);
+ dlb->xstats_count = 0;
+}
+
+int
+dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ const struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ unsigned int i;
+ unsigned int xidx = 0;
+ uint32_t xstats_mode_count = 0;
+ uint32_t start_offset = 0;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ xstats_mode_count = dlb->xstats_count_mode_dev;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= DLB_MAX_NUM_PORTS)
+ break;
+ xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
+ start_offset = dlb->xstats_offset_for_port[queue_port_id];
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+#if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
+ if (queue_port_id >= DLB_MAX_NUM_QUEUES)
+ break;
+#endif
+ xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
+ start_offset = dlb->xstats_offset_for_qid[queue_port_id];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (xstats_mode_count > size || !ids || !xstats_names)
+ return xstats_mode_count;
+
+ for (i = 0; i < dlb->xstats_count && xidx < size; i++) {
+ if (dlb->xstats[i].mode != mode)
+ continue;
+
+ if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+ queue_port_id != dlb->xstats[i].obj_idx)
+ continue;
+
+ xstats_names[xidx] = dlb->xstats[i].name;
+ if (ids)
+ ids[xidx] = start_offset + xidx;
+ xidx++;
+ }
+ return xidx;
+}
+
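+/*
+ * Common worker for xstats get and reset: each requested stat is read
+ * via its getter, its reset-time snapshot is subtracted, and, when
+ * reset is set, the snapshot is advanced so later reads start at zero.
+ */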
+static int
+dlb_xstats_update(struct dlb_eventdev *dlb,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id, const unsigned int ids[],
+ uint64_t values[], unsigned int n, const uint32_t reset)
+{
+ unsigned int i;
+ unsigned int xidx = 0;
+ uint32_t xstats_mode_count = 0;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ xstats_mode_count = dlb->xstats_count_mode_dev;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= DLB_MAX_NUM_PORTS)
+ goto invalid_value;
+ xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+#if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
+ if (queue_port_id >= DLB_MAX_NUM_QUEUES)
+ goto invalid_value;
+#endif
+ xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
+ break;
+ default:
+ goto invalid_value;
+ }
+
+ for (i = 0; i < n && xidx < xstats_mode_count; i++) {
+ struct dlb_xstats_entry *xs = &dlb->xstats[ids[i]];
+ dlb_xstats_fn fn;
+
+ if (ids[i] >= dlb->xstats_count || xs->mode != mode)
+ continue;
+
+ if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+ queue_port_id != xs->obj_idx)
+ continue;
+
+ switch (xs->fn_id) {
+ case DLB_XSTATS_FN_DEV:
+ fn = get_dev_stat;
+ break;
+ case DLB_XSTATS_FN_PORT:
+ fn = get_port_stat;
+ break;
+ case DLB_XSTATS_FN_QUEUE:
+ fn = get_queue_stat;
+ break;
+ default:
+ rte_panic("Unexpected xstat fn_id %d\n", xs->fn_id);
+ }
+
+ uint64_t val = fn(dlb, xs->obj_idx, xs->stat,
+ xs->extra_arg) - xs->reset_value;
+
+ if (values)
+ values[xidx] = val;
+
+ if (xs->reset_allowed && reset)
+ xs->reset_value += val;
+
+ xidx++;
+ }
+
+ return xidx;
+
+invalid_value:
+ return -EINVAL;
+}
+
+int
+dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ const uint32_t reset = 0;
+
+ return dlb_xstats_update(dlb, mode, queue_port_id, ids, values, n,
+ reset);
+}
+
+uint64_t
+dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ unsigned int i;
+ dlb_xstats_fn fn;
+
+ for (i = 0; i < dlb->xstats_count; i++) {
+ struct dlb_xstats_entry *xs = &dlb->xstats[i];
+
+ if (strncmp(xs->name.name, name,
+ RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
+ if (id != NULL)
+ *id = i;
+
+ switch (xs->fn_id) {
+ case DLB_XSTATS_FN_DEV:
+ fn = get_dev_stat;
+ break;
+ case DLB_XSTATS_FN_PORT:
+ fn = get_port_stat;
+ break;
+ case DLB_XSTATS_FN_QUEUE:
+ fn = get_queue_stat;
+ break;
+ default:
+ rte_panic("Unexpected xstat fn_id %d\n",
+ xs->fn_id);
+ }
+
+ return fn(dlb, xs->obj_idx, xs->stat,
+ xs->extra_arg) - xs->reset_value;
+ }
+ }
+ if (id != NULL)
+ *id = (uint32_t)-1;
+ return (uint64_t)-1;
+}
+
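+/*
+ * Resets are emulated in software: the current counter value is
+ * recorded in reset_value and subtracted on subsequent reads.
+ */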
+static void
+dlb_xstats_reset_range(struct dlb_eventdev *dlb, uint32_t start,
+ uint32_t num)
+{
+ uint32_t i;
+ dlb_xstats_fn fn;
+
+ for (i = start; i < start + num; i++) {
+ struct dlb_xstats_entry *xs = &dlb->xstats[i];
+
+ if (!xs->reset_allowed)
+ continue;
+
+ switch (xs->fn_id) {
+ case DLB_XSTATS_FN_DEV:
+ fn = get_dev_stat;
+ break;
+ case DLB_XSTATS_FN_PORT:
+ fn = get_port_stat;
+ break;
+ case DLB_XSTATS_FN_QUEUE:
+ fn = get_queue_stat;
+ break;
+ default:
+ rte_panic("Unexpected xstat fn_id %d\n", xs->fn_id);
+ }
+
+ uint64_t val = fn(dlb, xs->obj_idx, xs->stat, xs->extra_arg);
+ xs->reset_value = val;
+ }
+}
+
+static int
+dlb_xstats_reset_queue(struct dlb_eventdev *dlb, uint8_t queue_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ const uint32_t reset = 1;
+
+ if (ids) {
+ uint32_t nb_reset = dlb_xstats_update(dlb,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue_id, ids, NULL, nb_ids,
+ reset);
+ return nb_reset == nb_ids ? 0 : -EINVAL;
+ }
+
+ dlb_xstats_reset_range(dlb,
+ dlb->xstats_offset_for_qid[queue_id],
+ dlb->xstats_count_per_qid[queue_id]);
+
+ return 0;
+}
+
+static int
+dlb_xstats_reset_port(struct dlb_eventdev *dlb, uint8_t port_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ const uint32_t reset = 1;
+ int offset = dlb->xstats_offset_for_port[port_id];
+ int nb_stat = dlb->xstats_count_per_port[port_id];
+
+ if (ids) {
+ uint32_t nb_reset = dlb_xstats_update(dlb,
+ RTE_EVENT_DEV_XSTATS_PORT, port_id,
+ ids, NULL, nb_ids,
+ reset);
+ return nb_reset == nb_ids ? 0 : -EINVAL;
+ }
+
+ dlb_xstats_reset_range(dlb, offset, nb_stat);
+ return 0;
+}
+
+static int
+dlb_xstats_reset_dev(struct dlb_eventdev *dlb, const uint32_t ids[],
+ uint32_t nb_ids)
+{
+ uint32_t i;
+
+ if (ids) {
+ for (i = 0; i < nb_ids; i++) {
+ uint32_t id = ids[i];
+
+ if (id >= dlb->xstats_count_mode_dev)
+ return -EINVAL;
+ dlb_xstats_reset_range(dlb, id, 1);
+ }
+ } else {
+ for (i = 0; i < dlb->xstats_count_mode_dev; i++)
+ dlb_xstats_reset_range(dlb, i, 1);
+ }
+
+ return 0;
+}
+
+int
+dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ uint32_t i;
+
+ /* handle -1 for queue_port_id here, looping over all ports/queues */
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ if (dlb_xstats_reset_dev(dlb, ids, nb_ids))
+ return -EINVAL;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id == -1) {
+ for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
+ if (dlb_xstats_reset_port(dlb, i, ids,
+ nb_ids))
+ return -EINVAL;
+ }
+ } else if (queue_port_id < DLB_MAX_NUM_PORTS) {
+ if (dlb_xstats_reset_port(dlb, queue_port_id, ids,
+ nb_ids))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id == -1) {
+ for (i = 0; i < DLB_MAX_NUM_QUEUES; i++) {
+ if (dlb_xstats_reset_queue(dlb, i, ids,
+ nb_ids))
+ return -EINVAL;
+ }
+ } else if (queue_port_id < DLB_MAX_NUM_QUEUES) {
+ if (dlb_xstats_reset_queue(dlb, queue_port_id, ids,
+ nb_ids))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
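+/* Dump device, port, and queue state to the given file for debugging. */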
+void
+dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct dlb_eventdev *dlb;
+ struct dlb_hw_dev *handle;
+ int i;
+
+ if (!f) {
+ printf("Invalid file pointer\n");
+ return;
+ }
+
+ if (!dev) {
+ fprintf(f, "Invalid event device\n");
+ return;
+ }
+
+ dlb = dlb_pmd_priv(dev);
+
+ if (!dlb) {
+ fprintf(f, "DLB Event device cannot be dumped!\n");
+ return;
+ }
+
+ if (!dlb->configured)
+ fprintf(f, "DLB Event device is not configured\n");
+
+ handle = &dlb->qm_instance;
+
+ fprintf(f, "================\n");
+ fprintf(f, "DLB Device Dump\n");
+ fprintf(f, "================\n");
+
+ fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
+ dlb->umwait_allowed ? "yes" : "no");
+
+ /* Generic top level device information */
+
+ fprintf(f, "device is configured and run state =");
+ if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+ fprintf(f, "STOPPED\n");
+ else if (dlb->run_state == DLB_RUN_STATE_STOPPING)
+ fprintf(f, "STOPPING\n");
+ else if (dlb->run_state == DLB_RUN_STATE_STARTING)
+ fprintf(f, "STARTING\n");
+ else if (dlb->run_state == DLB_RUN_STATE_STARTED)
+ fprintf(f, "STARTED\n");
+ else
+ fprintf(f, "UNEXPECTED\n");
+
+ fprintf(f,
+ "dev ID=%d, dom ID=%u, name=%s, path=%s, sock=%u, evdev=%p\n",
+ handle->device_id, handle->domain_id, handle->device_name,
+ handle->device_path, handle->info.socket_id, dlb->event_dev);
+
+ fprintf(f, "domain device path=%s\n", handle->domain_device_path);
+
+ fprintf(f, "num dir ports=%u, num dir queues=%u\n",
+ dlb->num_dir_ports, dlb->num_dir_queues);
+
+ fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
+ dlb->num_ldb_ports, dlb->num_ldb_queues);
+
+ fprintf(f, "dir_credit_pool_id=%u, num_credits=%u\n",
+ handle->cfg.dir_credit_pool_id, handle->cfg.num_dir_credits);
+
+ fprintf(f, "ldb_credit_pool_id=%u, num_credits=%u\n",
+ handle->cfg.ldb_credit_pool_id, handle->cfg.num_ldb_credits);
+
+ fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
+ handle->cfg.resources.num_atomic_inflights,
+ handle->cfg.resources.num_hist_list_entries);
+
+ fprintf(f, "results from most recent hw resource query:\n");
+
+ fprintf(f, "\tnum_sched_domains = %u\n",
+ dlb->hw_rsrc_query_results.num_sched_domains);
+
+ fprintf(f, "\tnum_ldb_queues = %u\n",
+ dlb->hw_rsrc_query_results.num_ldb_queues);
+
+ fprintf(f, "\tnum_ldb_ports = %u\n",
+ dlb->hw_rsrc_query_results.num_ldb_ports);
+
+ fprintf(f, "\tnum_dir_ports = %u\n",
+ dlb->hw_rsrc_query_results.num_dir_ports);
+
+ fprintf(f, "\tnum_atomic_inflights = %u\n",
+ dlb->hw_rsrc_query_results.num_atomic_inflights);
+
+ fprintf(f, "\tmax_contiguous_atomic_inflights = %u\n",
+ dlb->hw_rsrc_query_results.max_contiguous_atomic_inflights);
+
+ fprintf(f, "\tnum_hist_list_entries = %u\n",
+ dlb->hw_rsrc_query_results.num_hist_list_entries);
+
+ fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
+ dlb->hw_rsrc_query_results.max_contiguous_hist_list_entries);
+
+ fprintf(f, "\tnum_ldb_credits = %u\n",
+ dlb->hw_rsrc_query_results.num_ldb_credits);
+
+ fprintf(f, "\tmax_contiguous_ldb_credits = %u\n",
+ dlb->hw_rsrc_query_results.max_contiguous_ldb_credits);
+
+ fprintf(f, "\tnum_dir_credits = %u\n",
+ dlb->hw_rsrc_query_results.num_dir_credits);
+
+ fprintf(f, "\tmax_contiguous_dir_credits = %u\n",
+ dlb->hw_rsrc_query_results.max_contiguous_dir_credits);
+
+ fprintf(f, "\tnum_ldb_credit_pools = %u\n",
+ dlb->hw_rsrc_query_results.num_ldb_credit_pools);
+
+ fprintf(f, "\tnum_dir_credit_pools = %u\n",
+ dlb->hw_rsrc_query_results.num_dir_credit_pools);
+
+ /* Port level information */
+
+ for (i = 0; i < dlb->num_ports; i++) {
+ struct dlb_eventdev_port *p = &dlb->ev_ports[i];
+ int j;
+
+ if (!p->enq_configured)
+ fprintf(f, "Port_%d is not configured\n", i);
+
+ fprintf(f, "Port_%d\n", i);
+ fprintf(f, "=======\n");
+
+ fprintf(f, "\tevport_%u is configured, setup done=%d\n",
+ p->id, p->setup_done);
+
+ fprintf(f, "\tconfig state=%d, port state=%d\n",
+ p->qm_port.config_state, p->qm_port.state);
+
+ fprintf(f, "\tport is %s\n",
+ p->qm_port.is_directed ? "directed" : "load balanced");
+
+ fprintf(f, "\toutstanding releases=%u\n",
+ p->outstanding_releases);
+
+ fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
+ p->inflight_max, p->inflight_credits);
+
+ fprintf(f, "\tcredit update quanta=%u, implicit release =%u\n",
+ p->credit_update_quanta, p->implicit_release);
+
+ fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);
+
+ for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+ if (p->link[j].valid)
+ fprintf(f, "id=%u prio=%u ",
+ p->link[j].queue_id,
+ p->link[j].priority);
+ }
+ fprintf(f, "\n");
+
+ fprintf(f, "\thardware port id=%u\n", p->qm_port.id);
+
+ fprintf(f, "\tcached_ldb_credits=%u\n",
+ p->qm_port.cached_ldb_credits);
+
+ fprintf(f, "\tldb_pushcount_at_credit_expiry = %u\n",
+ p->qm_port.ldb_pushcount_at_credit_expiry);
+
+ fprintf(f, "\tldb_credits = %u\n",
+ p->qm_port.ldb_credits);
+
+ fprintf(f, "\tcached_dir_credits = %u\n",
+ p->qm_port.cached_dir_credits);
+
+ fprintf(f, "\tdir_pushcount_at_credit_expiry=%u\n",
+ p->qm_port.dir_pushcount_at_credit_expiry);
+
+ fprintf(f, "\tdir_credits = %u\n",
+ p->qm_port.dir_credits);
+
+ fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
+ p->qm_port.gen_bit,
+ p->qm_port.cq_idx,
+ p->qm_port.cq_depth);
+
+ fprintf(f, "\tuse reserved token scheme=%d, cq_rsvd_token_deficit=%u\n",
+ p->qm_port.use_rsvd_token_scheme,
+ p->qm_port.cq_rsvd_token_deficit);
+
+ fprintf(f, "\tinterrupt armed=%d\n",
+ p->qm_port.int_armed);
+
+ fprintf(f, "\tPort statistics\n");
+
+ fprintf(f, "\t\trx_ok %" PRIu64 "\n",
+ p->stats.traffic.rx_ok);
+
+ fprintf(f, "\t\trx_drop %" PRIu64 "\n",
+ p->stats.traffic.rx_drop);
+
+ fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
+ p->stats.traffic.rx_interrupt_wait);
+
+ fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
+ p->stats.traffic.rx_umonitor_umwait);
+
+ fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
+ p->stats.traffic.tx_ok);
+
+ fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
+ p->stats.traffic.total_polls);
+
+ fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
+ p->stats.traffic.zero_polls);
+
+ fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
+ p->stats.traffic.tx_nospc_ldb_hw_credits);
+
+ fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
+ p->stats.traffic.tx_nospc_dir_hw_credits);
+
+ fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
+ p->stats.traffic.tx_nospc_inflight_max);
+
+ fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
+ p->stats.traffic.tx_nospc_new_event_limit);
+
+ fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
+ p->stats.traffic.tx_nospc_inflight_credits);
+
+ fprintf(f, "\t\ttx_new %" PRIu64 "\n",
+ p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);
+
+ fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
+ p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);
+
+ fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
+ p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);
+
+ fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
+ p->stats.tx_implicit_rel);
+
+ fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
+ p->stats.tx_sched_cnt[DLB_SCHED_ORDERED]);
+
+ fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
+ p->stats.tx_sched_cnt[DLB_SCHED_UNORDERED]);
+
+ fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
+ p->stats.tx_sched_cnt[DLB_SCHED_ATOMIC]);
+
+ fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
+ p->stats.tx_sched_cnt[DLB_SCHED_DIRECTED]);
+
+ fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
+ p->stats.tx_invalid);
+
+ fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
+ p->stats.rx_sched_cnt[DLB_SCHED_ORDERED]);
+
+ fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
+ p->stats.rx_sched_cnt[DLB_SCHED_UNORDERED]);
+
+ fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
+ p->stats.rx_sched_cnt[DLB_SCHED_ATOMIC]);
+
+ fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
+ p->stats.rx_sched_cnt[DLB_SCHED_DIRECTED]);
+
+ fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
+ p->stats.rx_sched_invalid);
+ }
+
+ /* Queue level information */
+
+ for (i = 0; i < dlb->num_queues; i++) {
+ struct dlb_eventdev_queue *q = &dlb->ev_queues[i];
+ int j, k;
+
+ if (!q->setup_done)
+ fprintf(f, "Queue_%d is not configured\n", i);
+
+ fprintf(f, "Queue_%d\n", i);
+ fprintf(f, "========\n");
+
+ fprintf(f, "\tevqueue_%u is set up\n", q->id);
+
+ fprintf(f, "\tqueue is %s\n",
+ q->qm_queue.is_directed ? "directed" : "load balanced");
+
+ fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);
+
+ for (j = 0; j < dlb->num_ports; j++) {
+ struct dlb_eventdev_port *p = &dlb->ev_ports[j];
+
+ for (k = 0; k < DLB_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
+ if (p->link[k].valid &&
+ p->link[k].queue_id == q->id)
+ fprintf(f, "id=%u prio=%u ",
+ p->id, p->link[k].priority);
+ }
+ }
+ fprintf(f, "\n");
+
+ fprintf(f, "\tcurrent depth: %u events\n",
+ dlb_get_queue_depth(dlb, q));
+
+ fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
+ q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
+ }
+}
diff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build
index 06285c5..ef74e50 100644
--- a/drivers/event/dlb/meson.build
+++ b/drivers/event/dlb/meson.build
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2019-2020 Intel Corporation
-sources = files('rte_pmd_dlb.c',
+sources = files('dlb_xstats.c',
+ 'rte_pmd_dlb.c',
'dlb_iface.c',
'pf/dlb_pf.c',
'pf/dlb_main.c',
--
1.7.10