DPDK patches and discussions
* [dpdk-dev] [RFC] eventdev: introduce event dispatcher
@ 2021-02-18 18:30 Mattias Rönnblom
  2021-02-22 15:28 ` Luca Boccassi
  2021-02-25 12:32 ` Jerin Jacob
  0 siblings, 2 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2021-02-18 18:30 UTC (permalink / raw)
  To: jerinj; +Cc: dev, bruce.richardson, Mattias Rönnblom

The purpose of the event dispatcher is primarily to decouple different
parts of an application (e.g., processing pipeline stages), which
share the same underlying event device.

The event dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on the destination queue id.
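
As an illustration, below is a minimal sketch of the dequeue-and-switch
pattern being replaced. BATCH_SIZE, the queue ids and the handler
functions are hypothetical, application-defined placeholders, not part
of this patch:

	struct rte_event events[BATCH_SIZE];
	uint16_t i, n;

	n = rte_event_dequeue_burst(event_dev_id, port_id, events,
				    BATCH_SIZE, timeout);

	for (i = 0; i < n; i++) {
		switch (events[i].queue_id) {
		case STAGE_A_QUEUE_ID: /* placeholder queue id */
			handle_stage_a(&events[i]);
			break;
		case STAGE_B_QUEUE_ID:
			handle_stage_b(&events[i]);
			break;
		default:
			handle_unexpected(&events[i]);
			break;
		}
	}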

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the event dispatcher binds callbacks to queue ids.

An event dispatcher is configured to dequeue events from a specific
event device, and ties into the service core framework, to do its (and
the application's) work.

The event dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.
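
Below is a minimal usage sketch of the proposed API, with error
handling omitted. DISPATCHER_ID, event_dev_id, port_id, queue_id and
lcore_id are application-chosen placeholders, and lcore_id is assumed
to already have been added as a service lcore:

	#define DISPATCHER_ID 0

	static void
	handle_stage_events(struct rte_event *events, uint16_t num,
			    void *cb_data)
	{
		/* Application-level processing; all 'num' events
		 * carry the same queue id.
		 */
	}

	/* ...somewhere in the application's setup code: */
	uint32_t service_id;

	rte_event_dispatcher_create(DISPATCHER_ID, event_dev_id);

	/* Dequeue from port_id when the dispatcher's service function
	 * runs on lcore_id, with a batch size of 32 and no timeout.
	 */
	rte_event_dispatcher_bind_port_to_lcore(DISPATCHER_ID, port_id,
						32, 0, lcore_id);

	rte_event_dispatcher_register(DISPATCHER_ID, queue_id,
				      handle_stage_events, NULL);

	/* Map the dispatcher's service to the service lcore. */
	rte_event_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
	rte_service_map_lcore_set(service_id, lcore_id, 1);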

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 lib/librte_eventdev/Makefile                 |   2 +
 lib/librte_eventdev/meson.build              |   6 +-
 lib/librte_eventdev/rte_event_dispatcher.c   | 420 +++++++++++++++++++
 lib/librte_eventdev/rte_event_dispatcher.h   | 251 +++++++++++
 lib/librte_eventdev/rte_eventdev_version.map |  10 +
 5 files changed, 687 insertions(+), 2 deletions(-)
 create mode 100644 lib/librte_eventdev/rte_event_dispatcher.c
 create mode 100644 lib/librte_eventdev/rte_event_dispatcher.h

diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
index 0715256bb4..614d53af1b 100644
--- a/lib/librte_eventdev/Makefile
+++ b/lib/librte_eventdev/Makefile
@@ -26,6 +26,7 @@ SRCS-y += rte_event_eth_rx_adapter.c
 SRCS-y += rte_event_timer_adapter.c
 SRCS-y += rte_event_crypto_adapter.c
 SRCS-y += rte_event_eth_tx_adapter.c
+SRCS-y += rte_event_dispatcher.c
 
 # export include files
 SYMLINK-y-include += rte_eventdev.h
@@ -40,6 +41,7 @@ SYMLINK-y-include += rte_event_timer_adapter.h
 SYMLINK-y-include += rte_event_timer_adapter_pmd.h
 SYMLINK-y-include += rte_event_crypto_adapter.h
 SYMLINK-y-include += rte_event_eth_tx_adapter.h
+SYMLINK-y-include += rte_event_dispatcher.h
 
 # versioning export map
 EXPORT_MAP := rte_eventdev_version.map
diff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build
index d1f25ee5ca..2ca81983b5 100644
--- a/lib/librte_eventdev/meson.build
+++ b/lib/librte_eventdev/meson.build
@@ -13,7 +13,8 @@ sources = files('rte_eventdev.c',
 		'rte_event_eth_rx_adapter.c',
 		'rte_event_timer_adapter.c',
 		'rte_event_crypto_adapter.c',
-		'rte_event_eth_tx_adapter.c')
+		'rte_event_eth_tx_adapter.c',
+		'rte_event_dispatcher.c')
 headers = files('rte_eventdev.h',
 		'rte_eventdev_pmd.h',
 		'rte_eventdev_pmd_pci.h',
@@ -25,5 +26,6 @@ headers = files('rte_eventdev.h',
 		'rte_event_timer_adapter.h',
 		'rte_event_timer_adapter_pmd.h',
 		'rte_event_crypto_adapter.h',
-		'rte_event_eth_tx_adapter.h')
+		'rte_event_eth_tx_adapter.h',
+		'rte_event_dispatcher.h')
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/librte_eventdev/rte_event_dispatcher.c b/lib/librte_eventdev/rte_event_dispatcher.c
new file mode 100644
index 0000000000..1c7e55a752
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_dispatcher.c
@@ -0,0 +1,420 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_service_component.h>
+#include <rte_eventdev_pmd.h>
+
+#include <rte_event_dispatcher.h>
+
+#define RED_MAX_PORTS_PER_LCORE (4)
+
+struct rte_event_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_event_dispatcher_lcore {
+	uint8_t num_ports;
+	struct rte_event_dispatcher_lcore_port ports[RED_MAX_PORTS_PER_LCORE];
+};
+
+struct rte_event_dispatcher_cb {
+	rte_event_dispatcher_cb_t cb_fun;
+	void *cb_data;
+};
+
+struct rte_event_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	struct rte_event_dispatcher_cb queue_cbs[UINT8_MAX + 1];
+	struct rte_event_dispatcher_cb fallback;
+};
+
+static struct rte_event_dispatcher *dispatchers[UINT8_MAX + 1];
+
+static bool
+red_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_event_dispatcher *
+red_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+red_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define RED_VALID_ID_OR_RET_EINVAL(id)					\
+	do {								\
+		if (unlikely(!red_has_dispatcher(id))) {		\
+			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+static struct rte_event_dispatcher_cb *
+red_lookup_cb(struct rte_event_dispatcher *dispatcher, uint8_t queue_id)
+{
+	struct rte_event_dispatcher_cb *cb = &dispatcher->queue_cbs[queue_id];
+
+	if (unlikely(cb->cb_fun == NULL))
+		cb = &dispatcher->fallback;
+
+	return cb;
+}
+
+static void
+red_dispatch_events(struct rte_event_dispatcher *dispatcher,
+		    struct rte_event *events, uint16_t num_events)
+{
+	uint16_t cb_start;
+	uint16_t cb_len;
+
+	for (cb_start = 0; cb_start < num_events; cb_start += cb_len) {
+		uint16_t cb_end = cb_start;
+		uint8_t queue_id = events[cb_start].queue_id;
+		struct rte_event_dispatcher_cb *cb;
+
+		while (++cb_end < num_events &&
+		       events[cb_end].queue_id == queue_id)
+			;
+
+		cb_len = cb_end - cb_start;
+
+		cb = red_lookup_cb(dispatcher, queue_id);
+
+		if (unlikely(cb->cb_fun == NULL)) {
+			RTE_EDEV_LOG_ERR("Attempted to dispatch %d events "
+					 "for queue id %d, but no queue or "
+					 "fallback cb were configured\n",
+					 cb_len, queue_id);
+			continue;
+		}
+
+		cb->cb_fun(&events[cb_start], cb_len, cb->cb_data);
+	}
+}
+
+static void
+red_port_dequeue(struct rte_event_dispatcher *dispatcher,
+		 struct rte_event_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	red_dispatch_events(dispatcher, events, n);
+}
+
+static int32_t
+red_lcore_process(void *userdata)
+{
+	uint16_t i;
+	struct rte_event_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_event_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		red_port_dequeue(dispatcher, port);
+	}
+
+	return 0;
+}
+
+static int
+red_service_runstate_set(uint32_t service_id, int state)
+{
+	int rc;
+
+	rc = rte_service_component_runstate_set(service_id, state);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Error %d occurred while setting service "
+				 "component run state to %d\n", rc, state);
+
+	return rc;
+}
+
+static int
+red_service_register(struct rte_event_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = red_lcore_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "red_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc) {
+		RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+		return rc;
+	}
+
+	rc = red_service_runstate_set(dispatcher->service_id, 1);
+
+	if (rc)
+		rte_service_component_unregister(dispatcher->service_id);
+
+	return rc;
+}
+
+static int
+red_service_unregister(struct rte_event_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = red_service_runstate_set(dispatcher->service_id, 0);
+
+	if (rc)
+		return rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	if (red_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("event dispatcher",
+				  sizeof(struct rte_event_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
+				 "dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_event_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = red_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	red_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_free(uint8_t id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = red_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	red_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int16_t
+lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	struct rte_event_dispatcher_lcore_port *port;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore = &dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == RED_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_event_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	int16_t port_idx;
+	struct rte_event_dispatcher_lcore_port *port;
+	struct rte_event_dispatcher_lcore_port *last;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore = &dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static int
+red_cb_manage(uint8_t id, uint8_t queue_id, bool reg, bool fallback,
+	   rte_event_dispatcher_cb_t cb_fun, void *cb_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_cb *cb;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	if (fallback)
+		cb = &dispatcher->fallback;
+	else
+		cb = &dispatcher->queue_cbs[queue_id];
+
+	if (reg && cb->cb_fun != NULL)
+		return -EEXIST;
+
+	if (!reg && cb->cb_fun == NULL)
+		return -ENOENT;
+
+	*cb = (struct rte_event_dispatcher_cb) {
+		.cb_fun = cb_fun,
+		.cb_data = cb_data
+	};
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_register(uint8_t id, uint8_t queue_id,
+			      rte_event_dispatcher_cb_t cb_fun, void *cb_data)
+{
+	return red_cb_manage(id, queue_id, true, false, cb_fun, cb_data);
+}
+
+int
+rte_event_dispatcher_unregister(uint8_t id, uint8_t queue_id)
+{
+	return red_cb_manage(id, queue_id, false, false, NULL, NULL);
+}
+
+int
+rte_event_dispatcher_register_fallback(uint8_t id,
+				       rte_event_dispatcher_cb_t cb_fun,
+				       void *cb_data)
+{
+	return red_cb_manage(id, 0, true, true, cb_fun, cb_data);
+}
+
+int
+rte_event_dispatcher_unregister_fallback(uint8_t id)
+{
+	return red_cb_manage(id, 0, false, true, NULL, NULL);
+}
diff --git a/lib/librte_eventdev/rte_event_dispatcher.h b/lib/librte_eventdev/rte_event_dispatcher.h
new file mode 100644
index 0000000000..11f57571ab
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_dispatcher.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Ericsson AB
+ */
+
+#ifndef __RTE_EVENT_DISPATCHER_H__
+#define __RTE_EVENT_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Dispatcher
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for dispatcher callbacks.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register() or
+ *  rte_event_dispatcher_register_fallback().
+ */
+
+typedef void (*rte_event_dispatcher_cb_t)(struct rte_event *events,
+					  uint16_t num, void *cb_data);
+
+/**
+ * Create an event dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all event dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this event dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Frees an event dispatcher with the specified id.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of the event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the event dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * event dispatcher.
+ *
+ * This function configures an event dispatcher to dequeue events from
+ * an event device port (as specified by @p event_port_id), in case
+ * its service function is run on a particular lcore (as specified by @p
+ * lcore_id).
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port may only be bound to one lcore.
+ *
+ * If the event dispatcher service is mapped (with
+ * rte_service_map_lcore_set()) to a lcore for which no ports are
+ * bound, the service function will be a no-operation.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Register a callback function for the specified queue identifier.
+ *
+ * At most one callback may be registered per queue id.
+ *
+ * The same callback function may be registered for multiple queue ids.
+ *
+ * For each callback invocation, events belonging to a single queue id
+ * will be dispatched.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param queue_id
+ *  The event device queue id for which @p cb_fun should be called.
+ *
+ * @param cb_fun
+ *  The callback function.
+ *
+ * @param cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application in the callback.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_register(uint8_t id, uint8_t queue_id,
+			      rte_event_dispatcher_cb_t cb_fun, void *cb_data);
+
+/**
+ * Unregister a callback function for the specified queue identifier.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param queue_id
+ *  The event device queue id for which the callback should be removed.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unregister(uint8_t id, uint8_t queue_id);
+
+/**
+ * Register a fallback callback function for the specified queue
+ * identifier.
+ *
+ * Only events for which no queue-specific callback function has been
+ * registered will be dispatched to the @p cb_fun callback.
+ *
+ * At most one callback fallback function may be registered.
+ *
+ * For each callback invocation, only events belonging to a single
+ * queue id will be included.
+ *
+ * If the event dispatcher encounters an event with a queue id for
+ * which the application has not registered any specific callback, and
+ * there is also no fallback configured, the event will be dropped.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param cb_fun
+ *  The fallback callback function.
+ *
+ * @param cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application in the callback.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_register_fallback(uint8_t id,
+				       rte_event_dispatcher_cb_t cb_fun,
+				       void *cb_data);
+
+/**
+ * Unregister the fallback callback function.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unregister_fallback(uint8_t id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_DISPATCHER_H__ */
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 91a62cd077..dcb887601b 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -134,4 +134,14 @@ EXPERIMENTAL {
 	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
 	__rte_eventdev_trace_crypto_adapter_start;
 	__rte_eventdev_trace_crypto_adapter_stop;
+
+	rte_event_dispatcher_create;
+	rte_event_dispatcher_free;
+	rte_event_dispatcher_service_id_get;
+	rte_event_dispatcher_bind_port_to_lcore;
+	rte_event_dispatcher_unbind_port_from_lcore;
+	rte_event_dispatcher_register;
+	rte_event_dispatcher_unregister;
+	rte_event_dispatcher_register_fallback;
+	rte_event_dispatcher_unregister_fallback;
 };
-- 
2.25.1



* Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
  2021-02-18 18:30 [dpdk-dev] [RFC] eventdev: introduce event dispatcher Mattias Rönnblom
@ 2021-02-22 15:28 ` Luca Boccassi
  2021-02-26  7:48   ` Mattias Rönnblom
  2021-02-25 12:32 ` Jerin Jacob
  1 sibling, 1 reply; 102+ messages in thread
From: Luca Boccassi @ 2021-02-22 15:28 UTC (permalink / raw)
  To: Mattias Rönnblom, jerinj; +Cc: dev, bruce.richardson

On Thu, 2021-02-18 at 19:30 +0100, Mattias Rönnblom wrote:
> The purpose of the event dispatcher is primarily to decouple different
> parts of an application (e.g., processing pipeline stages), which
> share the same underlying event device.
> 
> The event dispatcher replaces the conditional logic (often, a switch
> statement) that typically follows an event device dequeue operation,
> where events are dispatched to different parts of the application
> based on the destination queue id.
> 
> The concept is similar to a UNIX file descriptor event loop library.
> Instead of tying callback functions to fds as for example libevent
> does, the event dispatcher binds callbacks to queue ids.
> 
> An event dispatcher is configured to dequeue events from a specific
> event device, and ties into the service core framework, to do its (and
> the application's) work.
> 
> The event dispatcher provides a convenient way for an eventdev-based
> application to use service cores for application-level processing, and
> thus for sharing those cores with other DPDK services.
> 
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> ---
>  lib/librte_eventdev/Makefile                 |   2 +
>  lib/librte_eventdev/meson.build              |   6 +-
>  lib/librte_eventdev/rte_event_dispatcher.c   | 420 +++++++++++++++++++
>  lib/librte_eventdev/rte_event_dispatcher.h   | 251 +++++++++++
>  lib/librte_eventdev/rte_eventdev_version.map |  10 +
>  5 files changed, 687 insertions(+), 2 deletions(-)
>  create mode 100644 lib/librte_eventdev/rte_event_dispatcher.c
>  create mode 100644 lib/librte_eventdev/rte_event_dispatcher.h

Hi,

Is this intended to be used by applications or by PMDs? If the former,
then IMHO the interface should really be based around (or allow using)
FDs, so that it can be polled. Applications normally have more event
sources than just DPDK.

-- 
Kind regards,
Luca Boccassi


* Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
  2021-02-18 18:30 [dpdk-dev] [RFC] eventdev: introduce event dispatcher Mattias Rönnblom
  2021-02-22 15:28 ` Luca Boccassi
@ 2021-02-25 12:32 ` Jerin Jacob
  2021-02-26  8:01   ` Mattias Rönnblom
  1 sibling, 1 reply; 102+ messages in thread
From: Jerin Jacob @ 2021-02-25 12:32 UTC (permalink / raw)
  To: Mattias Rönnblom; +Cc: Jerin Jacob, dpdk-dev, Richardson, Bruce

On Fri, Feb 19, 2021 at 12:00 AM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> The purpose of the event dispatcher is primarily to decouple different
> parts of an application (e.g., processing pipeline stages), which
> share the same underlying event device.
>
> The event dispatcher replaces the conditional logic (often, a switch
> statement) that typically follows an event device dequeue operation,
> where events are dispatched to different parts of the application
> based on the destination queue id.

# If the device has all-type queues[1], this RFC would restrict a
stage to being a queue ONLY. A stage can also be a queue type.
How can we abstract this in this model?

# Also, I think it may make sense to add this type of infrastructure
as helper functions, since these are built on top of existing APIs,
i.e., no support is required from the driver to establish this model.
IMO, if we add such support as one fixed set of functionality, we
could have helper APIs to express a certain usage of eventdev, rather
than defining this as the only way to do it. I think a helper
function can be used as an abstraction to define this kind of model.

# Also, there is function pointer overhead, and the events are
aggregated in the implementation; that may not always be "the"
optimal model of making it work vs. a switch case in the application.


[1]
See RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES in
https://doc.dpdk.org/guides/prog_guide/eventdev.html



* Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
  2021-02-22 15:28 ` Luca Boccassi
@ 2021-02-26  7:48   ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2021-02-26  7:48 UTC (permalink / raw)
  To: Luca Boccassi, jerinj; +Cc: dev, bruce.richardson

On 2021-02-22 16:28, Luca Boccassi wrote:
> On Thu, 2021-02-18 at 19:30 +0100, Mattias Rönnblom wrote:
>> The purpose of the event dispatcher is primarily to decouple different
>> parts of an application (e.g., processing pipeline stages), which
>> share the same underlying event device.
>>
>> The event dispatcher replaces the conditional logic (often, a switch
>> statement) that typically follows an event device dequeue operation,
>> where events are dispatched to different parts of the application
>> based on the destination queue id.
>>
>> The concept is similar to a UNIX file descriptor event loop library.
>> Instead of tying callback functions to fds as for example libevent
>> does, the event dispatcher binds callbacks to queue ids.
>>
>> An event dispatcher is configured to dequeue events from a specific
>> event device, and ties into the service core framework, to do its (and
>> the application's) work.
>>
>> The event dispatcher provides a convenient way for an eventdev-based
>> application to use service cores for application-level processing, and
>> thus for sharing those cores with other DPDK services.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> ---
>>   lib/librte_eventdev/Makefile                 |   2 +
>>   lib/librte_eventdev/meson.build              |   6 +-
>>   lib/librte_eventdev/rte_event_dispatcher.c   | 420 +++++++++++++++++++
>>   lib/librte_eventdev/rte_event_dispatcher.h   | 251 +++++++++++
>>   lib/librte_eventdev/rte_eventdev_version.map |  10 +
>>   5 files changed, 687 insertions(+), 2 deletions(-)
>>   create mode 100644 lib/librte_eventdev/rte_event_dispatcher.c
>>   create mode 100644 lib/librte_eventdev/rte_event_dispatcher.h
> Hi,
>
> Is this intended to be used by applications or by PMDs? If the former,
> then IMHO the interface should really be based around (or allow using)
> FDs, so that it can be polled. Applications normally have more event
> sources than just DPDK.
>
It's for applications. File descriptors might be involved in the API 
somehow, but I think for most applications, they are too costly. I mean, 
the whole point of DPDK is to avoid the operating system overhead.




* Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
  2021-02-25 12:32 ` Jerin Jacob
@ 2021-02-26  8:01   ` Mattias Rönnblom
  2021-03-07 13:04     ` Jerin Jacob
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2021-02-26  8:01 UTC (permalink / raw)
  To: Jerin Jacob; +Cc: Jerin Jacob, dpdk-dev, Richardson, Bruce

On 2021-02-25 13:32, Jerin Jacob wrote:
> On Fri, Feb 19, 2021 at 12:00 AM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>> The purpose of the event dispatcher is primarily to decouple different
>> parts of an application (e.g., processing pipeline stages), which
>> share the same underlying event device.
>>
>> The event dispatcher replaces the conditional logic (often, a switch
>> statement) that typically follows an event device dequeue operation,
>> where events are dispatched to different parts of the application
>> based on the destination queue id.
> # If the device has all-type queues[1], this RFC would restrict a
> stage to being a queue ONLY. A stage can also be a queue type.
> How can we abstract this in this model?


"All queue type" is about scheduling policy. I would think that would be 
independent of the "logical endpoint" of the event (i.e., the queue id). 
I feel like I'm missing something here.
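
To illustrate with a sketch (STAGE_QUEUE_ID being a hypothetical,
application-defined constant): on a device with
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES, the scheduling type travels with
the event, while the queue id still identifies the logical endpoint
the dispatcher keys its callbacks on:

	struct rte_event ev;

	ev.queue_id = STAGE_QUEUE_ID;          /* stage/endpoint identity */
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC; /* per-event scheduling type,
						* allowed on all-types queues */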


> # Also, I think it may make sense to add this type of infrastructure
> as helper functions, since these are built on top of existing APIs,
> i.e., no support is required from the driver to establish this model.
> IMO, if we add such support as one fixed set of functionality, we
> could have helper APIs to express a certain usage of eventdev, rather
> than defining this as the only way to do it. I think a helper
> function can be used as an abstraction to define this kind of model.
>
> # Also, there is function pointer overhead, and the events are
> aggregated in the implementation; that may not always be "the"
> optimal model of making it work vs. a switch case in the application.


Sure, but what can you do in a reasonably generic framework?


If you are very sensitive to those ~20 clock cycles (or whatever) of 
function pointer call overhead, you won't use this library. Or you 
will, and use static linking and LTO to get rid of that overhead.


Probably, you have a few queues, not many. Probably, your dequeue bursts 
are large if the system load is high (and otherwise, you don't care 
about efficiency). Then, you will have at least a couple of events per 
function call.
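
(As a rough illustration: assuming the ~20-cycle figure above and a
burst of 16 events sharing a queue id, the amortized dispatch overhead
would be around 20/16 ≈ 1.25 cycles per event.)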


>
> [1]
> See RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES in
> https://doc.dpdk.org/guides/prog_guide/eventdev.html
>
>
>> The concept is similar to a UNIX file descriptor event loop library.
>> Instead of tying callback functions to fds as for example libevent
>> does, the event dispatcher binds callbacks to queue ids.
>>
>> An event dispatcher is configured to dequeue events from a specific
>> event device, and ties into the service core framework, to do its (and
>> the application's) work.
>>
>> The event dispatcher provides a convenient way for an eventdev-based
>> application to use service cores for application-level processing, and
>> thus for sharing those cores with other DPDK services.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> ---
>>   lib/librte_eventdev/Makefile                 |   2 +
>>   lib/librte_eventdev/meson.build              |   6 +-
>>   lib/librte_eventdev/rte_event_dispatcher.c   | 420 +++++++++++++++++++
>>   lib/librte_eventdev/rte_event_dispatcher.h   | 251 +++++++++++
>>   lib/librte_eventdev/rte_eventdev_version.map |  10 +
>>   5 files changed, 687 insertions(+), 2 deletions(-)
>>   create mode 100644 lib/librte_eventdev/rte_event_dispatcher.c
>>   create mode 100644 lib/librte_eventdev/rte_event_dispatcher.h
>>
>> diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
>> index 0715256bb4..614d53af1b 100644
>> --- a/lib/librte_eventdev/Makefile
>> +++ b/lib/librte_eventdev/Makefile
>> @@ -26,6 +26,7 @@ SRCS-y += rte_event_eth_rx_adapter.c
>>   SRCS-y += rte_event_timer_adapter.c
>>   SRCS-y += rte_event_crypto_adapter.c
>>   SRCS-y += rte_event_eth_tx_adapter.c
>> +SRCS-y += rte_event_dispatcher.c
>>
>>   # export include files
>>   SYMLINK-y-include += rte_eventdev.h
>> @@ -40,6 +41,7 @@ SYMLINK-y-include += rte_event_timer_adapter.h
>>   SYMLINK-y-include += rte_event_timer_adapter_pmd.h
>>   SYMLINK-y-include += rte_event_crypto_adapter.h
>>   SYMLINK-y-include += rte_event_eth_tx_adapter.h
>> +SYMLINK-y-include += rte_event_dispatcher.h
>>
>>   # versioning export map
>>   EXPORT_MAP := rte_eventdev_version.map
>> diff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build
>> index d1f25ee5ca..2ca81983b5 100644
>> --- a/lib/librte_eventdev/meson.build
>> +++ b/lib/librte_eventdev/meson.build
>> @@ -13,7 +13,8 @@ sources = files('rte_eventdev.c',
>>                  'rte_event_eth_rx_adapter.c',
>>                  'rte_event_timer_adapter.c',
>>                  'rte_event_crypto_adapter.c',
>> -               'rte_event_eth_tx_adapter.c')
>> +               'rte_event_eth_tx_adapter.c',
>> +               'rte_event_dispatcher.c')
>>   headers = files('rte_eventdev.h',
>>                  'rte_eventdev_pmd.h',
>>                  'rte_eventdev_pmd_pci.h',
>> @@ -25,5 +26,6 @@ headers = files('rte_eventdev.h',
>>                  'rte_event_timer_adapter.h',
>>                  'rte_event_timer_adapter_pmd.h',
>>                  'rte_event_crypto_adapter.h',
>> -               'rte_event_eth_tx_adapter.h')
>> +               'rte_event_eth_tx_adapter.h',
>> +               'rte_event_dispatcher.h')
>>   deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
>> diff --git a/lib/librte_eventdev/rte_event_dispatcher.c b/lib/librte_eventdev/rte_event_dispatcher.c
>> new file mode 100644
>> index 0000000000..1c7e55a752
>> --- /dev/null
>> +++ b/lib/librte_eventdev/rte_event_dispatcher.c
>> @@ -0,0 +1,420 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2021 Ericsson AB
>> + */
>> +
>> +#include <stdbool.h>
>> +#include <stdint.h>
>> +
>> +#include <rte_lcore.h>
>> +#include <rte_service_component.h>
>> +#include <rte_eventdev_pmd.h>
>> +
>> +#include <rte_event_dispatcher.h>
>> +
>> +#define RED_MAX_PORTS_PER_LCORE (4)
>> +
>> +struct rte_event_dispatcher_lcore_port {
>> +       uint8_t port_id;
>> +       uint16_t batch_size;
>> +       uint64_t timeout;
>> +};
>> +
>> +struct rte_event_dispatcher_lcore {
>> +       uint8_t num_ports;
>> +       struct rte_event_dispatcher_lcore_port ports[RED_MAX_PORTS_PER_LCORE];
>> +};
>> +
>> +struct rte_event_dispatcher_cb {
>> +       rte_event_dispatcher_cb_t cb_fun;
>> +       void *cb_data;
>> +};
>> +
>> +struct rte_event_dispatcher {
>> +       uint8_t id;
>> +       uint8_t event_dev_id;
>> +       int socket_id;
>> +       uint32_t service_id;
>> +       struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
>> +       struct rte_event_dispatcher_cb queue_cbs[UINT8_MAX];
>> +       struct rte_event_dispatcher_cb fallback;
>> +};
>> +
>> +static struct rte_event_dispatcher *dispatchers[UINT8_MAX];
>> +
>> +static bool
>> +red_has_dispatcher(uint8_t id)
>> +{
>> +       return dispatchers[id] != NULL;
>> +}
>> +
>> +static struct rte_event_dispatcher *
>> +red_get_dispatcher(uint8_t id)
>> +{
>> +       return dispatchers[id];
>> +}
>> +
>> +static void
>> +red_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
>> +{
>> +       dispatchers[id] = dispatcher;
>> +}
>> +
>> +#define RED_VALID_ID_OR_RET_EINVAL(id)                                 \
>> +       do {                                                            \
>> +               if (unlikely(!red_has_dispatcher(id))) {                \
>> +                       RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
>> +                       return -EINVAL;                                 \
>> +               }                                                       \
>> +       } while (0)
>> +
>> +static struct rte_event_dispatcher_cb *
>> +red_lookup_cb(struct rte_event_dispatcher *dispatcher, uint8_t queue_id)
>> +{
>> +       struct rte_event_dispatcher_cb *cb = &dispatcher->queue_cbs[queue_id];
>> +
>> +       if (unlikely(cb->cb_fun == NULL))
>> +           cb = &dispatcher->fallback;
>> +
>> +       return cb;
>> +}
>> +
>> +static void
>> +red_dispatch_events(struct rte_event_dispatcher *dispatcher,
>> +                   struct rte_event *events, uint16_t num_events)
>> +{
>> +       uint16_t cb_start;
>> +       uint16_t cb_len;
>> +
>> +       for (cb_start = 0; cb_start < num_events; cb_start += cb_len) {
>> +               uint16_t cb_end = cb_start;
>> +               uint8_t queue_id = events[cb_start].queue_id;
>> +               struct rte_event_dispatcher_cb *cb;
>> +
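>> +               /* Advance cb_end past the run of consecutive events
>> +                * sharing this queue id; the whole run is then handed
>> +                * to a single callback invocation below.
>> +                */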
>> +               while (++cb_end < num_events &&
>> +                      events[cb_end].queue_id == queue_id)
>> +                       ;
>> +
>> +               cb_len = cb_end - cb_start;
>> +
>> +               cb = red_lookup_cb(dispatcher, queue_id);
>> +
>> +               if (unlikely(cb->cb_fun == NULL)) {
>> +                       RTE_EDEV_LOG_ERR("Attempted to dispatch %d events "
>> +                                        "for queue id %d, but no queue or "
>> +                                        "fallback cb was configured\n",
>> +                                        cb_len, queue_id);
>> +                       continue;
>> +               }
>> +
>> +               cb->cb_fun(&events[cb_start], cb_len, cb->cb_data);
>> +       }
>> +}
>> +
>> +static void
>> +red_port_dequeue(struct rte_event_dispatcher *dispatcher,
>> +                struct rte_event_dispatcher_lcore_port *port)
>> +{
>> +       uint16_t batch_size = port->batch_size;
>> +       struct rte_event events[batch_size];
>> +       uint16_t n;
>> +
>> +       n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
>> +                                   events, batch_size, port->timeout);
>> +
>> +       red_dispatch_events(dispatcher, events, n);
>> +}
>> +
>> +static int32_t
>> +red_lcore_process(void *userdata)
>> +{
>> +       uint16_t i;
>> +       struct rte_event_dispatcher *dispatcher = userdata;
>> +       unsigned int lcore_id = rte_lcore_id();
>> +       struct rte_event_dispatcher_lcore *lcore =
>> +               &dispatcher->lcores[lcore_id];
>> +
>> +       for (i = 0; i < lcore->num_ports; i++) {
>> +               struct rte_event_dispatcher_lcore_port *port =
>> +                       &lcore->ports[i];
>> +
>> +               red_port_dequeue(dispatcher, port);
>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +static int
>> +red_service_runstate_set(uint32_t service_id, int state)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_service_component_runstate_set(service_id, state);
>> +
>> +       if (rc)
>> +               RTE_EDEV_LOG_ERR("Error %d occurred while setting service "
>> +                                "component run state to %d\n", rc, state);
>> +
>> +       return rc;
>> +}
>> +
>> +static int
>> +red_service_register(struct rte_event_dispatcher *dispatcher)
>> +{
>> +       struct rte_service_spec service = {
>> +               .callback = red_lcore_process,
>> +               .callback_userdata = dispatcher,
>> +               .capabilities = RTE_SERVICE_CAP_MT_SAFE,
>> +               .socket_id = dispatcher->socket_id
>> +       };
>> +       int rc;
>> +
>> +       snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "red_%d",
>> +                dispatcher->id);
>> +
>> +       rc = rte_service_component_register(&service, &dispatcher->service_id);
>> +
>> +       if (rc) {
>> +               RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
>> +                                "%s failed with error code %d\n",
>> +                                service.name, rc);
>> +               return rc;
>> +       }
>> +
>> +       rc = red_service_runstate_set(dispatcher->service_id, 1);
>> +
>> +       if (rc)
>> +               rte_service_component_unregister(dispatcher->service_id);
>> +
>> +       return rc;
>> +}
>> +
>> +static int
>> +red_service_unregister(struct rte_event_dispatcher *dispatcher)
>> +{
>> +       int rc;
>> +
>> +       rc = red_service_runstate_set(dispatcher->service_id, 0);
>> +
>> +       if (rc)
>> +               return rc;
>> +
>> +       rc = rte_service_component_unregister(dispatcher->service_id);
>> +
>> +       if (rc)
>> +               RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
>> +                                "failed with error code %d\n", rc);
>> +
>> +       return rc;
>> +}
>> +
>> +int
>> +rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
>> +{
>> +       int socket_id;
>> +       struct rte_event_dispatcher *dispatcher;
>> +       int rc;
>> +
>> +       if (red_has_dispatcher(id)) {
>> +               RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
>> +                                id);
>> +               return -EEXIST;
>> +       }
>> +
>> +       socket_id = rte_event_dev_socket_id(event_dev_id);
>> +
>> +       dispatcher =
>> +               rte_malloc_socket("event dispatcher",
>> +                                 sizeof(struct rte_event_dispatcher),
>> +                                 RTE_CACHE_LINE_SIZE, socket_id);
>> +
>> +       if (dispatcher == NULL) {
>> +               RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
>> +                                "dispatcher\n");
>> +               return -ENOMEM;
>> +       }
>> +
>> +       *dispatcher = (struct rte_event_dispatcher) {
>> +               .id = id,
>> +               .event_dev_id = event_dev_id,
>> +               .socket_id = socket_id
>> +       };
>> +
>> +       rc = red_service_register(dispatcher);
>> +
>> +       if (rc < 0) {
>> +               rte_free(dispatcher);
>> +               return rc;
>> +       }
>> +
>> +       red_set_dispatcher(id, dispatcher);
>> +
>> +       return 0;
>> +}
>> +
>> +int
>> +rte_event_dispatcher_free(uint8_t id)
>> +{
>> +       struct rte_event_dispatcher *dispatcher;
>> +       int rc;
>> +
>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>> +       dispatcher = red_get_dispatcher(id);
>> +
>> +       rc = red_service_unregister(dispatcher);
>> +
>> +       if (rc)
>> +               return rc;
>> +
>> +       red_set_dispatcher(id, NULL);
>> +
>> +       rte_free(dispatcher);
>> +
>> +       return 0;
>> +}
>> +
>> +int
>> +rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
>> +{
>> +       struct rte_event_dispatcher *dispatcher;
>> +
>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>> +       dispatcher = red_get_dispatcher(id);
>> +
>> +       *service_id = dispatcher->service_id;
>> +
>> +       return 0;
>> +}
>> +
>> +static int16_t
>> +lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
>> +                uint8_t event_port_id)
>> +{
>> +       uint16_t i;
>> +
>> +       for (i = 0; i < lcore->num_ports; i++) {
>> +               struct rte_event_dispatcher_lcore_port *port =
>> +                       &lcore->ports[i];
>> +               if (port->port_id == event_port_id)
>> +                       return i;
>> +       }
>> +
>> +       return -1;
>> +}
>> +
>> +int
>> +rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
>> +                                       uint16_t batch_size, uint64_t timeout,
>> +                                       unsigned int lcore_id)
>> +{
>> +       struct rte_event_dispatcher *dispatcher;
>> +       struct rte_event_dispatcher_lcore *lcore;
>> +       struct rte_event_dispatcher_lcore_port *port;
>> +
>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>> +       dispatcher = red_get_dispatcher(id);
>> +
>> +       lcore = &dispatcher->lcores[lcore_id];
>> +
>> +       if (lcore->num_ports == RED_MAX_PORTS_PER_LCORE)
>> +               return -ENOMEM;
>> +
>> +       if (lcore_port_index(lcore, event_port_id) >= 0)
>> +               return -EEXIST;
>> +
>> +       port = &lcore->ports[lcore->num_ports];
>> +
>> +       *port = (struct rte_event_dispatcher_lcore_port) {
>> +               .port_id = event_port_id,
>> +               .batch_size = batch_size,
>> +               .timeout = timeout
>> +       };
>> +
>> +       lcore->num_ports++;
>> +
>> +       return 0;
>> +}
>> +
>> +int
>> +rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
>> +                                           unsigned int lcore_id)
>> +{
>> +       struct rte_event_dispatcher *dispatcher;
>> +       struct rte_event_dispatcher_lcore *lcore;
>> +       int16_t port_idx;
>> +       struct rte_event_dispatcher_lcore_port *port;
>> +       struct rte_event_dispatcher_lcore_port *last;
>> +
>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>> +       dispatcher = red_get_dispatcher(id);
>> +
>> +       lcore = &dispatcher->lcores[lcore_id];
>> +
>> +       port_idx = lcore_port_index(lcore, event_port_id);
>> +
>> +       if (port_idx < 0)
>> +               return -ENOENT;
>> +
>> +       port = &lcore->ports[port_idx];
>> +       last = &lcore->ports[lcore->num_ports - 1];
>> +
>> +       if (port != last)
>> +               *port = *last;
>> +
>> +       lcore->num_ports--;
>> +
>> +       return 0;
>> +}
>> +
>> +static int
>> +red_cb_manage(uint8_t id, uint8_t queue_id, bool reg, bool fallback,
>> +          rte_event_dispatcher_cb_t cb_fun, void *cb_data)
>> +{
>> +       struct rte_event_dispatcher *dispatcher;
>> +       struct rte_event_dispatcher_cb *cb;
>> +
>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>> +       dispatcher = red_get_dispatcher(id);
>> +
>> +       if (fallback)
>> +               cb = &dispatcher->fallback;
>> +       else
>> +               cb = &dispatcher->queue_cbs[queue_id];
>> +
>> +       if (reg && cb->cb_fun != NULL)
>> +               return -EEXIST;
>> +
>> +       if (!reg && cb->cb_fun == NULL)
>> +               return -ENOENT;
>> +
>> +       *cb = (struct rte_event_dispatcher_cb) {
>> +               .cb_fun = cb_fun,
>> +               .cb_data = cb_data
>> +       };
>> +
>> +       return 0;
>> +}
>> +
>> +int
>> +rte_event_dispatcher_register(uint8_t id, uint8_t queue_id,
>> +                             rte_event_dispatcher_cb_t cb_fun, void *cb_data)
>> +{
>> +       return red_cb_manage(id, queue_id, true, false, cb_fun, cb_data);
>> +}
>> +
>> +int
>> +rte_event_dispatcher_unregister(uint8_t id, uint8_t queue_id)
>> +{
>> +       return red_cb_manage(id, queue_id, false, false, NULL, NULL);
>> +}
>> +
>> +int
>> +rte_event_dispatcher_register_fallback(uint8_t id,
>> +                                      rte_event_dispatcher_cb_t cb_fun,
>> +                                      void *cb_data)
>> +{
>> +       return red_cb_manage(id, 0, true, true, cb_fun, cb_data);
>> +}
>> +
>> +int
>> +rte_event_dispatcher_unregister_fallback(uint8_t id)
>> +{
>> +       return red_cb_manage(id, 0, false, true, NULL, NULL);
>> +}
>> diff --git a/lib/librte_eventdev/rte_event_dispatcher.h b/lib/librte_eventdev/rte_event_dispatcher.h
>> new file mode 100644
>> index 0000000000..11f57571ab
>> --- /dev/null
>> +++ b/lib/librte_eventdev/rte_event_dispatcher.h
>> @@ -0,0 +1,251 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2021 Ericsson AB
>> + */
>> +
>> +#ifndef __RTE_EVENT_DISPATCHER_H__
>> +#define __RTE_EVENT_DISPATCHER_H__
>> +
>> +/**
>> + * @file
>> + *
>> + * RTE Event Dispatcher
>> + *
>> + */
>> +
>> +#ifdef __cplusplus
>> +extern "C" {
>> +#endif
>> +
>> +#include <rte_eventdev.h>
>> +
>> +/**
>> + * Function prototype for dispatcher callbacks.
>> + *
>> + * @param events
>> + *  Pointer to an array of events.
>> + *
>> + * @param num
>> + *  The number of events in the @p events array.
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_event_dispatcher_register() or
>> + *  rte_event_dispatcher_register_fallback().
>> + */
>> +
>> +typedef void (*rte_event_dispatcher_cb_t)(struct rte_event *events,
>> +                                         uint16_t num, void *cb_data);
>> +
>> +/**
>> + * Create an event dispatcher with the specified id.
>> + *
>> + * @param id
>> + *  An application-specified, unique (across all event dispatcher
>> + *  instances) identifier.
>> + *
>> + * @param event_dev_id
>> + *  The identifier of the event device from which this event dispatcher
>> + *  will dequeue events.
>> + *
>> + * @return
>> + *   - 0: Success
>> + *   - <0: Error code on failure
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
>> +
>> +/**
>> + * Free an event dispatcher with the specified id.
>> + *
>> + * @param id
>> + *  The event dispatcher identifier.
>> + *
>> + * @return
>> + *   - 0: Success
>> + *   - <0: Error code on failure
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_free(uint8_t id);
>> +
>> +/**
>> + * Retrieve the service identifier of the event dispatcher.
>> + *
>> + * @param id
>> + *  The event dispatcher identifier.
>> + *
>> + * @param [out] service_id
>> + *  A pointer to a caller-supplied buffer where the event dispatcher's
>> + *  service id will be stored.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure.
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
>> +
>> +/**
>> + * Binds an event device port to a specific lcore on the specified
>> + * event dispatcher.
>> + *
>> + * This function configures an event dispatcher to dequeue events from
>> + * an event device port (as specified by @p event_port_id), in case
>> + * its service function is run on a particular lcore (as specified by @p
>> + * lcore_id).
>> + *
>> + * Multiple event device ports may be bound to the same lcore. A
>> + * particular port may only be bound to one lcore.
>> + *
>> + * If the event dispatcher service is mapped (with
>> + * rte_service_map_lcore_set()) to a lcore for which no ports are
>> + * bound, the service function will be a no-operation.
>> + *
>> + * @param id
>> + *  The event dispatcher identifier.
>> + *
>> + * @param event_port_id
>> + *  The event device port identifier.
>> + *
>> + * @param batch_size
>> + *  The batch size to use in rte_event_dequeue_burst(), for the
>> + *  configured event device port and lcore.
>> + *
>> + * @param timeout
>> + *  The timeout parameter to use in rte_event_dequeue_burst(), for the
>> + *  configured event device port and lcore.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure.
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
>> +                                       uint16_t batch_size, uint64_t timeout,
>> +                                       unsigned int lcore_id);
>> +
>> +/**
>> + * Unbind an event device port from a specific lcore.
>> + *
>> + * @param id
>> + *  The event dispatcher identifier.
>> + *
>> + * @param event_port_id
>> + *  The event device port identifier.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure.
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
>> +                                           unsigned int lcore_id);
>> +
>> +/**
>> + * Register a callback function for the specified queue identifier.
>> + *
>> + * At most one callback may be registered per queue id.
>> + *
>> + * The same callback function may be registered for multiple queue ids.
>> + *
>> + * For each callback invocation, events belonging to a single queue id
>> + * will be dispatched.
>> + *
>> + * @param id
>> + *  The event dispatcher identifier.
>> + *
>> + * @param queue_id
>> + *  The event device queue id for which @p cb_fun should be called.
>> + *
>> + * @param cb_fun
>> + *  The callback function.
>> + *
>> + * @param cb_data
>> + *  A pointer to some application-specific opaque data (or NULL),
>> + *  which is supplied back to the application in the callback.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure.
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_register(uint8_t id, uint8_t queue_id,
>> +                             rte_event_dispatcher_cb_t cb_fun, void *cb_data);
>> +
>> +/**
>> + * Unregister a callback function for the specified queue identifier.
>> + *
>> + * @param id
>> + *  The event dispatcher identifier.
>> + *
>> + * @param queue_id
>> + *  The event device queue id for which the callback should be removed.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure.
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_unregister(uint8_t id, uint8_t queue_id);
>> +
>> +/**
>> + * Register a fallback callback function for the specified queue
>> + * identifier.
>> + *
>> + * Only events for which no queue-specific callback function is
>> + * registered will be dispatched to the @p cb_fun callback.
>> + *
>> + * At most one fallback callback function may be registered.
>> + *
>> + * For each callback invocation, only events belonging to a single
>> + * queue id will be included.
>> + *
>> + * If the event dispatcher encounters an event with a queue id for
>> + * which the application has not registered any specific callback, and
>> + * there is also no fallback configured, the event will be dropped.
>> + *
>> + * @param id
>> + *  The event dispatcher identifier.
>> + *
>> + * @param cb_fun
>> + *  The fallback callback function.
>> + *
>> + * @param cb_data
>> + *  A pointer to some application-specific opaque data (or NULL),
>> + *  which is supplied back to the application in the callback.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure.
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_register_fallback(uint8_t id,
>> +                                      rte_event_dispatcher_cb_t cb_fun,
>> +                                      void *cb_data);
>> +
>> +/**
>> + * Unregister the fallback callback function.
>> + *
>> + * @param id
>> + *  The event dispatcher identifier.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure.
>> + */
>> +__rte_experimental
>> +int
>> +rte_event_dispatcher_unregister_fallback(uint8_t id);
>> +
>> +#ifdef __cplusplus
>> +}
>> +#endif
>> +
>> +#endif /* __RTE_EVENT_DISPATCHER_H__ */
>> diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
>> index 91a62cd077..dcb887601b 100644
>> --- a/lib/librte_eventdev/rte_eventdev_version.map
>> +++ b/lib/librte_eventdev/rte_eventdev_version.map
>> @@ -134,4 +134,14 @@ EXPERIMENTAL {
>>          __rte_eventdev_trace_crypto_adapter_queue_pair_del;
>>          __rte_eventdev_trace_crypto_adapter_start;
>>          __rte_eventdev_trace_crypto_adapter_stop;
>> +
>> +       rte_event_dispatcher_create;
>> +       rte_event_dispatcher_free;
>> +       rte_event_dispatcher_service_id_get;
>> +       rte_event_dispatcher_bind_port_to_lcore;
>> +       rte_event_dispatcher_unbind_port_from_lcore;
>> +       rte_event_dispatcher_register;
>> +       rte_event_dispatcher_unregister;
>> +       rte_event_dispatcher_register_fallback;
>> +       rte_event_dispatcher_unregister_fallback;
>>   };
>> --
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
  2021-02-26  8:01   ` Mattias Rönnblom
@ 2021-03-07 13:04     ` Jerin Jacob
  2021-03-15 14:44       ` Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: Jerin Jacob @ 2021-03-07 13:04 UTC (permalink / raw)
  To: Mattias Rönnblom; +Cc: Jerin Jacob, dpdk-dev, Richardson, Bruce

On Fri, Feb 26, 2021 at 1:31 PM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> On 2021-02-25 13:32, Jerin Jacob wrote:
> > On Fri, Feb 19, 2021 at 12:00 AM Mattias Rönnblom
> > <mattias.ronnblom@ericsson.com> wrote:
> >> The purpose of the event dispatcher is primarily to decouple different
> >> parts of an application (e.g., processing pipeline stages), which
> >> share the same underlying event device.
> >>
> >> The event dispatcher replaces the conditional logic (often, a switch
> >> statement) that typically follows an event device dequeue operation,
> >> where events are dispatched to different parts of the application
> >> based on the destination queue id.
> > # If the device has all type queues[1], this RFC would restrict the
> > application to using a queue ONLY as a stage. A stage can be a queue
> > type also. How can we abstract this in this model?
>
>
> "All queue type" is about scheduling policy. I would think that would be
> independent of the "logical endpoint" of the event (i.e., the queue id).
> I feel like I'm missing something here.

Each queue type can also be represented as a stage.
For example, if the system has only one queue, the typical IPsec
outbound stages can be
Q0(Ordered) (SA lookup) -> Q0(Atomic) (sequence number update) ->
Q0(Ordered) (crypto operation) -> Q0(Atomic) (send on wire)
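
As an illustration only (the per-stage callback array and the dispatch
loop below are hypothetical, and not part of the RFC), dispatching on
scheduling type rather than queue id could look something like:

#include <stdint.h>

#include <rte_eventdev.h>

typedef void (*stage_cb_t)(struct rte_event *events, uint16_t num);

/* one callback per scheduling type: ORDERED, ATOMIC and PARALLEL */
static stage_cb_t stage_cbs[RTE_SCHED_TYPE_PARALLEL + 1];

static void
dispatch_on_sched_type(struct rte_event *events, uint16_t num)
{
	uint16_t i;

	/* hand each event to the callback for its scheduling type */
	for (i = 0; i < num; i++) {
		stage_cb_t cb = stage_cbs[events[i].sched_type];

		if (cb != NULL)
			cb(&events[i], 1);
	}
}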

>
>
> > # Also, I think it may make sense to add this type of infrastructure
> > as helper functions, as these are built on top of existing APIs, i.e.,
> > there is no support required from the driver to establish this model.
> > IMO, if we need to add such support as one fixed set of functionality,
> > we could have helper APIs to express a certain usage of eventdev,
> > rather than defining that as the only way to do it. I think a helper
> > function can be used as an abstraction to define this kind of model.
> >
> > # Also, there is function pointer overhead and aggregation of events
> > in the implementation. That may not always be "the" optimized model of
> > making it work vs. a switch case in the application.
>
>
> Sure, but what to do in a reasonable generic framework?
>
>
> If you are very sensitive to that 20 cc or whatever function pointer
> call, you won't use this library. Or you will, and use static linking
> and LTO to get rid of that overhead.
>
>
> Probably, you have a few queues, not many. Probably, your dequeue bursts
> are large when the system load is high (and otherwise, you don't care
> about efficiency). Then, you will have at least a couple of events per
> function call.

I am fine with this library and exposing it as a function pointer, if
someone needs to have a "helper" function to model the system around
this logic.

This RFC looks good to me in general. I would suggest making it as:

- Helper functions, i.e., if someone chooses to write a stage in this
way, it can be enabled through these helper functions. Framing it as a
helper function makes clear that this is one way to do the stuff, but
NOT THE ONLY WAY. (A short usage sketch follows below.)
- Abstract stages as a queue (which is already added in the patch),
and each type in the queue for the all type queue case.
- Enhance test-eventdev to showcase the functionality and performance
of these helpers.
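
A minimal usage sketch of how the proposed API could be used as an
optional helper (error handling omitted; the dispatcher id, batch size
and timeout are application-chosen placeholders):

#include <rte_event_dispatcher.h>
#include <rte_service.h>

#define DISPATCHER_ID 0

static void
stage_cb(struct rte_event *events, uint16_t num, void *cb_data)
{
	/* application-level processing of a burst of same-queue-id events */
}

static void
setup_dispatcher(uint8_t event_dev_id, uint8_t event_port_id,
		 uint8_t queue_id, unsigned int lcore_id)
{
	uint32_t service_id;

	rte_event_dispatcher_create(DISPATCHER_ID, event_dev_id);

	/* dequeue bursts of up to 32 events, with no dequeue timeout */
	rte_event_dispatcher_bind_port_to_lcore(DISPATCHER_ID, event_port_id,
						32, 0, lcore_id);

	rte_event_dispatcher_register(DISPATCHER_ID, queue_id, stage_cb, NULL);

	/* run the dispatcher's service function on the chosen lcore */
	rte_event_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
	rte_service_map_lcore_set(service_id, lcore_id, 1);
}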

Thanks for the RFC.

>
>
> >
> > [1]
> > See RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES in
> > https://doc.dpdk.org/guides/prog_guide/eventdev.html
> >
> >
> >> The concept is similar to a UNIX file descriptor event loop library.
> >> Instead of tying callback functions to fds as for example libevent
> >> does, the event dispatcher binds callbacks to queue ids.
> >>
> >> An event dispatcher is configured to dequeue events from a specific
> >> event device, and ties into the service core framework, to do its (and
> >> the application's) work.
> >>
> >> The event dispatcher provides a convenient way for an eventdev-based
> >> application to use service cores for application-level processing, and
> >> thus for sharing those cores with other DPDK services.
> >>
> >> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> >> ---
> >>   lib/librte_eventdev/Makefile                 |   2 +
> >>   lib/librte_eventdev/meson.build              |   6 +-
> >>   lib/librte_eventdev/rte_event_dispatcher.c   | 420 +++++++++++++++++++
> >>   lib/librte_eventdev/rte_event_dispatcher.h   | 251 +++++++++++
> >>   lib/librte_eventdev/rte_eventdev_version.map |  10 +
> >>   5 files changed, 687 insertions(+), 2 deletions(-)
> >>   create mode 100644 lib/librte_eventdev/rte_event_dispatcher.c
> >>   create mode 100644 lib/librte_eventdev/rte_event_dispatcher.h
> >>
> >> diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
> >> index 0715256bb4..614d53af1b 100644
> >> --- a/lib/librte_eventdev/Makefile
> >> +++ b/lib/librte_eventdev/Makefile
> >> @@ -26,6 +26,7 @@ SRCS-y += rte_event_eth_rx_adapter.c
> >>   SRCS-y += rte_event_timer_adapter.c
> >>   SRCS-y += rte_event_crypto_adapter.c
> >>   SRCS-y += rte_event_eth_tx_adapter.c
> >> +SRCS-y += rte_event_dispatcher.c
> >>
> >>   # export include files
> >>   SYMLINK-y-include += rte_eventdev.h
> >> @@ -40,6 +41,7 @@ SYMLINK-y-include += rte_event_timer_adapter.h
> >>   SYMLINK-y-include += rte_event_timer_adapter_pmd.h
> >>   SYMLINK-y-include += rte_event_crypto_adapter.h
> >>   SYMLINK-y-include += rte_event_eth_tx_adapter.h
> >> +SYMLINK-y-include += rte_event_dispatcher.h
> >>
> >>   # versioning export map
> >>   EXPORT_MAP := rte_eventdev_version.map
> >> diff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build
> >> index d1f25ee5ca..2ca81983b5 100644
> >> --- a/lib/librte_eventdev/meson.build
> >> +++ b/lib/librte_eventdev/meson.build
> >> @@ -13,7 +13,8 @@ sources = files('rte_eventdev.c',
> >>                  'rte_event_eth_rx_adapter.c',
> >>                  'rte_event_timer_adapter.c',
> >>                  'rte_event_crypto_adapter.c',
> >> -               'rte_event_eth_tx_adapter.c')
> >> +               'rte_event_eth_tx_adapter.c',
> >> +               'rte_event_dispatcher.c')
> >>   headers = files('rte_eventdev.h',
> >>                  'rte_eventdev_pmd.h',
> >>                  'rte_eventdev_pmd_pci.h',
> >> @@ -25,5 +26,6 @@ headers = files('rte_eventdev.h',
> >>                  'rte_event_timer_adapter.h',
> >>                  'rte_event_timer_adapter_pmd.h',
> >>                  'rte_event_crypto_adapter.h',
> >> -               'rte_event_eth_tx_adapter.h')
> >> +               'rte_event_eth_tx_adapter.h',
> >> +               'rte_event_dispatcher.h')
> >>   deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
> >> diff --git a/lib/librte_eventdev/rte_event_dispatcher.c b/lib/librte_eventdev/rte_event_dispatcher.c
> >> new file mode 100644
> >> index 0000000000..1c7e55a752
> >> --- /dev/null
> >> +++ b/lib/librte_eventdev/rte_event_dispatcher.c
> >> @@ -0,0 +1,420 @@
> >> +/* SPDX-License-Identifier: BSD-3-Clause
> >> + * Copyright(c) 2021 Ericsson AB
> >> + */
> >> +
> >> +#include <stdbool.h>
> >> +#include <stdint.h>
> >> +
> >> +#include <rte_lcore.h>
> >> +#include <rte_service_component.h>
> >> +#include <rte_eventdev_pmd.h>
> >> +
> >> +#include <rte_event_dispatcher.h>
> >> +
> >> +#define RED_MAX_PORTS_PER_LCORE (4)
> >> +
> >> +struct rte_event_dispatcher_lcore_port {
> >> +       uint8_t port_id;
> >> +       uint16_t batch_size;
> >> +       uint64_t timeout;
> >> +};
> >> +
> >> +struct rte_event_dispatcher_lcore {
> >> +       uint8_t num_ports;
> >> +       struct rte_event_dispatcher_lcore_port ports[RED_MAX_PORTS_PER_LCORE];
> >> +};
> >> +
> >> +struct rte_event_dispatcher_cb {
> >> +       rte_event_dispatcher_cb_t cb_fun;
> >> +       void *cb_data;
> >> +};
> >> +
> >> +struct rte_event_dispatcher {
> >> +       uint8_t id;
> >> +       uint8_t event_dev_id;
> >> +       int socket_id;
> >> +       uint32_t service_id;
> >> +       struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
> >> +       struct rte_event_dispatcher_cb queue_cbs[UINT8_MAX];
> >> +       struct rte_event_dispatcher_cb fallback;
> >> +};
> >> +
> >> +static struct rte_event_dispatcher *dispatchers[UINT8_MAX];
> >> +
> >> +static bool
> >> +red_has_dispatcher(uint8_t id)
> >> +{
> >> +       return dispatchers[id] != NULL;
> >> +}
> >> +
> >> +static struct rte_event_dispatcher *
> >> +red_get_dispatcher(uint8_t id)
> >> +{
> >> +       return dispatchers[id];
> >> +}
> >> +
> >> +static void
> >> +red_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
> >> +{
> >> +       dispatchers[id] = dispatcher;
> >> +}
> >> +
> >> +#define RED_VALID_ID_OR_RET_EINVAL(id)                                 \
> >> +       do {                                                            \
> >> +               if (unlikely(!red_has_dispatcher(id))) {                \
> >> +                       RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
> >> +                       return -EINVAL;                                 \
> >> +               }                                                       \
> >> +       } while (0)
> >> +
> >> +static struct rte_event_dispatcher_cb *
> >> +red_lookup_cb(struct rte_event_dispatcher *dispatcher, uint8_t queue_id)
> >> +{
> >> +       struct rte_event_dispatcher_cb *cb = &dispatcher->queue_cbs[queue_id];
> >> +
> >> +       if (unlikely(cb->cb_fun == NULL))
> >> +           cb = &dispatcher->fallback;
> >> +
> >> +       return cb;
> >> +}
> >> +
> >> +static void
> >> +red_dispatch_events(struct rte_event_dispatcher *dispatcher,
> >> +                   struct rte_event *events, uint16_t num_events)
> >> +{
> >> +       uint16_t cb_start;
> >> +       uint16_t cb_len;
> >> +
> >> +       for (cb_start = 0; cb_start < num_events; cb_start += cb_len) {
> >> +               uint16_t cb_end = cb_start;
> >> +               uint8_t queue_id = events[cb_start].queue_id;
> >> +               struct rte_event_dispatcher_cb *cb;
> >> +
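> >> +               /* Advance cb_end past the run of consecutive
> >> +                * events sharing this queue id; the whole run is
> >> +                * then handed to a single callback invocation below.
> >> +                */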
> >> +               while (++cb_end < num_events &&
> >> +                      events[cb_end].queue_id == queue_id)
> >> +                       ;
> >> +
> >> +               cb_len = cb_end - cb_start;
> >> +
> >> +               cb = red_lookup_cb(dispatcher, queue_id);
> >> +
> >> +               if (unlikely(cb->cb_fun == NULL)) {
> >> +                       RTE_EDEV_LOG_ERR("Attempted to dispatch %d events "
> >> +                                        "for queue id %d, but no queue or "
> >> +                                        "fallback cb was configured\n",
> >> +                                        cb_len, queue_id);
> >> +                       continue;
> >> +               }
> >> +
> >> +               cb->cb_fun(&events[cb_start], cb_len, cb->cb_data);
> >> +       }
> >> +}
> >> +
> >> +static void
> >> +red_port_dequeue(struct rte_event_dispatcher *dispatcher,
> >> +                struct rte_event_dispatcher_lcore_port *port)
> >> +{
> >> +       uint16_t batch_size = port->batch_size;
> >> +       struct rte_event events[batch_size];
> >> +       uint16_t n;
> >> +
> >> +       n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
> >> +                                   events, batch_size, port->timeout);
> >> +
> >> +       red_dispatch_events(dispatcher, events, n);
> >> +}
> >> +
> >> +static int32_t
> >> +red_lcore_process(void *userdata)
> >> +{
> >> +       uint16_t i;
> >> +       struct rte_event_dispatcher *dispatcher = userdata;
> >> +       unsigned int lcore_id = rte_lcore_id();
> >> +       struct rte_event_dispatcher_lcore *lcore =
> >> +               &dispatcher->lcores[lcore_id];
> >> +
> >> +       for (i = 0; i < lcore->num_ports; i++) {
> >> +               struct rte_event_dispatcher_lcore_port *port =
> >> +                       &lcore->ports[i];
> >> +
> >> +               red_port_dequeue(dispatcher, port);
> >> +       }
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +static int
> >> +red_service_runstate_set(uint32_t service_id, int state)
> >> +{
> >> +       int rc;
> >> +
> >> +       rc = rte_service_component_runstate_set(service_id, state);
> >> +
> >> +       if (rc)
> >> +               RTE_EDEV_LOG_ERR("Error %d occurred while setting service "
> >> +                                "component run state to %d\n", rc, state);
> >> +
> >> +       return rc;
> >> +}
> >> +
> >> +static int
> >> +red_service_register(struct rte_event_dispatcher *dispatcher)
> >> +{
> >> +       struct rte_service_spec service = {
> >> +               .callback = red_lcore_process,
> >> +               .callback_userdata = dispatcher,
> >> +               .capabilities = RTE_SERVICE_CAP_MT_SAFE,
> >> +               .socket_id = dispatcher->socket_id
> >> +       };
> >> +       int rc;
> >> +
> >> +       snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "red_%d",
> >> +                dispatcher->id);
> >> +
> >> +       rc = rte_service_component_register(&service, &dispatcher->service_id);
> >> +
> >> +       if (rc) {
> >> +               RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
> >> +                                "%s failed with error code %d\n",
> >> +                                service.name, rc);
> >> +               return rc;
> >> +       }
> >> +
> >> +       rc = red_service_runstate_set(dispatcher->service_id, 1);
> >> +
> >> +       if (rc)
> >> +               rte_service_component_unregister(dispatcher->service_id);
> >> +
> >> +       return rc;
> >> +}
> >> +
> >> +static int
> >> +red_service_unregister(struct rte_event_dispatcher *dispatcher)
> >> +{
> >> +       int rc;
> >> +
> >> +       rc = red_service_runstate_set(dispatcher->service_id, 0);
> >> +
> >> +       if (rc)
> >> +               return rc;
> >> +
> >> +       rc = rte_service_component_unregister(dispatcher->service_id);
> >> +
> >> +       if (rc)
> >> +               RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
> >> +                                "failed with error code %d\n", rc);
> >> +
> >> +       return rc;
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
> >> +{
> >> +       int socket_id;
> >> +       struct rte_event_dispatcher *dispatcher;
> >> +       int rc;
> >> +
> >> +       if (red_has_dispatcher(id)) {
> >> +               RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
> >> +                                id);
> >> +               return -EEXIST;
> >> +       }
> >> +
> >> +       socket_id = rte_event_dev_socket_id(event_dev_id);
> >> +
> >> +       dispatcher =
> >> +               rte_malloc_socket("event dispatcher",
> >> +                                 sizeof(struct rte_event_dispatcher),
> >> +                                 RTE_CACHE_LINE_SIZE, socket_id);
> >> +
> >> +       if (dispatcher == NULL) {
> >> +               RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
> >> +                                "dispatcher\n");
> >> +               return -ENOMEM;
> >> +       }
> >> +
> >> +       *dispatcher = (struct rte_event_dispatcher) {
> >> +               .id = id,
> >> +               .event_dev_id = event_dev_id,
> >> +               .socket_id = socket_id
> >> +       };
> >> +
> >> +       rc = red_service_register(dispatcher);
> >> +
> >> +       if (rc < 0) {
> >> +               rte_free(dispatcher);
> >> +               return rc;
> >> +       }
> >> +
> >> +       red_set_dispatcher(id, dispatcher);
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_free(uint8_t id)
> >> +{
> >> +       struct rte_event_dispatcher *dispatcher;
> >> +       int rc;
> >> +
> >> +       RED_VALID_ID_OR_RET_EINVAL(id);
> >> +       dispatcher = red_get_dispatcher(id);
> >> +
> >> +       rc = red_service_unregister(dispatcher);
> >> +
> >> +       if (rc)
> >> +               return rc;
> >> +
> >> +       red_set_dispatcher(id, NULL);
> >> +
> >> +       rte_free(dispatcher);
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
> >> +{
> >> +       struct rte_event_dispatcher *dispatcher;
> >> +
> >> +       RED_VALID_ID_OR_RET_EINVAL(id);
> >> +       dispatcher = red_get_dispatcher(id);
> >> +
> >> +       *service_id = dispatcher->service_id;
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +static int16_t
> >> +lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
> >> +                uint8_t event_port_id)
> >> +{
> >> +       uint16_t i;
> >> +
> >> +       for (i = 0; i < lcore->num_ports; i++) {
> >> +               struct rte_event_dispatcher_lcore_port *port =
> >> +                       &lcore->ports[i];
> >> +               if (port->port_id == event_port_id)
> >> +                       return i;
> >> +       }
> >> +
> >> +       return -1;
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
> >> +                                       uint16_t batch_size, uint64_t timeout,
> >> +                                       unsigned int lcore_id)
> >> +{
> >> +       struct rte_event_dispatcher *dispatcher;
> >> +       struct rte_event_dispatcher_lcore *lcore;
> >> +       struct rte_event_dispatcher_lcore_port *port;
> >> +
> >> +       RED_VALID_ID_OR_RET_EINVAL(id);
> >> +       dispatcher = red_get_dispatcher(id);
> >> +
> >> +       lcore = &dispatcher->lcores[lcore_id];
> >> +
> >> +       if (lcore->num_ports == RED_MAX_PORTS_PER_LCORE)
> >> +               return -ENOMEM;
> >> +
> >> +       if (lcore_port_index(lcore, event_port_id) >= 0)
> >> +               return -EEXIST;
> >> +
> >> +       port = &lcore->ports[lcore->num_ports];
> >> +
> >> +       *port = (struct rte_event_dispatcher_lcore_port) {
> >> +               .port_id = event_port_id,
> >> +               .batch_size = batch_size,
> >> +               .timeout = timeout
> >> +       };
> >> +
> >> +       lcore->num_ports++;
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
> >> +                                           unsigned int lcore_id)
> >> +{
> >> +       struct rte_event_dispatcher *dispatcher;
> >> +       struct rte_event_dispatcher_lcore *lcore;
> >> +       int16_t port_idx;
> >> +       struct rte_event_dispatcher_lcore_port *port;
> >> +       struct rte_event_dispatcher_lcore_port *last;
> >> +
> >> +       RED_VALID_ID_OR_RET_EINVAL(id);
> >> +       dispatcher = red_get_dispatcher(id);
> >> +
> >> +       lcore = &dispatcher->lcores[lcore_id];
> >> +
> >> +       port_idx = lcore_port_index(lcore, event_port_id);
> >> +
> >> +       if (port_idx < 0)
> >> +               return -ENOENT;
> >> +
> >> +       port = &lcore->ports[port_idx];
> >> +       last = &lcore->ports[lcore->num_ports - 1];
> >> +
> >> +       if (port != last)
> >> +               *port = *last;
> >> +
> >> +       lcore->num_ports--;
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +static int
> >> +red_cb_manage(uint8_t id, uint8_t queue_id, bool reg, bool fallback,
> >> +          rte_event_dispatcher_cb_t cb_fun, void *cb_data)
> >> +{
> >> +       struct rte_event_dispatcher *dispatcher;
> >> +       struct rte_event_dispatcher_cb *cb;
> >> +
> >> +       RED_VALID_ID_OR_RET_EINVAL(id);
> >> +       dispatcher = red_get_dispatcher(id);
> >> +
> >> +       if (fallback)
> >> +               cb = &dispatcher->fallback;
> >> +       else
> >> +               cb = &dispatcher->queue_cbs[queue_id];
> >> +
> >> +       if (reg && cb->cb_fun != NULL)
> >> +               return -EEXIST;
> >> +
> >> +       if (!reg && cb->cb_fun == NULL)
> >> +               return -ENOENT;
> >> +
> >> +       *cb = (struct rte_event_dispatcher_cb) {
> >> +               .cb_fun = cb_fun,
> >> +               .cb_data = cb_data
> >> +       };
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_register(uint8_t id, uint8_t queue_id,
> >> +                             rte_event_dispatcher_cb_t cb_fun, void *cb_data)
> >> +{
> >> +       return red_cb_manage(id, queue_id, true, false, cb_fun, cb_data);
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_unregister(uint8_t id, uint8_t queue_id)
> >> +{
> >> +       return red_cb_manage(id, queue_id, false, false, NULL, NULL);
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_register_fallback(uint8_t id,
> >> +                                      rte_event_dispatcher_cb_t cb_fun,
> >> +                                      void *cb_data)
> >> +{
> >> +       return red_cb_manage(id, 0, true, true, cb_fun, cb_data);
> >> +}
> >> +
> >> +int
> >> +rte_event_dispatcher_unregister_fallback(uint8_t id)
> >> +{
> >> +       return red_cb_manage(id, 0, false, true, NULL, NULL);
> >> +}
> >> diff --git a/lib/librte_eventdev/rte_event_dispatcher.h b/lib/librte_eventdev/rte_event_dispatcher.h
> >> new file mode 100644
> >> index 0000000000..11f57571ab
> >> --- /dev/null
> >> +++ b/lib/librte_eventdev/rte_event_dispatcher.h
> >> @@ -0,0 +1,251 @@
> >> +/* SPDX-License-Identifier: BSD-3-Clause
> >> + * Copyright(c) 2021 Ericsson AB
> >> + */
> >> +
> >> +#ifndef __RTE_EVENT_DISPATCHER_H__
> >> +#define __RTE_EVENT_DISPATCHER_H__
> >> +
> >> +/**
> >> + * @file
> >> + *
> >> + * RTE Event Dispatcher
> >> + *
> >> + */
> >> +
> >> +#ifdef __cplusplus
> >> +extern "C" {
> >> +#endif
> >> +
> >> +#include <rte_eventdev.h>
> >> +
> >> +/**
> >> + * Function prototype for dispatcher callbacks.
> >> + *
> >> + * @param events
> >> + *  Pointer to an array of events.
> >> + *
> >> + * @param num
> >> + *  The number of events in the @p events array.
> >> + *
> >> + * @param cb_data
> >> + *  The pointer supplied by the application in
> >> + *  rte_event_dispatcher_register() or
> >> + *  rte_event_dispatcher_register_fallback().
> >> + */
> >> +
> >> +typedef void (*rte_event_dispatcher_cb_t)(struct rte_event *events,
> >> +                                         uint16_t num, void *cb_data);
> >> +
> >> +/**
> >> + * Create an event dispatcher with the specified id.
> >> + *
> >> + * @param id
> >> + *  An application-specified, unique (across all event dispatcher
> >> + *  instances) identifier.
> >> + *
> >> + * @param event_dev_id
> >> + *  The identifier of the event device from which this event dispatcher
> >> + *  will dequeue events.
> >> + *
> >> + * @return
> >> + *   - 0: Success
> >> + *   - <0: Error code on failure
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
> >> +
> >> +/**
> >> + * Free an event dispatcher with the specified id.
> >> + *
> >> + * @param id
> >> + *  The event dispatcher identifier.
> >> + *
> >> + * @return
> >> + *   - 0: Success
> >> + *   - <0: Error code on failure
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_free(uint8_t id);
> >> +
> >> +/**
> >> + * Retrieve the service identifier of the event dispatcher.
> >> + *
> >> + * @param id
> >> + *  The event dispatcher identifier.
> >> + *
> >> + * @param [out] service_id
> >> + *  A pointer to a caller-supplied buffer where the event dispatcher's
> >> + *  service id will be stored.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
> >> +
> >> +/**
> >> + * Binds an event device port to a specific lcore on the specified
> >> + * event dispatcher.
> >> + *
> >> + * This function configures an event dispatcher to dequeue events from
> >> + * an event device port (as specified by @p event_port_id), in case
> >> + * its service function is run on a particular lcore (as specified by @p
> >> + * lcore_id).
> >> + *
> >> + * Multiple event device ports may be bound to the same lcore. A
> >> + * particular port may only be bound to one lcore.
> >> + *
> >> + * If the event dispatcher service is mapped (with
> >> + * rte_service_map_lcore_set()) to a lcore for which no ports are
> >> + * bound, the service function will be a no-operation.
> >> + *
> >> + * @param id
> >> + *  The event dispatcher identifier.
> >> + *
> >> + * @param event_port_id
> >> + *  The event device port identifier.
> >> + *
> >> + * @param batch_size
> >> + *  The batch size to use in rte_event_dequeue_burst(), for the
> >> + *  configured event device port and lcore.
> >> + *
> >> + * @param timeout
> >> + *  The timeout parameter to use in rte_event_dequeue_burst(), for the
> >> + *  configured event device port and lcore.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
> >> +                                       uint16_t batch_size, uint64_t timeout,
> >> +                                       unsigned int lcore_id);
> >> +
> >> +/**
> >> + * Unbind an event device port from a specific lcore.
> >> + *
> >> + * @param id
> >> + *  The event dispatcher identifier.
> >> + *
> >> + * @param event_port_id
> >> + *  The event device port identifier.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
> >> +                                           unsigned int lcore_id);
> >> +
> >> +/**
> >> + * Register a callback function for the specified queue identifier.
> >> + *
> >> + * At most one callback may be registered per queue id.
> >> + *
> >> + * The same callback function may be registered for multiple queue ids.
> >> + *
> >> + * For each callback invocation, events belonging to a single queue id
> >> + * will be dispatched.
> >> + *
> >> + * @param id
> >> + *  The event dispatcher identifier.
> >> + *
> >> + * @param queue_id
> >> + *  The event device queue id for which @p cb_fun should be called.
> >> + *
> >> + * @param cb_fun
> >> + *  The callback function.
> >> + *
> >> + * @param cb_data
> >> + *  A pointer to some application-specific opaque data (or NULL),
> >> + *  which is supplied back to the application in the callback.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_register(uint8_t id, uint8_t queue_id,
> >> +                             rte_event_dispatcher_cb_t cb_fun, void *cb_data);
> >> +
> >> +/**
> >> + * Unregister a callback function for the specified queue identifier.
> >> + *
> >> + * @param id
> >> + *  The event dispatcher identifier.
> >> + *
> >> + * @param queue_id
> >> + *  The event device queue id for which the callback should be removed.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_unregister(uint8_t id, uint8_t queue_id);
> >> +
> >> +/**
> >> + * Register a fallback callback function for the specified queue
> >> + * identifier.
> >> + *
> >> + * Only events for which no queue-specific callback function is
> >> + * registered will be dispatched to the @p cb_fun callback.
> >> + *
> >> + * At most one fallback callback function may be registered.
> >> + *
> >> + * For each callback invocation, only events belonging to a single
> >> + * queue id will be included.
> >> + *
> >> + * If the event dispatcher encounters an event with a queue id for
> >> + * which the application has not registered any specific callback, and
> >> + * there is also no fallback configured, the event will be dropped.
> >> + *
> >> + * @param id
> >> + *  The event dispatcher identifier.
> >> + *
> >> + * @param cb_fun
> >> + *  The fallback callback function.
> >> + *
> >> + * @param cb_data
> >> + *  A pointer to some application-specific opaque data (or NULL),
> >> + *  which is supplied back to the application in the callback.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_register_fallback(uint8_t id,
> >> +                                      rte_event_dispatcher_cb_t cb_fun,
> >> +                                      void *cb_data);
> >> +
> >> +/**
> >> + * Unregister the fallback callback function.
> >> + *
> >> + * @param id
> >> + *  The event dispatcher identifier.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_event_dispatcher_unregister_fallback(uint8_t id);
> >> +
> >> +#ifdef __cplusplus
> >> +}
> >> +#endif
> >> +
> >> +#endif /* __RTE_EVENT_DISPATCHER_H__ */
> >> diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
> >> index 91a62cd077..dcb887601b 100644
> >> --- a/lib/librte_eventdev/rte_eventdev_version.map
> >> +++ b/lib/librte_eventdev/rte_eventdev_version.map
> >> @@ -134,4 +134,14 @@ EXPERIMENTAL {
> >>          __rte_eventdev_trace_crypto_adapter_queue_pair_del;
> >>          __rte_eventdev_trace_crypto_adapter_start;
> >>          __rte_eventdev_trace_crypto_adapter_stop;
> >> +
> >> +       rte_event_dispatcher_create;
> >> +       rte_event_dispatcher_free;
> >> +       rte_event_dispatcher_service_id_get;
> >> +       rte_event_dispatcher_bind_port_to_lcore;
> >> +       rte_event_dispatcher_unbind_port_from_lcore;
> >> +       rte_event_dispatcher_register;
> >> +       rte_event_dispatcher_unregister;
> >> +       rte_event_dispatcher_register_fallback;
> >> +       rte_event_dispatcher_unregister_fallback;
> >>   };
> >> --
> >> 2.25.1
> >>
>

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
  2021-03-07 13:04     ` Jerin Jacob
@ 2021-03-15 14:44       ` Mattias Rönnblom
  2021-03-15 15:00         ` Van Haaren, Harry
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2021-03-15 14:44 UTC (permalink / raw)
  To: Jerin Jacob; +Cc: Jerin Jacob, dpdk-dev, Richardson, Bruce

On 2021-03-07 14:04, Jerin Jacob wrote:
> On Fri, Feb 26, 2021 at 1:31 PM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>> On 2021-02-25 13:32, Jerin Jacob wrote:
>>> On Fri, Feb 19, 2021 at 12:00 AM Mattias Rönnblom
>>> <mattias.ronnblom@ericsson.com> wrote:
>>>> The purpose of the event dispatcher is primarily to decouple different
>>>> parts of an application (e.g., processing pipeline stages), which
>>>> share the same underlying event device.
>>>>
>>>> The event dispatcher replaces the conditional logic (often, a switch
>>>> statement) that typically follows an event device dequeue operation,
>>>> where events are dispatched to different parts of the application
>>>> based on the destination queue id.
>>> # If the device has all type queues[1], this RFC would restrict the
>>> application to using a queue ONLY as a stage. A stage can be a queue
>>> type also. How can we abstract this in this model?
>>
>> "All queue type" is about scheduling policy. I would think that would be
>> independent of the "logical endpoint" of the event (i.e., the queue id).
>> I feel like I'm missing something here.
> Each queue type can also be represented as a stage.
> For example, if the system has only one queue, the typical IPsec
> outbound stages can be
> Q0(Ordered) (SA lookup) -> Q0(Atomic) (sequence number update) ->
> Q0(Ordered) (crypto operation) -> Q0(Atomic) (send on wire)


OK, this makes sense.


Would such an application want to add a callback 
per-queue-per-sched-type, or just per-sched-type? In your example, if 
you had a queue Q1 as well, would you want the option to have 
different callbacks for atomic-type events on Q0 and Q1?


Would you want to dispatch based on anything else in the event? You 
could basically do it on any field (flow id, priority, etc.), but is 
there some other field that's commonly used to denote a processing stage?
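
For concreteness, a hypothetical sketch (none of the types or names
below are in the RFC) of a more general binding, where the application
supplies a match predicate over arbitrary event fields:

#include <stdbool.h>
#include <stdint.h>

#include <rte_eventdev.h>
#include <rte_event_dispatcher.h>

typedef bool (*match_cb_t)(const struct rte_event *event, void *match_data);

struct handler {
	match_cb_t match;	/* selects the events this handler takes */
	void *match_data;
	rte_event_dispatcher_cb_t cb_fun; /* as in the RFC header */
	void *cb_data;
};

/* example predicate: atomic-type events on queue 0 */
static bool
match_q0_atomic(const struct rte_event *event, void *match_data)
{
	(void)match_data;
	return event->queue_id == 0 &&
		event->sched_type == RTE_SCHED_TYPE_ATOMIC;
}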


>>
>>> # Also, I think it may make sense to add this type of infrastructure
>>> as helper functions, as these are built on top of existing APIs, i.e.,
>>> there is no support required from the driver to establish this model.
>>> IMO, if we need to add such support as one fixed set of functionality,
>>> we could have helper APIs to express a certain usage of eventdev,
>>> rather than defining that as the only way to do it. I think a helper
>>> function can be used as an abstraction to define this kind of model.
>>>
>>> # Also, there is function pointer overhead and aggregation of events
>>> in the implementation. That may not always be "the" optimized model of
>>> making it work vs. a switch case in the application.
>>
>> Sure, but what to do in a reasonable generic framework?
>>
>>
>> If you are very sensitive to that 20 cc or whatever function pointer
>> call, you won't use this library. Or you will, and use static linking
>> and LTO to get rid of that overhead.
>>
>>
>> Probably, you have a few queues, not many. Probably, your dequeue bursts
>> are large when the system load is high (and otherwise, you don't care
>> about efficiency). Then, you will have at least a couple of events per
>> function call.
> I am fine with this library and exposing it as a function pointer, if
> someone needs to have a "helper" function to model the system around
> this logic.
>
> This RFC looks good to me in general. I would suggest making it as:
>
> - Helper functions, i.e., if someone chooses to write a stage in this
> way, it can be enabled through these helper functions. Framing it as a
> helper function makes clear that this is one way to do the stuff, but
> NOT THE ONLY WAY.
> - Abstract stages as a queue (which is already added in the patch),
> and each type in the queue for the all type queue case.
> - Enhance test-eventdev to showcase the functionality and performance
> of these helpers.
>
> Thanks for the RFC.
>
>>
>>> [1]
>>> See RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES in
>>> https://doc.dpdk.org/guides/prog_guide/eventdev.html
>>>
>>>
>>>> The concept is similar to a UNIX file descriptor event loop library.
>>>> Instead of tying callback functions to fds as for example libevent
>>>> does, the event dispatcher binds callbacks to queue ids.
>>>>
>>>> An event dispatcher is configured to dequeue events from a specific
>>>> event device, and ties into the service core framework, to do its (and
>>>> the application's) work.
>>>>
>>>> The event dispatcher provides a convenient way for an eventdev-based
>>>> application to use service cores for application-level processing, and
>>>> thus for sharing those cores with other DPDK services.
>>>>
>>>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>>>> ---
>>>>    lib/librte_eventdev/Makefile                 |   2 +
>>>>    lib/librte_eventdev/meson.build              |   6 +-
>>>>    lib/librte_eventdev/rte_event_dispatcher.c   | 420 +++++++++++++++++++
>>>>    lib/librte_eventdev/rte_event_dispatcher.h   | 251 +++++++++++
>>>>    lib/librte_eventdev/rte_eventdev_version.map |  10 +
>>>>    5 files changed, 687 insertions(+), 2 deletions(-)
>>>>    create mode 100644 lib/librte_eventdev/rte_event_dispatcher.c
>>>>    create mode 100644 lib/librte_eventdev/rte_event_dispatcher.h
>>>>
>>>> diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
>>>> index 0715256bb4..614d53af1b 100644
>>>> --- a/lib/librte_eventdev/Makefile
>>>> +++ b/lib/librte_eventdev/Makefile
>>>> @@ -26,6 +26,7 @@ SRCS-y += rte_event_eth_rx_adapter.c
>>>>    SRCS-y += rte_event_timer_adapter.c
>>>>    SRCS-y += rte_event_crypto_adapter.c
>>>>    SRCS-y += rte_event_eth_tx_adapter.c
>>>> +SRCS-y += rte_event_dispatcher.c
>>>>
>>>>    # export include files
>>>>    SYMLINK-y-include += rte_eventdev.h
>>>> @@ -40,6 +41,7 @@ SYMLINK-y-include += rte_event_timer_adapter.h
>>>>    SYMLINK-y-include += rte_event_timer_adapter_pmd.h
>>>>    SYMLINK-y-include += rte_event_crypto_adapter.h
>>>>    SYMLINK-y-include += rte_event_eth_tx_adapter.h
>>>> +SYMLINK-y-include += rte_event_dispatcher.h
>>>>
>>>>    # versioning export map
>>>>    EXPORT_MAP := rte_eventdev_version.map
>>>> diff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build
>>>> index d1f25ee5ca..2ca81983b5 100644
>>>> --- a/lib/librte_eventdev/meson.build
>>>> +++ b/lib/librte_eventdev/meson.build
>>>> @@ -13,7 +13,8 @@ sources = files('rte_eventdev.c',
>>>>                   'rte_event_eth_rx_adapter.c',
>>>>                   'rte_event_timer_adapter.c',
>>>>                   'rte_event_crypto_adapter.c',
>>>> -               'rte_event_eth_tx_adapter.c')
>>>> +               'rte_event_eth_tx_adapter.c',
>>>> +               'rte_event_dispatcher.c')
>>>>    headers = files('rte_eventdev.h',
>>>>                   'rte_eventdev_pmd.h',
>>>>                   'rte_eventdev_pmd_pci.h',
>>>> @@ -25,5 +26,6 @@ headers = files('rte_eventdev.h',
>>>>                   'rte_event_timer_adapter.h',
>>>>                   'rte_event_timer_adapter_pmd.h',
>>>>                   'rte_event_crypto_adapter.h',
>>>> -               'rte_event_eth_tx_adapter.h')
>>>> +               'rte_event_eth_tx_adapter.h',
>>>> +               'rte_event_dispatcher.h')
>>>>    deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
>>>> diff --git a/lib/librte_eventdev/rte_event_dispatcher.c b/lib/librte_eventdev/rte_event_dispatcher.c
>>>> new file mode 100644
>>>> index 0000000000..1c7e55a752
>>>> --- /dev/null
>>>> +++ b/lib/librte_eventdev/rte_event_dispatcher.c
>>>> @@ -0,0 +1,420 @@
>>>> +/* SPDX-License-Identifier: BSD-3-Clause
>>>> + * Copyright(c) 2021 Ericsson AB
>>>> + */
>>>> +
>>>> +#include <stdbool.h>
>>>> +#include <stdint.h>
>>>> +
>>>> +#include <rte_lcore.h>
>>>> +#include <rte_service_component.h>
>>>> +#include <rte_eventdev_pmd.h>
>>>> +
>>>> +#include <rte_event_dispatcher.h>
>>>> +
>>>> +#define RED_MAX_PORTS_PER_LCORE (4)
>>>> +
>>>> +struct rte_event_dispatcher_lcore_port {
>>>> +       uint8_t port_id;
>>>> +       uint16_t batch_size;
>>>> +       uint64_t timeout;
>>>> +};
>>>> +
>>>> +struct rte_event_dispatcher_lcore {
>>>> +       uint8_t num_ports;
>>>> +       struct rte_event_dispatcher_lcore_port ports[RED_MAX_PORTS_PER_LCORE];
>>>> +};
>>>> +
>>>> +struct rte_event_dispatcher_cb {
>>>> +       rte_event_dispatcher_cb_t cb_fun;
>>>> +       void *cb_data;
>>>> +};
>>>> +
>>>> +struct rte_event_dispatcher {
>>>> +       uint8_t id;
>>>> +       uint8_t event_dev_id;
>>>> +       int socket_id;
>>>> +       uint32_t service_id;
>>>> +       struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
>>>> +       struct rte_event_dispatcher_cb queue_cbs[UINT8_MAX];
>>>> +       struct rte_event_dispatcher_cb fallback;
>>>> +};
>>>> +
>>>> +static struct rte_event_dispatcher *dispatchers[UINT8_MAX];
>>>> +
>>>> +static bool
>>>> +red_has_dispatcher(uint8_t id)
>>>> +{
>>>> +       return dispatchers[id] != NULL;
>>>> +}
>>>> +
>>>> +static struct rte_event_dispatcher *
>>>> +red_get_dispatcher(uint8_t id)
>>>> +{
>>>> +       return dispatchers[id];
>>>> +}
>>>> +
>>>> +static void
>>>> +red_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
>>>> +{
>>>> +       dispatchers[id] = dispatcher;
>>>> +}
>>>> +
>>>> +#define RED_VALID_ID_OR_RET_EINVAL(id)                                 \
>>>> +       do {                                                            \
>>>> +               if (unlikely(!red_has_dispatcher(id))) {                \
>>>> +                       RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
>>>> +                       return -EINVAL;                                 \
>>>> +               }                                                       \
>>>> +       } while (0)
>>>> +
>>>> +static struct rte_event_dispatcher_cb *
>>>> +red_lookup_cb(struct rte_event_dispatcher *dispatcher, uint8_t queue_id)
>>>> +{
>>>> +       struct rte_event_dispatcher_cb *cb = &dispatcher->queue_cbs[queue_id];
>>>> +
>>>> +       if (unlikely(cb->cb_fun == NULL))
>>>> +           cb = &dispatcher->fallback;
>>>> +
>>>> +       return cb;
>>>> +}
>>>> +
>>>> +static void
>>>> +red_dispatch_events(struct rte_event_dispatcher *dispatcher,
>>>> +                   struct rte_event *events, uint16_t num_events)
>>>> +{
>>>> +       uint16_t cb_start;
>>>> +       uint16_t cb_len;
>>>> +
>>>> +       for (cb_start = 0; cb_start < num_events; cb_start += cb_len) {
>>>> +               uint16_t cb_end = cb_start;
>>>> +               uint8_t queue_id = events[cb_start].queue_id;
>>>> +               struct rte_event_dispatcher_cb *cb;
>>>> +
>>>> +               while (++cb_end < num_events &&
>>>> +                      events[cb_end].queue_id == queue_id)
>>>> +                       ;
>>>> +
>>>> +               cb_len = cb_end - cb_start;
>>>> +
>>>> +               cb = red_lookup_cb(dispatcher, queue_id);
>>>> +
>>>> +               if (unlikely(cb->cb_fun == NULL)) {
>>>> +                       RTE_EDEV_LOG_ERR("Attempted to dispatch %d events "
>>>> +                                        "for queue id %d, but no queue or "
>>>> +                                        "fallback cb were configured\n",
>>>> +                                        cb_len, queue_id);
>>>> +                       continue;
>>>> +               }
>>>> +
>>>> +               cb->cb_fun(&events[cb_start], cb_len, cb->cb_data);
>>>> +       }
>>>> +}
>>>> +
>>>> +static void
>>>> +red_port_dequeue(struct rte_event_dispatcher *dispatcher,
>>>> +                struct rte_event_dispatcher_lcore_port *port)
>>>> +{
>>>> +       uint16_t batch_size = port->batch_size;
>>>> +       struct rte_event events[batch_size];
>>>> +       uint16_t n;
>>>> +
>>>> +       n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
>>>> +                                   events, batch_size, port->timeout);
>>>> +
>>>> +       red_dispatch_events(dispatcher, events, n);
>>>> +}
>>>> +
>>>> +static int32_t
>>>> +red_lcore_process(void *userdata)
>>>> +{
>>>> +       uint16_t i;
>>>> +       struct rte_event_dispatcher *dispatcher = userdata;
>>>> +       unsigned int lcore_id = rte_lcore_id();
>>>> +       struct rte_event_dispatcher_lcore *lcore =
>>>> +               &dispatcher->lcores[lcore_id];
>>>> +
>>>> +       for (i = 0; i < lcore->num_ports; i++) {
>>>> +               struct rte_event_dispatcher_lcore_port *port =
>>>> +                       &lcore->ports[i];
>>>> +
>>>> +               red_port_dequeue(dispatcher, port);
>>>> +       }
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +static int
>>>> +red_service_runstate_set(uint32_t service_id, int state)
>>>> +{
>>>> +       int rc;
>>>> +
>>>> +       rc = rte_service_component_runstate_set(service_id, state);
>>>> +
>>>> +       if (rc)
>>>> +               RTE_EDEV_LOG_ERR("Error %d occurred while setting service "
>>>> +                                "component run state to %d\n", rc, state);
>>>> +
>>>> +       return rc;
>>>> +}
>>>> +
>>>> +static int
>>>> +red_service_register(struct rte_event_dispatcher *dispatcher)
>>>> +{
>>>> +       struct rte_service_spec service = {
>>>> +               .callback = red_lcore_process,
>>>> +               .callback_userdata = dispatcher,
>>>> +               .capabilities = RTE_SERVICE_CAP_MT_SAFE,
>>>> +               .socket_id = dispatcher->socket_id
>>>> +       };
>>>> +       int rc;
>>>> +
>>>> +       snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "red_%d",
>>>> +                dispatcher->id);
>>>> +
>>>> +       rc = rte_service_component_register(&service, &dispatcher->service_id);
>>>> +
>>>> +       if (rc) {
>>>> +               RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
>>>> +                                "%s failed with error code %d\n",
>>>> +                                service.name, rc);
>>>> +               return rc;
>>>> +       }
>>>> +
>>>> +       rc = red_service_runstate_set(dispatcher->service_id, 1);
>>>> +
>>>> +       if (rc)
>>>> +               rte_service_component_unregister(dispatcher->service_id);
>>>> +
>>>> +       return rc;
>>>> +}
>>>> +
>>>> +static int
>>>> +red_service_unregister(struct rte_event_dispatcher *dispatcher)
>>>> +{
>>>> +       int rc;
>>>> +
>>>> +       rc = red_service_runstate_set(dispatcher->service_id, 0);
>>>> +
>>>> +       if (rc)
>>>> +               return rc;
>>>> +
>>>> +       rc = rte_service_component_unregister(dispatcher->service_id);
>>>> +
>>>> +       if (rc)
>>>> +               RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
>>>> +                                "failed with error code %d\n", rc);
>>>> +
>>>> +       return rc;
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
>>>> +{
>>>> +       int socket_id;
>>>> +       struct rte_event_dispatcher *dispatcher;
>>>> +       int rc;
>>>> +
>>>> +       if (red_has_dispatcher(id)) {
>>>> +               RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
>>>> +                                id);
>>>> +               return -EEXIST;
>>>> +       }
>>>> +
>>>> +       socket_id = rte_event_dev_socket_id(event_dev_id);
>>>> +
>>>> +       dispatcher =
>>>> +               rte_malloc_socket("event dispatcher",
>>>> +                                 sizeof(struct rte_event_dispatcher),
>>>> +                                 RTE_CACHE_LINE_SIZE, socket_id);
>>>> +
>>>> +       if (dispatcher == NULL) {
>>>> +               RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
>>>> +                                "dispatcher\n");
>>>> +               return -ENOMEM;
>>>> +       }
>>>> +
>>>> +       *dispatcher = (struct rte_event_dispatcher) {
>>>> +               .id = id,
>>>> +               .event_dev_id = event_dev_id,
>>>> +               .socket_id = socket_id
>>>> +       };
>>>> +
>>>> +       rc = red_service_register(dispatcher);
>>>> +
>>>> +       if (rc < 0) {
>>>> +               rte_free(dispatcher);
>>>> +               return rc;
>>>> +       }
>>>> +
>>>> +       red_set_dispatcher(id, dispatcher);
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_free(uint8_t id)
>>>> +{
>>>> +       struct rte_event_dispatcher *dispatcher;
>>>> +       int rc;
>>>> +
>>>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>>>> +       dispatcher = red_get_dispatcher(id);
>>>> +
>>>> +       rc = red_service_unregister(dispatcher);
>>>> +
>>>> +       if (rc)
>>>> +               return rc;
>>>> +
>>>> +       red_set_dispatcher(id, NULL);
>>>> +
>>>> +       rte_free(dispatcher);
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
>>>> +{
>>>> +       struct rte_event_dispatcher *dispatcher;
>>>> +
>>>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>>>> +       dispatcher = red_get_dispatcher(id);
>>>> +
>>>> +       *service_id = dispatcher->service_id;
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +static int16_t
>>>> +lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
>>>> +                uint8_t event_port_id)
>>>> +{
>>>> +       uint16_t i;
>>>> +
>>>> +       for (i = 0; i < lcore->num_ports; i++) {
>>>> +               struct rte_event_dispatcher_lcore_port *port =
>>>> +                       &lcore->ports[i];
>>>> +               if (port->port_id == event_port_id)
>>>> +                       return i;
>>>> +       }
>>>> +
>>>> +       return -1;
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
>>>> +                                       uint16_t batch_size, uint64_t timeout,
>>>> +                                       unsigned int lcore_id)
>>>> +{
>>>> +       struct rte_event_dispatcher *dispatcher;
>>>> +       struct rte_event_dispatcher_lcore *lcore;
>>>> +       struct rte_event_dispatcher_lcore_port *port;
>>>> +
>>>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>>>> +       dispatcher = red_get_dispatcher(id);
>>>> +
>>>> +       lcore = &dispatcher->lcores[lcore_id];
>>>> +
>>>> +       if (lcore->num_ports == RED_MAX_PORTS_PER_LCORE)
>>>> +               return -ENOMEM;
>>>> +
>>>> +       if (lcore_port_index(lcore, event_port_id) >= 0)
>>>> +               return -EEXIST;
>>>> +
>>>> +       port = &lcore->ports[lcore->num_ports];
>>>> +
>>>> +       *port = (struct rte_event_dispatcher_lcore_port) {
>>>> +               .port_id = event_port_id,
>>>> +               .batch_size = batch_size,
>>>> +               .timeout = timeout
>>>> +       };
>>>> +
>>>> +       lcore->num_ports++;
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
>>>> +                                           unsigned int lcore_id)
>>>> +{
>>>> +       struct rte_event_dispatcher *dispatcher;
>>>> +       struct rte_event_dispatcher_lcore *lcore;
>>>> +       int16_t port_idx;
>>>> +       struct rte_event_dispatcher_lcore_port *port;
>>>> +       struct rte_event_dispatcher_lcore_port *last;
>>>> +
>>>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>>>> +       dispatcher = red_get_dispatcher(id);
>>>> +
>>>> +       lcore = &dispatcher->lcores[lcore_id];
>>>> +
>>>> +       port_idx = lcore_port_index(lcore, event_port_id);
>>>> +
>>>> +       if (port_idx < 0)
>>>> +               return -ENOENT;
>>>> +
>>>> +       port = &lcore->ports[port_idx];
>>>> +       last = &lcore->ports[lcore->num_ports - 1];
>>>> +
>>>> +       if (port != last)
>>>> +               *port = *last;
>>>> +
>>>> +       lcore->num_ports--;
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +static int
>>>> +red_cb_manage(uint8_t id, uint8_t queue_id, bool reg, bool fallback,
>>>> +          rte_event_dispatcher_cb_t cb_fun, void *cb_data)
>>>> +{
>>>> +       struct rte_event_dispatcher *dispatcher;
>>>> +       struct rte_event_dispatcher_cb *cb;
>>>> +
>>>> +       RED_VALID_ID_OR_RET_EINVAL(id);
>>>> +       dispatcher = red_get_dispatcher(id);
>>>> +
>>>> +       if (fallback)
>>>> +               cb = &dispatcher->fallback;
>>>> +       else
>>>> +               cb = &dispatcher->queue_cbs[queue_id];
>>>> +
>>>> +       if (reg && cb->cb_fun != NULL)
>>>> +               return -EEXIST;
>>>> +
>>>> +       if (!reg && cb->cb_fun == NULL)
>>>> +               return -ENOENT;
>>>> +
>>>> +       *cb = (struct rte_event_dispatcher_cb) {
>>>> +               .cb_fun = cb_fun,
>>>> +               .cb_data = cb_data
>>>> +       };
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_register(uint8_t id, uint8_t queue_id,
>>>> +                             rte_event_dispatcher_cb_t cb_fun, void *cb_data)
>>>> +{
>>>> +       return red_cb_manage(id, queue_id, true, false, cb_fun, cb_data);
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_unregister(uint8_t id, uint8_t queue_id)
>>>> +{
>>>> +       return red_cb_manage(id, queue_id, false, false, NULL, NULL);
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_register_fallback(uint8_t id,
>>>> +                                      rte_event_dispatcher_cb_t cb_fun,
>>>> +                                      void *cb_data)
>>>> +{
>>>> +       return red_cb_manage(id, 0, true, true, cb_fun, cb_data);
>>>> +}
>>>> +
>>>> +int
>>>> +rte_event_dispatcher_unregister_fallback(uint8_t id)
>>>> +{
>>>> +       return red_cb_manage(id, 0, false, true, NULL, NULL);
>>>> +}
>>>> diff --git a/lib/librte_eventdev/rte_event_dispatcher.h b/lib/librte_eventdev/rte_event_dispatcher.h
>>>> new file mode 100644
>>>> index 0000000000..11f57571ab
>>>> --- /dev/null
>>>> +++ b/lib/librte_eventdev/rte_event_dispatcher.h
>>>> @@ -0,0 +1,251 @@
>>>> +/* SPDX-License-Identifier: BSD-3-Clause
>>>> + * Copyright(c) 2021 Ericsson AB
>>>> + */
>>>> +
>>>> +#ifndef __RTE_EVENT_DISPATCHER_H__
>>>> +#define __RTE_EVENT_DISPATCHER_H__
>>>> +
>>>> +/**
>>>> + * @file
>>>> + *
>>>> + * RTE Event Dispatcher
>>>> + *
>>>> + */
>>>> +
>>>> +#ifdef __cplusplus
>>>> +extern "C" {
>>>> +#endif
>>>> +
>>>> +#include <rte_eventdev.h>
>>>> +
>>>> +/**
>>>> + * Function prototype for dispatcher callbacks.
>>>> + *
>>>> + * @param events
>>>> + *  Pointer to an array of events.
>>>> + *
>>>> + * @param num
>>>> + *  The number of events in the @p events array.
>>>> + *
>>>> + * @param cb_data
>>>> + *  The pointer supplied by the application in
>>>> + *  rte_event_dispatcher_register() or
>>>> + *  rte_event_dispatcher_register_fallback().
>>>> + */
>>>> +
>>>> +typedef void (*rte_event_dispatcher_cb_t)(struct rte_event *events,
>>>> +                                         uint16_t num, void *cb_data);
>>>> +
>>>> +/**
>>>> + * Create an event dispatcher with the specified id.
>>>> + *
>>>> + * @param id
>>>> + *  An application-specified, unique (across all event dispatcher
>>>> + *  instances) identifier.
>>>> + *
>>>> + * @param event_dev_id
>>>> + *  The identifier of the event device from which this event dispatcher
>>>> + *  will dequeue events.
>>>> + *
>>>> + * @return
>>>> + *   - 0: Success
>>>> + *   - <0: Error code on failure
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
>>>> +
>>>> +/**
>>>> + * Frees an event dispatcher with the specified id.
>>>> + *
>>>> + * @param id
>>>> + *  The event dispatcher identifier.
>>>> + *
>>>> + * @return
>>>> + *   - 0: Success
>>>> + *   - <0: Error code on failure
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_free(uint8_t id);
>>>> +
>>>> +/**
>>>> + * Retrieve the service identifier of the event dispatcher.
>>>> + *
>>>> + * @param id
>>>> + *  The event dispatcher identifier.
>>>> + *
>>>> + * @param [out] service_id
>>>> + *  A pointer to a caller-supplied buffer where the event dispatcher's
>>>> + *  service id will be stored.
>>>> + *
>>>> + * @return
>>>> + *  - 0: Success
>>>> + *  - <0: Error code on failure.
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
>>>> +
>>>> +/**
>>>> + * Binds an event device port to a specific lcore on the specified
>>>> + * event dispatcher.
>>>> + *
>>>> + * This function configures an event dispatcher to dequeue events from
>>>> + * an event device port (as specified by @p event_port_id), in case
>>>> + * its service function is run on a particular lcore (as specified by @p
>>>> + * lcore_id).
>>>> + *
>>>> + * Multiple event device ports may be bound to the same lcore. A
>>>> + * particular port may only be bound to one lcore.
>>>> + *
>>>> + * If the event dispatcher service is mapped (with
>>>> + * rte_service_map_lcore_set()) to a lcore for which no ports are
>>>> + * bound, the service function will be a no-operation.
>>>> + *
>>>> + * @param id
>>>> + *  The event dispatcher identifier.
>>>> + *
>>>> + * @param event_port_id
>>>> + *  The event device port identifier.
>>>> + *
>>>> + * @param batch_size
>>>> + *  The batch size to use in rte_event_dequeue_burst(), for the
>>>> + *  configured event device port and lcore.
>>>> + *
>>>> + * @param timeout
>>>> + *  The timeout parameter to use in rte_event_dequeue_burst(), for the
>>>> + *  configured event device port and lcore.
>>>> + *
>>>> + * @return
>>>> + *  - 0: Success
>>>> + *  - <0: Error code on failure.
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
>>>> +                                       uint16_t batch_size, uint64_t timeout,
>>>> +                                       unsigned int lcore_id);
>>>> +
>>>> +/**
>>>> + * Unbind an event device port from a specific lcore.
>>>> + *
>>>> + * @param id
>>>> + *  The event dispatcher identifier.
>>>> + *
>>>> + * @param event_port_id
>>>> + *  The event device port identifier.
>>>> + *
>>>> + * @return
>>>> + *  - 0: Success
>>>> + *  - <0: Error code on failure.
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
>>>> +                                           unsigned int lcore_id);
>>>> +
>>>> +/**
>>>> + * Register a callback function for the specified queue identifier.
>>>> + *
>>>> + * At most one callback may be registered per queue id.
>>>> + *
>>>> + * The same callback function may be registered for multiple queue ids.
>>>> + *
>>>> + * For each callback invocation, events belonging to a single queue id
>>>> + * will be dispatched.
>>>> + *
>>>> + * @param id
>>>> + *  The event dispatcher identifier.
>>>> + *
>>>> + * @param queue_id
>>>> + *  The event device queue id for which @p cb_fun should be called.
>>>> + *
>>>> + * @param cb_fun
>>>> + *  The callback function.
>>>> + *
>>>> + * @param cb_data
>>>> + *  A pointer to some application-specific opaque data (or NULL),
>>>> + *  which is supplied back to the application in the callback.
>>>> + *
>>>> + * @return
>>>> + *  - 0: Success
>>>> + *  - <0: Error code on failure.
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_register(uint8_t id, uint8_t queue_id,
>>>> +                             rte_event_dispatcher_cb_t cb_fun, void *cb_data);
>>>> +
>>>> +/**
>>>> + * Unregister a callback function for the specified queue identifier.
>>>> + *
>>>> + * @param id
>>>> + *  The event dispatcher identifier.
>>>> + *
>>>> + * @param queue_id
>>>> + *  The event device queue id for which the callback should be removed.
>>>> + *
>>>> + * @return
>>>> + *  - 0: Success
>>>> + *  - <0: Error code on failure.
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_unregister(uint8_t id, uint8_t queue_id);
>>>> +
>>>> +/**
>>>> + * Register a fallback callback function for the specified queue
>>>> + * identifier.
>>>> + *
>>>> + * Only events for which no queue-specific callback function is
>>>> + * registered will be dispatched to the @p cb_fun callback.
>>>> + *
>>>> + * At most one callback fallback function may be registered.
>>>> + *
>>>> + * For each callback invocation, only events belonging to a single
>>>> + * queue id will be included.
>>>> + *
>>>> + * If the event dispatcher encounters an event with a queue id for
>>>> + * which the application has not registered any specific callback, and
>>>> + * there is also no fallback configured, the event will be dropped.
>>>> + *
>>>> + * @param id
>>>> + *  The event dispatcher identifier.
>>>> + *
>>>> + * @param cb_fun
>>>> + *  The fallback callback function.
>>>> + *
>>>> + * @param cb_data
>>>> + *  A pointer to some application-specific opaque data (or NULL),
>>>> + *  which is supplied back to the application in the callback.
>>>> + *
>>>> + * @return
>>>> + *  - 0: Success
>>>> + *  - <0: Error code on failure.
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_register_fallback(uint8_t id,
>>>> +                                      rte_event_dispatcher_cb_t cb_fun,
>>>> +                                      void *cb_data);
>>>> +
>>>> +/**
>>>> + * Unregister the fallback callback function.
>>>> + *
>>>> + * @param id
>>>> + *  The event dispatcher identifier.
>>>> + *
>>>> + * @return
>>>> + *  - 0: Success
>>>> + *  - <0: Error code on failure.
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_event_dispatcher_unregister_fallback(uint8_t id);
>>>> +
>>>> +#ifdef __cplusplus
>>>> +}
>>>> +#endif
>>>> +
>>>> +#endif /* __RTE_EVENT_DISPATCHER__ */
>>>> diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
>>>> index 91a62cd077..dcb887601b 100644
>>>> --- a/lib/librte_eventdev/rte_eventdev_version.map
>>>> +++ b/lib/librte_eventdev/rte_eventdev_version.map
>>>> @@ -134,4 +134,14 @@ EXPERIMENTAL {
>>>>           __rte_eventdev_trace_crypto_adapter_queue_pair_del;
>>>>           __rte_eventdev_trace_crypto_adapter_start;
>>>>           __rte_eventdev_trace_crypto_adapter_stop;
>>>> +
>>>> +       rte_event_dispatcher_create;
>>>> +       rte_event_dispatcher_free;
>>>> +       rte_event_dispatcher_service_id_get;
>>>> +       rte_event_dispatcher_bind_port_to_lcore;
>>>> +       rte_event_dispatcher_unbind_port_from_lcore;
>>>> +       rte_event_dispatcher_register;
>>>> +       rte_event_dispatcher_unregister;
>>>> +       rte_event_dispatcher_register_fallback;
>>>> +       rte_event_dispatcher_unregister_fallback;
>>>>    };
>>>> --
>>>> 2.25.1
>>>>


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
  2021-03-15 14:44       ` Mattias Rönnblom
@ 2021-03-15 15:00         ` Van Haaren, Harry
  2021-03-22  9:50           ` Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: Van Haaren, Harry @ 2021-03-15 15:00 UTC (permalink / raw)
  To: mattias.ronnblom, Jerin Jacob; +Cc: Jerin Jacob, dpdk-dev, Richardson, Bruce

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Mattias Rönnblom
> Sent: Monday, March 15, 2021 2:45 PM
> To: Jerin Jacob <jerinjacobk@gmail.com>
> Cc: Jerin Jacob <jerinj@marvell.com>; dpdk-dev <dev@dpdk.org>; Richardson,
> Bruce <bruce.richardson@intel.com>
> Subject: Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
> 
> On 2021-03-07 14:04, Jerin Jacob wrote:
> > On Fri, Feb 26, 2021 at 1:31 PM Mattias Rönnblom
> > <mattias.ronnblom@ericsson.com> wrote:
> >> On 2021-02-25 13:32, Jerin Jacob wrote:
> >>> On Fri, Feb 19, 2021 at 12:00 AM Mattias Rönnblom
> >>> <mattias.ronnblom@ericsson.com> wrote:
> >>>> The purpose of the event dispatcher is primarily to decouple different
> >>>> parts of an application (e.g., processing pipeline stages), which
> >>>> share the same underlying event device.
> >>>>
> >>>> The event dispatcher replaces the conditional logic (often, a switch
> >>>> statement) that typically follows an event device dequeue operation,
> >>>> where events are dispatched to different parts of the application
> >>>> based on the destination queue id.
> >>> # If the device has all type queues[1], this RFC would restrict
> >>> applications to using the queue ONLY as the stage. A stage can be a
> >>> queue type also. How can we abstract this in this model?
> >>
> >> "All queue type" is about scheduling policy. I would think that would be
> >> independent of the "logical endpoint" of the event (i.e., the queue id).
> >> I feel like I'm missing something here.
> > Each queue type can also be represented as a stage.
> > For example, if the system has only one queue, the typical IPsec
> > outbound stages can be
> > Q0(Ordered)(For SA lookup) -> Q0(Atomic)(For sequence number update) ->
> > Q0(Ordered)(Crypto operation) -> Q0(Atomic)(Send on wire)
> 
> 
> OK, this makes sense.
> 
> 
> Would such an application want to add a callback
> per-queue-per-sched-type, or just per-sched-type? In your example, if
> you had a queue Q1 as well, would you want the option of having
> different callbacks for atomic-type events on Q0 and Q1?
> 
> 
> Would you want to dispatch based on anything else in the event? You
> could basically do it on any field (flow id, priority, etc.), but is
> there some other field that's commonly used to denote a processing stage?

I expect that struct rte_event::event_type and sub_event_type would regularly
be used to split out different types of "things" that would be handled separately.

Overall, I think we could imagine the Queue number, Queue Scheduling type (Ordered, Atomic),
Event type, sub event type, Flow-ID... all contributing somehow to what function to execute in a given situation.

As a somewhat extreme example to prove a point:
An RX core might use rte_flow rules to split traffic into some arbitrary grouping, and
then the rte_event::flow_id could be used to select the function pointer to handle it?
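
Something along those lines, hypothetically (all names below are
invented, just to illustrate the idea):

/* Hypothetical flow_id-indexed handler table, as per the rte_flow
 * grouping idea above. */
typedef void (*flow_handler_t)(struct rte_event *ev);

#define NUM_FLOW_GROUPS 16

static flow_handler_t flow_handlers[NUM_FLOW_GROUPS];

static void
dispatch_on_flow_id(struct rte_event *ev)
{
	flow_handler_t handler = flow_handlers[ev->flow_id % NUM_FLOW_GROUPS];

	if (handler != NULL)
		handler(ev);
}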

I like the *concept* of having a table of func-ptrs, and removing a switch() in that way,
but I'm not sure that DPDK Eventdev APIs are the right place for it. I think Jerin already
suggested the "helper function" concept, which seems a good idea to allow optional usage.

To be clear, I'm not against upstreaming of such an event-dispatcher, but I'm not sure
it's possible to build it to be generic enough for all use-cases. Maybe focusing on an actual
use-case and driving the design from that is a good approach?


Regards, -Harry

<snip patch contents below>

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
  2021-03-15 15:00         ` Van Haaren, Harry
@ 2021-03-22  9:50           ` Mattias Rönnblom
  2021-04-09 11:32             ` [dpdk-dev] [RFC v2] " Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2021-03-22  9:50 UTC (permalink / raw)
  To: Van Haaren, Harry, Jerin Jacob; +Cc: Jerin Jacob, dpdk-dev, Richardson, Bruce

On 2021-03-15 16:00, Van Haaren, Harry wrote:
>> -----Original Message-----
>> From: dev <dev-bounces@dpdk.org> On Behalf Of Mattias Rönnblom
>> Sent: Monday, March 15, 2021 2:45 PM
>> To: Jerin Jacob <jerinjacobk@gmail.com>
>> Cc: Jerin Jacob <jerinj@marvell.com>; dpdk-dev <dev@dpdk.org>; Richardson,
>> Bruce <bruce.richardson@intel.com>
>> Subject: Re: [dpdk-dev] [RFC] eventdev: introduce event dispatcher
>>
>> On 2021-03-07 14:04, Jerin Jacob wrote:
>>> On Fri, Feb 26, 2021 at 1:31 PM Mattias Rönnblom
>>> <mattias.ronnblom@ericsson.com> wrote:
>>>> On 2021-02-25 13:32, Jerin Jacob wrote:
>>>>> On Fri, Feb 19, 2021 at 12:00 AM Mattias Rönnblom
>>>>> <mattias.ronnblom@ericsson.com> wrote:
>>>>>> The purpose of the event dispatcher is primarily to decouple different
>>>>>> parts of an application (e.g., processing pipeline stages), which
>>>>>> share the same underlying event device.
>>>>>>
>>>>>> The event dispatcher replaces the conditional logic (often, a switch
>>>>>> statement) that typically follows an event device dequeue operation,
>>>>>> where events are dispatched to different parts of the application
>>>>>> based on the destination queue id.
>>>>> # If the device has all type queues[1], this RFC would restrict
>>>>> applications to using the queue ONLY as the stage. A stage can be a
>>>>> queue type also. How can we abstract this in this model?
>>>> "All queue type" is about scheduling policy. I would think that would be
>>>> independent of the "logical endpoint" of the event (i.e., the queue id).
>>>> I feel like I'm missing something here.
>>> Each queue type can also be represented as a stage.
>>> For example, if the system has only one queue, the typical IPsec
>>> outbound stages can be
>>> Q0(Ordered)(For SA lookup) -> Q0(Atomic)(For sequence number update) ->
>>> Q0(Ordered)(Crypto operation) -> Q0(Atomic)(Send on wire)
>>
>> OK, this makes sense.
>>
>>
>> Would such an application want to add a callback
>> per-queue-per-sched-type, or just per-sched-type? In your example, if
>> you had a queue Q1 as well, would you want the option of having
>> different callbacks for atomic-type events on Q0 and Q1?
>>
>>
>> Would you want to dispatch based on anything else in the event? You
>> could basically do it on any field (flow id, priority, etc.), but is
>> there some other field that's commonly used to denote a processing stage?
> I expect that struct rte_event::event_type and sub_event_type would regularly
> be used to split out different types of "things" that would be handled separately.
>
> Overall, I think we could imagine the Queue number, Queue Scheduling type (Ordered, Atomic),
> Event type, sub event type, Flow-ID... all contributing somehow to what function to execute in a given situation.


Sure, and add to this list the contents of the mbuf (or other user payload).


What you should keep in mind (and maybe you did) is that the primary
aim is to allow decoupling of different parts of an application (or even
multiple applications) sharing an event device.
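
As an illustration of that decoupling (a sketch using the RFC's
register call; the queue id macros and the handler functions are
invented), two modules could attach to the same dispatcher without any
knowledge of each other:

/* Module A: handles IPsec events; knows nothing about module B. */
static void
ipsec_module_init(uint8_t dispatcher_id)
{
	rte_event_dispatcher_register(dispatcher_id, IPSEC_QUEUE_ID,
				      ipsec_process_events, NULL);
}

/* Module B: handles forwarding events; knows nothing about module A. */
static void
fwd_module_init(uint8_t dispatcher_id)
{
	rte_event_dispatcher_register(dispatcher_id, FWD_QUEUE_ID,
				      fwd_process_events, NULL);
}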


> As a somewhat extreme example to prove a point:
> An RX core might use rte_flow rules to split traffic into some arbitrary grouping, and
> then the rte_event::flow_id could be used to select the function pointer to handle it?
>
> I like the *concept* of having a table of func-ptrs, and removing a switch() in that way,
> but I'm not sure that DPDK Eventdev APIs are the right place for it. I think Jerin already
> suggested the "helper function" concept, which seems a good idea to allow optional usage.
>
> To be clear, I'm not against upstreaming of such an event-dispatcher, but I'm not sure
> it's possible to build it to be generic enough for all use-cases. Maybe focusing on an actual
> use-case and driving the design from that is a good approach?
>
The intention is that the event dispatcher is optional. For performance 
and/or flexibility, many applications would still use the normal event 
device enqueue and dequeue operations.
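
For reference, the kind of dequeue-plus-switch loop being replaced
could look like the below sketch (the queue id macros and handler
functions are invented names):

/* The classic pattern: dequeue a burst, then dispatch on queue id. */
static void
classic_event_loop(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event events[32];
	uint16_t i, n;

	n = rte_event_dequeue_burst(dev_id, port_id, events,
				    RTE_DIM(events), 0);

	for (i = 0; i < n; i++) {
		switch (events[i].queue_id) {
		case STAGE_A_QUEUE_ID:
			handle_stage_a(&events[i]);
			break;
		case STAGE_B_QUEUE_ID:
			handle_stage_b(&events[i]);
			break;
		}
	}
}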


The event dispatcher is not supposed to cover all possible use cases,
in the sense that it will be able to remove all conditional logic used
to select the function call which marks the beginning of the processing
of an event. The aim should be to cover most cases, where one set of
events goes to one software module, and another goes elsewhere. (The
original RFC design stems from an actual use case - but it's just one.)


An alternative design would be to turn the whole thing around, in the 
sense that instead of the application specifying which queue id/sched 
type/sub event type/etc goes to which callback, you split the callback 
in two: one callback to answer the question "is this your event" and 
another callback to actually dispatch the event (with the same signature 
as the callback in the RFC).
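
In other words, something like the following pair of prototypes (a
sketch only of what such an API could look like):

/* Sketch: the match callback selects events, the deliver callback
 * processes the selected events in batches. */
typedef bool (*match_cb_t)(const struct rte_event *event, void *cb_data);

typedef void (*deliver_cb_t)(struct rte_event *events, uint16_t num,
			     void *cb_data);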


If made completely generic (and thus removing any references to queue
id in the API), it would require a handful (or maybe more) of callback
calls per event in the dispatcher. That sounds less than ideal from a
performance point of view.


If you kept the queue id as the basic arbiter of the event stream, the 
overhead should be reduced to something more manageable in most 
applications.


A side-effect of this scheme is that it provides an opportunity for the
dispatcher to sort the just-dequeued events into batches for the different
consumer callbacks, before dispatching them - basically for free. For
many applications, this should have a large upside in the form of 
improved cache locality and fewer branch mispredicts. To reap those 
benefits, batching on queue id should also be performed.


If there's interest, I could try to do an RFC of this alternative
approach as well, and have a look at the performance implications.

> Regards, -Harry
>
> <snip patch contents below>



^ permalink raw reply	[flat|nested] 102+ messages in thread

* [dpdk-dev] [RFC v2] eventdev: introduce event dispatcher
  2021-03-22  9:50           ` Mattias Rönnblom
@ 2021-04-09 11:32             ` Mattias Rönnblom
  2023-05-22  9:16               ` [RFC v3 0/3] Add " Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2021-04-09 11:32 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Mattias Rönnblom, Bogdan Tanasa

The purpose of the event dispatcher is primarily to decouple different
parts of an application (e.g., processing pipeline stages), which
share the same underlying event device.

The event dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the event dispatcher relies on application-supplied matching
callback functions to decide where to deliver events.

An event dispatcher is configured to dequeue events from a specific
event device, and ties into the service core framework, to do its (and
the application's) work.

The event dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

RFC v2:
 o Introduced a match callback, allowing the application layer to
   route events to callbacks not only based on queue id (like in v1),
   but on arbitrary event meta data or payload.
 o The fallback was removed, since it can now easily be achieved with a
   match function that always returns true (see the sketch after this
   list).
 o The dispatcher now rearranges the batch of dequeued events from the
   event device, in such a way as to minimize the number of deliver calls
   made. This is done primarily to improve application cache locality.
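
For illustration (not part of the patch), such a catch-all match
function could look like the below. Since the dispatcher tries the
callbacks in registration order, it should be registered last:

/* Recreates the removed fallback: a match function accepting any
 * event. */
static bool
match_any(const struct rte_event *event, void *cb_data)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_data);
	return true;
}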

Tested-by: Bogdan Tanasa <bogdan.tanasa@ericsson.com>
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 lib/librte_eventdev/meson.build            |   6 +-
 lib/librte_eventdev/rte_event_dispatcher.c | 436 +++++++++++++++++++++
 lib/librte_eventdev/rte_event_dispatcher.h | 247 ++++++++++++
 lib/librte_eventdev/version.map            |   9 +-
 4 files changed, 695 insertions(+), 3 deletions(-)
 create mode 100644 lib/librte_eventdev/rte_event_dispatcher.c
 create mode 100644 lib/librte_eventdev/rte_event_dispatcher.h

diff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build
index 79d36d37bb..d12bd731b2 100644
--- a/lib/librte_eventdev/meson.build
+++ b/lib/librte_eventdev/meson.build
@@ -13,7 +13,8 @@ sources = files('rte_eventdev.c',
 		'rte_event_eth_rx_adapter.c',
 		'rte_event_timer_adapter.c',
 		'rte_event_crypto_adapter.c',
-		'rte_event_eth_tx_adapter.c')
+		'rte_event_eth_tx_adapter.c',
+		'rte_event_dispatcher.c')
 headers = files('rte_eventdev.h',
 		'rte_eventdev_trace.h',
 		'rte_eventdev_trace_fp.h',
@@ -22,6 +23,7 @@ headers = files('rte_eventdev.h',
 		'rte_event_timer_adapter.h',
 		'rte_event_timer_adapter_pmd.h',
 		'rte_event_crypto_adapter.h',
-		'rte_event_eth_tx_adapter.h')
+		'rte_event_eth_tx_adapter.h',
+		'rte_event_dispatcher.h')
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
 deps += ['telemetry']
diff --git a/lib/librte_eventdev/rte_event_dispatcher.c b/lib/librte_eventdev/rte_event_dispatcher.c
new file mode 100644
index 0000000000..c5811615f3
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_dispatcher.c
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_lcore.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_event_dispatcher.h>
+
+#define RED_MAX_PORTS_PER_LCORE (4)
+#define RED_MAX_CBS (16)
+
+struct rte_event_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_event_dispatcher_lcore {
+	uint8_t num_ports;
+	struct rte_event_dispatcher_lcore_port ports[RED_MAX_PORTS_PER_LCORE];
+};
+
+struct rte_event_dispatcher_cb {
+	rte_event_dispatcher_match_cb_t match_fun;
+	void *match_data;
+	rte_event_dispatcher_deliver_cb_t deliver_fun;
+	void *deliver_data;
+};
+
+struct rte_event_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	int16_t num_cbs;
+	struct rte_event_dispatcher_cb cbs[RED_MAX_CBS];
+};
+
+static struct rte_event_dispatcher *dispatchers[UINT8_MAX + 1];
+
+static bool
+red_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_event_dispatcher *
+red_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+red_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define RED_VALID_ID_OR_RET_EINVAL(id)					\
+	do {								\
+		if (unlikely(!red_has_dispatcher(id))) {		\
+			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+static int
+red_lookup_cb_idx(struct rte_event_dispatcher *dispatcher,
+		  const struct rte_event *e)
+{
+	int16_t i;
+
+	for (i = 0; i < dispatcher->num_cbs; i++) {
+		struct rte_event_dispatcher_cb *cb =
+			&dispatcher->cbs[i];
+
+		if (cb->match_fun(e, cb->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+red_dispatch_events(struct rte_event_dispatcher *dispatcher,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[RED_MAX_CBS][num_events];
+	uint16_t burst_lens[RED_MAX_CBS] = { 0 };
+
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *e = &events[i];
+		int cb_idx;
+
+		cb_idx = red_lookup_cb_idx(dispatcher, e);
+
+		if (unlikely(cb_idx < 0)) {
+			RTE_EDEV_LOG_ERR("No matching callback found for "
+					 "event\n");
+			continue;
+		}
+
+		bursts[cb_idx][burst_lens[cb_idx]] = *e;
+		burst_lens[cb_idx]++;
+	}
+
+	for (i = 0; i < dispatcher->num_cbs; i++) {
+		struct rte_event_dispatcher_cb *cb = &dispatcher->cbs[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		cb->deliver_fun(bursts[i], len, cb->deliver_data);
+	}
+}
+
+static void
+red_port_dequeue(struct rte_event_dispatcher *dispatcher,
+		 struct rte_event_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	/* avoid a zero-length VLA in red_dispatch_events() */
+	if (n > 0)
+		red_dispatch_events(dispatcher, events, n);
+}
+
+static int32_t
+red_lcore_process(void *userdata)
+{
+	uint16_t i;
+	struct rte_event_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_event_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		red_port_dequeue(dispatcher, port);
+	}
+
+	return 0;
+}
+
+static int
+red_service_runstate_set(uint32_t service_id, int state)
+{
+	int rc;
+
+	rc = rte_service_component_runstate_set(service_id, state);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Error %d occurred while setting service "
+				 "component run state to %d\n", rc, state);
+
+	return rc;
+}
+
+static int
+red_service_register(struct rte_event_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = red_lcore_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "red_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc) {
+		RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+		return rc;
+	}
+
+	rc = red_service_runstate_set(dispatcher->service_id, 1);
+
+	if (rc)
+		rte_service_component_unregister(dispatcher->service_id);
+
+	return rc;
+}
+
+static int
+red_service_unregister(struct rte_event_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = red_service_runstate_set(dispatcher->service_id, 0);
+
+	if (rc)
+		return rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	if (red_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("event dispatcher",
+				  sizeof(struct rte_event_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
+				 "dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_event_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = red_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	red_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_free(uint8_t id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = red_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	red_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int16_t
+lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	struct rte_event_dispatcher_lcore_port *port;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == RED_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_event_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	int16_t port_idx;
+	struct rte_event_dispatcher_lcore_port *port;
+	struct rte_event_dispatcher_lcore_port *last;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+int16_t
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_cb_t match_fun,
+			      void *match_data,
+			      rte_event_dispatcher_deliver_cb_t deliver_fun,
+			      void *deliver_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_cb *cb;
+	int16_t cb_idx;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	if (dispatcher->num_cbs == RED_MAX_CBS)
+		return -ENOMEM;
+
+	cb_idx = dispatcher->num_cbs;
+
+	cb = &dispatcher->cbs[cb_idx];
+
+	*cb = (struct rte_event_dispatcher_cb) {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.deliver_fun = deliver_fun,
+		.deliver_data = deliver_data
+	};
+
+	dispatcher->num_cbs++;
+
+	return cb_idx;
+}
+
+int
+rte_event_dispatcher_unregister(uint8_t id, int16_t unreg_idx)
+{
+	struct rte_event_dispatcher *dispatcher;
+	uint16_t last_idx;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	if (unreg_idx < 0 || unreg_idx >= dispatcher->num_cbs)
+		return -EINVAL;
+
+	last_idx = dispatcher->num_cbs - 1;
+
+	if (unreg_idx != last_idx) {
+		struct rte_event_dispatcher_cb *unreg_cb =
+			&dispatcher->cbs[unreg_idx];
+		int16_t n = last_idx - unreg_idx;
+		memmove(unreg_cb, unreg_cb + 1, sizeof(*unreg_cb) * n);
+	}
+
+	dispatcher->num_cbs--;
+
+	return 0;
+}
diff --git a/lib/librte_eventdev/rte_event_dispatcher.h b/lib/librte_eventdev/rte_event_dispatcher.h
new file mode 100644
index 0000000000..9298446030
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_dispatcher.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Ericsson AB
+ */
+
+#ifndef __RTE_EVENT_DISPATCHER_H__
+#define __RTE_EVENT_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Dispatcher
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * With the match callbacks, the application chooses which events it
+ * wants delivered, among the events dequeued by the dispatcher, from
+ * the event device.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ *
+ * @return
+ *   Returns true in case this event should be delivered, and false
+ *   otherwise.
+ */
+typedef bool (*rte_event_dispatcher_match_cb_t)(const struct rte_event *event,
+						void *cb_data);
+/**
+ * Function prototype for deliver callbacks.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ */
+
+typedef void (*rte_event_dispatcher_deliver_cb_t)(struct rte_event *events,
+						  uint16_t num, void *cb_data);
+
+/**
+ * Create an event dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all event dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this event dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Frees an event dispatcher with the specified id.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of the event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the event dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * event dispatcher.
+ *
+ * This function configures an event dispatcher to dequeue events from
+ * an event device port (as specified by @p event_port_id), in case
+ * its service function is run on a particular lcore (as specified by @p
+ * lcore_id).
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port may only be bound to one lcore.
+ *
+ * If the event dispatcher service is mapped (with
+ * rte_service_map_lcore_set()) to a lcore for which no ports are
+ * bound, the service function will be a no-operation.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Registers callback functions.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding deliver callback function.
+ *
+ * The reason for dividing the delivery into two distinct steps is to
+ * allow the dispatcher to deliver all matching events as a batch. This
+ * in turn will cause processing of a particular kind of event to happen
+ * in a back-to-back manner, which in the typical case improves cache
+ * locality.
+ *
+ * Ordering is not guaranteed between different deliver callbacks. For
+ * example, suppose there are two callbacks registered, matching
+ * different subsets of events on an atomic queue. A batch of events
+ * [ev0, ev1, ev2] is dequeued on a particular port, all pertaining
+ * to the same flow. The match callback for registration A returns
+ * true for ev0 and ev2, and the match callback for registration B
+ * returns true for ev1. In that scenario, the event dispatcher may
+ * choose to first deliver [ev0, ev2] using A's deliver function, and
+ * then [ev1] to B - or vice versa.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param deliver_fun
+ *  The deliver callback function.
+ *
+ * @param deliver_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when deliver_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int16_t
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_cb_t match_fun,
+			      void *match_cb_data,
+			      rte_event_dispatcher_deliver_cb_t deliver_fun,
+			      void *deliver_cb_data);
+
+/**
+ * Unregister callback functions.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param reg_id
+ *  The callback registration id returned by the original
+ *  rte_event_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unregister(uint8_t id, int16_t reg_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_DISPATCHER__ */
diff --git a/lib/librte_eventdev/version.map b/lib/librte_eventdev/version.map
index 3e5c09cfdb..46cddcebe4 100644
--- a/lib/librte_eventdev/version.map
+++ b/lib/librte_eventdev/version.map
@@ -133,11 +133,18 @@ EXPERIMENTAL {
 	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
 	__rte_eventdev_trace_crypto_adapter_start;
 	__rte_eventdev_trace_crypto_adapter_stop;
-
 	# changed in 20.11
 	__rte_eventdev_trace_port_setup;
 	# added in 20.11
 	rte_event_pmd_pci_probe_named;
+
+	rte_event_dispatcher_create;
+	rte_event_dispatcher_free;
+	rte_event_dispatcher_service_id_get;
+	rte_event_dispatcher_bind_port_to_lcore;
+	rte_event_dispatcher_unbind_port_from_lcore;
+	rte_event_dispatcher_register;
+	rte_event_dispatcher_unregister;
 };
 
 INTERNAL {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [RFC v3 0/3] Add event dispatcher
  2021-04-09 11:32             ` [dpdk-dev] [RFC v2] " Mattias Rönnblom
@ 2023-05-22  9:16               ` Mattias Rönnblom
  2023-05-22  9:16                 ` [RFC v3 1/3] eventdev: introduce " Mattias Rönnblom
                                   ` (2 more replies)
  0 siblings, 3 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-05-22  9:16 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, "Van Haaren, Harry", Mattias Rönnblom

The purpose of the event dispatcher is to decouple different parts of
an application (e.g., processing pipeline stages), sharing the same
underlying event device.

The event dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the event dispatcher relies on application-supplied matching
callback functions to decide where to deliver events.
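
For illustration, a match callback could be as simple as the following
sketch (the module name and queue id macro are hypothetical):

    static bool
    module_a_match(const struct rte_event *event, void *cb_data)
    {
            /* cb_data is unused in this example */
            return event->queue_id == MODULE_A_QUEUE_ID;
    }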

An event dispatcher is configured to dequeue events from a specific
event device, and ties into the service core framework, to do its (and
the application's) work.

The event dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.
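
As a sketch, the service core plumbing could look like the below
(DISPATCHER_ID and lcore_id are application-provided, and error
handling is omitted):

    uint32_t service_id;

    rte_event_dispatcher_service_id_get(DISPATCHER_ID, &service_id);

    rte_service_lcore_add(lcore_id);
    rte_service_map_lcore_set(service_id, lcore_id, 1);
    rte_service_runstate_set(service_id, 1);
    rte_service_lcore_start(lcore_id);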

Although the event dispatcher adds some overhead, experience suggests
that the net effect on the application (both synthetic benchmarks and
more real-world applications) seems to be positive, except for small
dequeue sizes. This is likely due to improved temporal locality, and
will vary greatly. The overhead seems to be ~10 cc/event on a large
core, with a handful of handlers.

Outstanding questions:
 o Explore an option where the callbacks (and callback data) are per lcore.
   More complicated, but more performant for deployments where all
   dispatcher lcores are not used for the same purpose.
 o Reshuffle the handler order in runtime, in such a way that the
   most-often-matched handler is tried first.
 o Consider adding the possibility to express simple match functions
   (e.g., queue_id == 7) without a match callback.
 o Consider runtime reconfiguration.
 o Consider moving dispatcher id allocation from app to DPDK. Avoids
   app-level coordination, but makes the API inconsistent with other
   Eventdev APIs.
 o Should the events delivered to the application in the process callback
   be marked 'const' or not ('const' now, but prohibits reuse for TX).
 o Consider allowing the application setting the process callback to NULL,
   signalling to the dispatcher that processing will occur already at the
   time of the match call. May provide some slight performance benefits
   for applications where the average number of events supplied per process
   function call is very small.

RFC v3:
 o Change from int16_t to int for ids. No particular gain in using
   int16_t over int.
 o Introduce rte_event_dispatcher_start()/stop() to make API consistent
   with the other Eventdev services.
 o Unit tests added.
 o Programming guide added.
 o Added dispatcher statistics.
 o Abandoned the promise to run the match functions in order. This is
   to allow for future performance optimization (i.e., reorder match
   functions so that often-matched functions are checked first).
 o Introduced event_port_id and event_dev_id in the deliver
   callback, to make it easier for applications to follow the Eventdev
   API requirement to enqueue forward-type events on the same
   event port as the one the original events were dequeued from.
 o Mark delivered events as constant.
 o Introduce optional callback called by the dispatcher when a full
   batch of events has been distributed to the various handlers. This
   is useful in cases when an event output buffer is shared among
   several handlers.
 o Fixed bug where consumer unregistration would invalidate other
   registration ids.

RFC v2:
 o Introduced a match callback, allowing the application layer to
   route events to callbacks not only based on queue id (like in v1),
   but on arbitrary event meta data or payload.
 o The fallback was removed, since it can now easily be achieved with a
   match function always returning true.
 o The dispatcher now rearranges the batch of dequeued events from the
   event device, in such a way as to minimize the number of deliver calls
   made. This is done primarily to improve application cache locality.

Mattias Rönnblom (3):
  eventdev: introduce event dispatcher
  test: add event dispatcher test suite
  doc: add event dispatcher programming guide

 app/test/meson.build                       |   1 +
 app/test/test_event_dispatcher.c           | 814 +++++++++++++++++++++
 doc/api/doxy-api-index.md                  |   1 +
 doc/guides/prog_guide/event_dispatcher.rst | 423 +++++++++++
 doc/guides/prog_guide/index.rst            |   1 +
 lib/eventdev/meson.build                   |   2 +
 lib/eventdev/rte_event_dispatcher.c        | 670 +++++++++++++++++
 lib/eventdev/rte_event_dispatcher.h        | 440 +++++++++++
 lib/eventdev/version.map                   |  12 +
 9 files changed, 2364 insertions(+)
 create mode 100644 app/test/test_event_dispatcher.c
 create mode 100644 doc/guides/prog_guide/event_dispatcher.rst
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [RFC v3 1/3] eventdev: introduce event dispatcher
  2023-05-22  9:16               ` [RFC v3 0/3] Add " Mattias Rönnblom
@ 2023-05-22  9:16                 ` Mattias Rönnblom
  2023-06-09  7:08                   ` [RFC v4 0/3] Add " Mattias Rönnblom
  2023-05-22  9:16                 ` [RFC v3 2/3] test: add event dispatcher test suite Mattias Rönnblom
  2023-05-22  9:16                 ` [RFC v3 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
  2 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-05-22  9:16 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, "Van Haaren, Harry", Mattias Rönnblom

The purpose of the event dispatcher is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the event dispatcher also provides a convenient and
flexible way for the application to use service cores for
application-level processing.
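
A minimal usage sketch, with DISPATCHER_ID, the module_a_*() callbacks
and the port/lcore ids being application-defined, and error handling
omitted:

    rte_event_dispatcher_create(DISPATCHER_ID, event_dev_id);

    rte_event_dispatcher_bind_port_to_lcore(DISPATCHER_ID, event_port_id,
                                            32, 0, lcore_id);

    rte_event_dispatcher_register(DISPATCHER_ID, module_a_match, NULL,
                                  module_a_process, NULL);

    rte_event_dispatcher_start(DISPATCHER_ID);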

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 lib/eventdev/meson.build            |   2 +
 lib/eventdev/rte_event_dispatcher.c | 670 ++++++++++++++++++++++++++++
 lib/eventdev/rte_event_dispatcher.h | 440 ++++++++++++++++++
 lib/eventdev/version.map            |  12 +
 4 files changed, 1124 insertions(+)
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 6edf98dfa5..c0edc744fe 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'rte_event_crypto_adapter.c',
         'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
+        'rte_event_dispatcher.c',
         'rte_event_ring.c',
         'rte_event_timer_adapter.c',
         'rte_eventdev.c',
@@ -27,6 +28,7 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_eth_tx_adapter.h',
+        'rte_event_dispatcher.h',
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_eventdev.h',
diff --git a/lib/eventdev/rte_event_dispatcher.c b/lib/eventdev/rte_event_dispatcher.c
new file mode 100644
index 0000000000..591efeef80
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.c
@@ -0,0 +1,670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_lcore.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_event_dispatcher.h>
+
+#define RED_MAX_PORTS_PER_LCORE (4)
+#define RED_MAX_HANDLERS (32)
+#define RED_MAX_FINALIZERS (16)
+
+struct rte_event_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_event_dispatcher_lcore {
+	uint8_t num_ports;
+	struct rte_event_dispatcher_lcore_port ports[RED_MAX_PORTS_PER_LCORE];
+	struct rte_event_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_event_dispatcher_handler {
+	int id;
+	rte_event_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_event_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_event_dispatcher_finalizer {
+	int id;
+	rte_event_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_event_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_handlers;
+	uint16_t num_finalizers;
+	struct rte_event_dispatcher_handler handlers[RED_MAX_HANDLERS];
+	struct rte_event_dispatcher_finalizer finalizers[RED_MAX_FINALIZERS];
+};
+
+static struct rte_event_dispatcher *dispatchers[UINT8_MAX];
+
+static bool
+red_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_event_dispatcher *
+red_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+red_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define RED_VALID_ID_OR_RET_EINVAL(id)					\
+	do {								\
+		if (unlikely(!red_has_dispatcher(id))) {		\
+			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+static int
+red_lookup_handler_idx(struct rte_event_dispatcher *dispatcher,
+			const struct rte_event *event)
+{
+	uint16_t i;
+
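+	/* Handlers are tried in registration order, and the first handler
+	 * whose match function returns true is picked. As of RFC v3, this
+	 * order is no longer promised by the API, leaving room for future
+	 * reordering optimizations.
+	 */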
+	for (i = 0; i < dispatcher->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&dispatcher->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static inline void
+red_dispatch_events(struct rte_event_dispatcher *dispatcher,
+		    struct rte_event_dispatcher_lcore *lcore,
+		    struct rte_event_dispatcher_lcore_port *port,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[RED_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[RED_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+
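+	/*
+	 * First pass: sort the dequeued events into per-handler bursts,
+	 * so that each handler's process callback is invoked at most
+	 * once, with all of its matching events delivered back-to-back.
+	 */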
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = red_lookup_handler_idx(dispatcher, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
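+	/* Second pass: hand each non-empty burst to its process callback. */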
+	for (i = 0; i < dispatcher->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&dispatcher->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				      bursts[i], len, handler->process_data);
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
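+	/* Let any registered finalizers know that the whole dequeued
+	 * batch has been dispatched (or dropped).
+	 */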
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline void
+red_port_dequeue(struct rte_event_dispatcher *dispatcher,
+		 struct rte_event_dispatcher_lcore *lcore,
+		 struct rte_event_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		red_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+}
+
+static __rte_always_inline void
+red_lcore_process(struct rte_event_dispatcher *dispatcher,
+		  struct rte_event_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		red_port_dequeue(dispatcher, lcore, port);
+	}
+}
+
+static int32_t
+red_process(void *userdata)
+{
+	struct rte_event_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_event_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+
+	red_lcore_process(dispatcher, lcore);
+
+	return 0;
+}
+
+static int
+red_service_register(struct rte_event_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = red_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "red_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+red_service_unregister(struct rte_event_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	if (red_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("event dispatcher",
+				  sizeof(struct rte_event_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
+				 "dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_event_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = red_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	red_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_free(uint8_t id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = red_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	red_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int
+lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	struct rte_event_dispatcher_lcore_port *port;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == RED_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_event_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_event_dispatcher_lcore_port *port;
+	struct rte_event_dispatcher_lcore_port *last;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_event_dispatcher_handler*
+red_get_handler_by_id(struct rte_event_dispatcher *dispatcher,
+		       int handler_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&dispatcher->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+red_alloc_handler_id(struct rte_event_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+
+	while (red_get_handler_by_id(dispatcher, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static struct rte_event_dispatcher_handler *
+red_alloc_handler(struct rte_event_dispatcher *dispatcher)
+{
+	int handler_idx;
+	struct rte_event_dispatcher_handler *handler;
+
+	if (dispatcher->num_handlers == RED_MAX_HANDLERS)
+		return NULL;
+
+	handler_idx = dispatcher->num_handlers;
+	handler = &dispatcher->handlers[handler_idx];
+
+	handler->id = red_alloc_handler_id(dispatcher);
+
+	dispatcher->num_handlers++;
+
+	return handler;
+}
+
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_handler *handler;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	handler = red_alloc_handler(dispatcher);
+
+	if (handler == NULL)
+		return -ENOMEM;
+
+	handler->match_fun = match_fun;
+	handler->match_data = match_data;
+	handler->process_fun = process_fun;
+	handler->process_data = process_data;
+
+	return handler->id;
+}
+
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	unreg_handler = red_get_handler_by_id(dispatcher, handler_id);
+
+	if (unreg_handler == NULL)
+		return -EINVAL;
+
+	handler_idx = unreg_handler - &dispatcher->handlers[0];
+
+	last_idx = dispatcher->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_event_dispatcher_handler) * n);
+	}
+
+	dispatcher->num_handlers--;
+
+	return 0;
+}
+
+static struct rte_event_dispatcher_finalizer*
+red_get_finalizer_by_id(struct rte_event_dispatcher *dispatcher,
+		       int handler_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == handler_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+red_alloc_finalizer_id(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (red_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_event_dispatcher_finalizer *
+red_alloc_finalizer(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == RED_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = red_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			      rte_event_dispatcher_finalize_t finalize_fun,
+			      void *finalize_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	finalizer = red_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int handler_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *unreg_finalizer;
+	int handler_idx;
+	uint16_t last_idx;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	unreg_finalizer = red_get_finalizer_by_id(dispatcher, handler_id);
+
+	if (unreg_finalizer == NULL)
+		return -EINVAL;
+
+	handler_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all finalizers to maintain finalizer order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_event_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static void
+red_aggregate_stats(struct rte_event_dispatcher_stats *result,
+		    const struct rte_event_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats)
+{
+	struct rte_event_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	*stats = (struct rte_event_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		red_aggregate_stats(stats, &lcore->stats);
+	}
+
+	return 0;
+}
+
+static int
+red_set_service_runstate(uint8_t id, int state)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_start(uint8_t id)
+{
+	return red_set_service_runstate(id, 1);
+}
+
+int
+rte_event_dispatcher_stop(uint8_t id)
+{
+	return red_set_service_runstate(id, 0);
+}
diff --git a/lib/eventdev/rte_event_dispatcher.h b/lib/eventdev/rte_event_dispatcher.h
new file mode 100644
index 0000000000..5563660f31
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.h
@@ -0,0 +1,440 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_EVENT_DISPATCHER_H__
+#define __RTE_EVENT_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Dispatcher
+ *
+ * The purpose of the event dispatcher is to decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by the application to decide how the
+ * event dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ *
+ * @return
+ *   Returns true if this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_event_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the event dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ */
+
+typedef void
+(*rte_event_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				  const struct rte_event *events,
+				  uint16_t num, void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * Using a finalize callback, the application may ask to be notified when a
+ * complete batch of events has been delivered to the various process
+ * callbacks.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_finalize_register().
+ */
+
+typedef void
+(*rte_event_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				   void *cb_data);
+
+/**
+ * Event dispatcher statistics
+ */
+struct rte_event_dispatcher_stats {
+	uint64_t poll_count;
+	/**< Number of event dequeue calls made toward the event device. */
+	uint64_t ev_dispatch_count;
+	/**< Number of events dispatched to a handler.*/
+	uint64_t ev_drop_count;
+	/**< Number of events dropped because no handler was found. */
+};
+
+/**
+ * Create an event dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all event dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this event dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Free an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the event dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * event dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the event dispatcher service is mapped (with
+ * rte_service_map_lcore_set()) to an lcore for which no ports are
+ * bound, the service function will be a no-operation.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of events to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the dispatcher service.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different process callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events on an
+ * atomic queue. A batch of events [ev0, ev1, ev2] is dequeued on a
+ * particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the event dispatcher may choose to deliver first [ev0,
+ * ev2] using A's process function, and then [ev1] to B - or vice
+ * versa.
+ *
+ * rte_event_dispatcher_register() is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_cb_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_event_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id);
+
+/**
+ * Registers a finalize callback function.
+ *
+ * An application may optionally install a finalize callback.
+ *
+ * The finalize callback is called when all events of a particular
+ * batch (retrieved using rte_event_dequeue_burst()) have been
+ * delivered (or dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_event_dispatcher_finalize_register() is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			    rte_event_dispatcher_finalize_t finalize_fun,
+			    void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_event_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int reg_id);
+
+/**
+ * Start an event dispatcher instance.
+ *
+ * Enables the event dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_event_dispatcher_start().
+ *
+ * For the event dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. An event dispatcher's
+ * service id is retrieved using rte_event_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the event dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_event_dispatcher_bind_port_to_lcore(),
+ * prior to starting the event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_start(uint8_t id);
+
+/**
+ * Stop a running event dispatcher instance.
+ *
+ * Disables the event dispatcher service.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stop(uint8_t id);
+
+/**
+ * Retrieve statistics for an event dispatcher instance.
+ *
+ * This function is MT safe and may be called from any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_DISPATCHER__ */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 89068a5713..36466e9f24 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -131,6 +131,18 @@ EXPERIMENTAL {
 	rte_event_eth_tx_adapter_runtime_params_init;
 	rte_event_eth_tx_adapter_runtime_params_set;
 	rte_event_timer_remaining_ticks_get;
+
+	rte_event_dispatcher_create;
+	rte_event_dispatcher_free;
+	rte_event_dispatcher_service_id_get;
+	rte_event_dispatcher_bind_port_to_lcore;
+	rte_event_dispatcher_unbind_port_from_lcore;
+	rte_event_dispatcher_register;
+	rte_event_dispatcher_unregister;
+	rte_event_dispatcher_finalize_register;
+	rte_event_dispatcher_finalize_unregister;
+	rte_event_dispatcher_start;
+	rte_event_dispatcher_stop;
+	rte_event_dispatcher_stats_get;
 };
 
 INTERNAL {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [RFC v3 2/3] test: add event dispatcher test suite
  2023-05-22  9:16               ` [RFC v3 0/3] Add " Mattias Rönnblom
  2023-05-22  9:16                 ` [RFC v3 1/3] eventdev: introduce " Mattias Rönnblom
@ 2023-05-22  9:16                 ` Mattias Rönnblom
  2023-05-22  9:16                 ` [RFC v3 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-05-22  9:16 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, Van, Haaren, Harry, Mattias Rönnblom

Add unit tests for the event dispatcher.
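
The suite is registered as 'event_dispatcher_autotest' and can be run
interactively from the dpdk-test binary, e.g. (the binary path depends
on the build directory):

    ./build/app/test/dpdk-test
    RTE>>event_dispatcher_autotest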

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 app/test/meson.build             |   1 +
 app/test/test_event_dispatcher.c | 814 +++++++++++++++++++++++++++++++
 2 files changed, 815 insertions(+)
 create mode 100644 app/test/test_event_dispatcher.c

diff --git a/app/test/meson.build b/app/test/meson.build
index b9b5432496..fac3b6b88b 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -50,6 +50,7 @@ test_sources = files(
         'test_errno.c',
         'test_ethdev_link.c',
         'test_event_crypto_adapter.c',
+        'test_event_dispatcher.c',
         'test_event_eth_rx_adapter.c',
         'test_event_ring.c',
         'test_event_timer_adapter.c',
diff --git a/app/test/test_event_dispatcher.c b/app/test/test_event_dispatcher.c
new file mode 100644
index 0000000000..93f6c53e33
--- /dev/null
+++ b/app/test/test_event_dispatcher.c
@@ -0,0 +1,814 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include "test.h"
+
+#include <stdatomic.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_event_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+
+#define NUM_WORKERS 3
+
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	uint8_t dispatcher_id;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	struct app_queue queues[NUM_QUEUES];
+
+	bool running;
+
+	atomic_int completed_events;
+	atomic_int errors;
+};
+
+#define RETURN_ON_ERROR(rc) \
+	do {					\
+		if (rc != TEST_SUCCESS)		\
+			return rc;		\
+	} while (0)
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher_id = rte_rand_max(256);
+
+	rc = rte_event_dispatcher_create(app->dispatcher_id,
+					 app->event_dev_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to create event dispatcher");
+
+	rc = rte_event_dispatcher_service_id_get(app->dispatcher_id,
+						 &app->dispatcher_service_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to get event dispatcher service ID");
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rte_event_dispatcher_free(app->dispatcher_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_event_dispatcher_bind_port_to_lcore(
+			app->dispatcher_id, WORKER_PORT_ID(i),
+			DEQUEUE_BURST_SIZE, 0, lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_event_dispatcher_unbind_port_from_lcore(
+			app->dispatcher_id,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+		       const struct rte_event *in_events, uint16_t num,
+		       void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
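+	/* The Eventdev API requires forward-type events to be enqueued on
+	 * the same port the original events were dequeued from, so retry
+	 * until the event device has accepted the whole burst.
+	 */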
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		atomic_fetch_add_explicit(&app->completed_events, num,
+					  memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_event_dispatcher_register(app->dispatcher_id,
+						       match_queue,
+						       (void *)queue_id,
+						       test_app_process_queue,
+						       app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	int rc = rte_event_dispatcher_unregister(app->dispatcher_id, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		int rc;
+
+		rc = test_app_unregister_callback(app, i);
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dispatcher_start(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dispatcher_stop(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
+			    "Too few lcores. Needs at least %d worker lcores",
+			    NUM_SERVICE_CORES);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_dispatcher(app);
+
+	app->running = true;
+
+	return rc;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_stop_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+static struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_create_dispatcher(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_setup_service_cores(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_register_callbacks(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_bind_ports(test_app);
+
+	return rc;
+}
+
+static void
+test_teardown(void)
+{
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return atomic_load_explicit(&app->completed_events,
+				    memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return atomic_load_explicit(&app->errors, memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
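+	/* Per-flow sequence numbers. The process callbacks verify that,
+	 * within each flow, events pass through the queue pipeline in
+	 * order.
+	 */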
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event_dispatcher_stats stats;
+	rc = rte_event_dispatcher_stats_get(test_app->dispatcher_id,
+					    &stats);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue = 1;
+	struct rte_event_dispatcher_stats stats;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rc = rte_event_dispatcher_stats_get(test_app->dispatcher_id,
+						    &stats);
+		RETURN_ON_ERROR(rc);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_event_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_TEST_COMMAND(event_dispatcher_autotest, test_event_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [RFC v3 3/3] doc: add event dispatcher programming guide
  2023-05-22  9:16               ` [RFC v3 0/3] Add " Mattias Rönnblom
  2023-05-22  9:16                 ` [RFC v3 1/3] eventdev: introduce " Mattias Rönnblom
  2023-05-22  9:16                 ` [RFC v3 2/3] test: add event dispatcher test suite Mattias Rönnblom
@ 2023-05-22  9:16                 ` Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-05-22  9:16 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, Van Haaren, Harry, Mattias Rönnblom

Provide a programming guide for the event dispatcher.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 doc/api/doxy-api-index.md                  |   1 +
 doc/guides/prog_guide/event_dispatcher.rst | 423 +++++++++++++++++++++
 doc/guides/prog_guide/index.rst            |   1 +
 3 files changed, 425 insertions(+)
 create mode 100644 doc/guides/prog_guide/event_dispatcher.rst

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index c709fd48ad..05b22057f9 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -29,6 +29,7 @@ The public API headers are grouped by topics:
   [event_eth_tx_adapter](@ref rte_event_eth_tx_adapter.h),
   [event_timer_adapter](@ref rte_event_timer_adapter.h),
   [event_crypto_adapter](@ref rte_event_crypto_adapter.h),
+  [event_dispatcher](@ref rte_event_dispatcher.h),
   [rawdev](@ref rte_rawdev.h),
   [metrics](@ref rte_metrics.h),
   [bitrate](@ref rte_bitrate.h),
diff --git a/doc/guides/prog_guide/event_dispatcher.rst b/doc/guides/prog_guide/event_dispatcher.rst
new file mode 100644
index 0000000000..6fabadf560
--- /dev/null
+++ b/doc/guides/prog_guide/event_dispatcher.rst
@@ -0,0 +1,423 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Event Dispatcher
+================
+
+Overview
+--------
+
+The purpose of the event dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the event dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The event dispatcher replaces the conditional logic that follows an
+event device dequeue operation, where events are dispatched to
+different parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B expects events on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int n;
+            unsigned int i;
+
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+                                        MAX_BURST, 0);
+
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+    
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules involved. In other words, this pattern leads to a
+violation of module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example, ``module_a_process()`` is
+broken into two processing stages (a purely module-internal change),
+the shared conditional code must be updated to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The event dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the event dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive the event, using
+  a set of application-provided, per-handler event matching callback
+  functions.
+* Provide all events matching a particular handler to that handler,
+  using its process callback.
+
+Had the above application made use of the event dispatcher,
+the code relevant for its module A may have looked something like
+this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+            return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            const struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+
+    /* In the module's initialization code */
+    rte_event_dispatcher_register(EVENT_DISPATCHER_ID, module_a_match,
+                                  NULL, module_a_process_events,
+                                  module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or
+phrased differently, what is replacing the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the event
+dispatcher is run as a DPDK :doc:`Service <service_cores>`.
+
+The event dispatcher is a layer between the application and the event
+device in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
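+
+For example, a process callback forwarding events to the next pipeline
+stage might, after updating each event's ``queue_id`` and setting its
+``op`` to ``RTE_EVENT_OP_FORWARD``, hand the events straight back to
+the event device (a minimal sketch, with the return value of the
+enqueue operation left unchecked):
+
+.. code-block:: c
+
+    rte_event_enqueue_burst(event_dev_id, event_port_id, events, num);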
+
+Event Dispatcher Creation
+-------------------------
+
+Before any ``rte_event_dispatcher_*()`` calls are made, the
+application must create an event dispatcher instance by means of a
+``rte_event_dispatcher_create()`` call.
+
+The supplied dispatcher id is allocated by the application, and must
+be unique.
+
+The event device must be configured before the event dispatcher is
+created.
+
+Usually, only one event dispatcher is needed per event device. An
+event dispatcher can handle only a single event device.
+
+An event dispatcher is freed using the ``rte_event_dispatcher_free()``
+function. The event dispatcher's service functions must not be running
+on any lcore at the point of this call.
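+
+A minimal sketch of the create/free life cycle, assuming an
+application-defined ``EVENT_DISPATCHER_ID`` constant and an event
+device already configured and identified by ``event_dev_id``:
+
+.. code-block:: c
+
+    /* After event device configuration, at application initialization */
+    rte_event_dispatcher_create(EVENT_DISPATCHER_ID, event_dev_id);
+
+    /* At shutdown, with the dispatcher's service no longer running */
+    rte_event_dispatcher_free(EVENT_DISPATCHER_ID);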
+
+Event Port Binding
+------------------
+
+In order to be able to dequeue events, the event dispatcher must know
+which event ports are to be used, for every lcore to which the event
+dispatcher service is mapped.
+
+The application configures a particular event dev port to be managed
+by a particular lcore by calling
+``rte_event_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
+The ``rte_event_dispatcher_unbind_port_from_lcore()`` function is used
+to undo this operation.
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the event
+dispatcher's service function is running on any lcore.
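+
+Below is a sketch of the binding step. The mapping from lcore to
+event port (here, the hypothetical ``app_port_for_lcore()`` helper)
+and the ``BATCH_SIZE`` and ``TIMEOUT`` constants are assumed to be
+application-defined:
+
+.. code-block:: c
+
+    unsigned int lcore_id;
+
+    RTE_LCORE_FOREACH(lcore_id) {
+            /* Hypothetical application-level lcore-to-port mapping */
+            uint8_t event_port_id = app_port_for_lcore(lcore_id);
+
+            rte_event_dispatcher_bind_port_to_lcore(EVENT_DISPATCHER_ID,
+                                                    event_port_id,
+                                                    BATCH_SIZE, TIMEOUT,
+                                                    lcore_id);
+    }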
+
+Event Handlers
+--------------
+
+The event dispatcher handler is an interface between the event
+dispatcher and an application module, used to route events to the
+appropriate part of the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_event_dispatcher_match_t`` callback, whose job is to
+  decide if the event is to be the property of this handler.
+* The ``rte_event_dispatcher_process_t`` callback, which is used by the
+  event dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the event dispatcher may impose
+an upper limit on the number of handlers. In addition, many handlers
+increase event dispatcher overhead, although this does not necessarily
+translate to an application-level performance degradation. See the
+section on :ref:`Event Clustering` for more information.
+
+Handler registration and unregistration cannot safely be done while
+the event dispatcher's service function is running on any lcore.
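+
+The handler id returned by ``rte_event_dispatcher_register()`` is what
+is later used to remove the handler. A minimal sketch, assuming module
+A stored the id in ``module_a_reg_id`` at registration time:
+
+.. code-block:: c
+
+    /* In the module's teardown code, with the service not running */
+    rte_event_dispatcher_unregister(EVENT_DISPATCHER_ID,
+                                    module_a_reg_id);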
+
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the event dispatcher's service
+function, upon the arrival of events on the event ports bound to the
+running service lcore.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls being
+made. For example, if the event dispatcher dequeues two events from
+the event device, it may choose to find out the destination for the
+first event, and deliver it, and then continue to find out the
+destination for the second, and then deliver that event as well. The
+event dispatcher may also choose a strategy where no event is
+delivered until the destination handlers for both events have been
+determined.
+
+The event dispatcher guarantees that all events provided in a process
+batch have been seen (and matched) by the handler's match callback. It
+also guarantees that all events provided in a single process call
+belong to the same event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The event dispatcher maintains the order of events destined for the
+same handler.
+
+The event dispatcher *does not* guarantee to maintain the order of
+events delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` takes on the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` takes on the value 1. Then consider
+a scenario where the following events are dequeued from the event
+device (qid is short for event queue id).
+
+.. code-block::
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The event dispatcher may deliver the events in the following manner:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The event dispatcher may also choose to cluster (group) all events
+destined for ``module_b_stage_0_process()`` into one array:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+The event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events belong to different handlers.
+
+The event dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the event dispatcher may not do is to reorder
+event ``e1`` so that it precedes ``e0`` in the array passed to
+module B's stage 0 process callback.
+
+Clustering events destined for the same callback may help to improve
+application-level performance, since processing events destined for
+the same handler likely increases temporal locality of memory
+accesses, which in turn may lead to fewer cache misses and improved
+performance.
+
+Finalize
+--------
+
+The event dispatcher may be configured to notify one or more parts of
+the application when the matching and processing of a batch of events
+has completed.
+
+The ``rte_event_dispatcher_finalize_register`` call is used to
+register a finalize callback. The function
+``rte_event_dispatcher_finalize_unregister`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+modules, or a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned int lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_event_dispatcher_finalize_register(EVENT_DISPATCHER_ID,
+                                           finalize_batch,
+                                           shared_event_buffer);
+
+The event dispatcher does not track any relationship between a handler
+and a finalize callback, and all finalize callbacks will be called, if
+(and only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the event dispatcher's service function is running on any
+lcore.
+
+Service
+-------
+
+The event dispatcher is a DPDK service, and is managed in a manner
+similar to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-configured event dispatcher
+(identified by ``EVENT_DISPATCHER_ID``) to that lcore.
+
+.. code-block:: c
+
+    static void
+    launch_event_dispatcher_core(unsigned int lcore_id)
+    {
+            uint32_t service_id;
+
+            rte_service_lcore_add(lcore_id);
+
+            rte_event_dispatcher_service_id_get(EVENT_DISPATCHER_ID,
+                                                &service_id);
+
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+
+            rte_service_lcore_start(lcore_id);
+
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the event dispatcher must be started.
+
+.. code-block:: c
+
+    rte_event_dispatcher_start(EVENT_DISPATCHER_ID);
+
+
+Multi Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where the CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In the light of this, it makes sense to have the event dispatcher
+service be the only DPDK service on all lcores used for packet
+processing — at least in principle.
+
+There is nothing preventing colocating other services with the event
+dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the event dispatcher, were
+performed on the lcore even when no events were received are prime
+targets for being converted into such auxiliary services, running on
+the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
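+
+A minimal sketch of what such an auxiliary timer service might look
+like, using the same service callback signature as any other DPDK
+service (registration and lcore mapping boilerplate left out):
+
+.. code-block:: c
+
+    static int32_t
+    timer_service(void *userdata __rte_unused)
+    {
+            /* Expire and run any due per-lcore timers */
+            rte_timer_manage();
+
+            return 0;
+    }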
+
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to assure resource reclamation occurs even though some
+lcores currently do not process any events.
+
+If services other than the event dispatcher service are mapped to a
+service lcore, it's important that the other services are well-behaved
+and don't interfere with event processing to the extent that the
+system's throughput and/or latency requirements are at risk of not
+being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event-to-module mapping may reasonably be done based on other
+   ``rte_event`` fields (or even event data), but queue id-based
+   routing serves well in a simple example. Indeed, that's the very
+   reason to have callback match functions, instead of a simple
+   queue id-to-handler scheme.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 87333ee84a..74fcbcee6b 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -59,6 +59,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    event_dispatcher
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [RFC v4 0/3] Add event dispatcher
  2023-05-22  9:16                 ` [RFC v3 1/3] eventdev: introduce " Mattias Rönnblom
@ 2023-06-09  7:08                   ` Mattias Rönnblom
  2023-06-09  7:08                     ` [RFC v4 1/3] eventdev: introduce " Mattias Rönnblom
                                       ` (2 more replies)
  0 siblings, 3 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-09  7:08 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Mattias Rönnblom

The purpose of the event dispatcher is to decouple different parts of
an application (e.g., processing pipeline stages), sharing the same
underlying event device.

The event dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the event dispatcher relies on application-supplied matching
callback functions to decide where to deliver events.

An event dispatcher is configured to dequeue events from a specific
event device, and ties into the service core framework, to do its (and
the application's) work.

The event dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the event dispatcher adds some overhead, experience suggests
that the net effect on the application (both synthetic benchmarks and
more real-world applications) may well be positive. This is primarily
due to clustering (see programming guide) reducing cache misses.
Benchmarking indicates that the overhead is ~10 clock cycles per event
(on a large core), with a handful of active handlers.

The event dispatcher does not support runtime reconfiguration.

Outstanding questions:
 o Consider adding the possibility to express simple match functions
   (e.g., queue_id == 7) without a match callback.
 o Consider allowing the application to set the process callback to NULL,
   signalling to the dispatcher that processing will occur already at the
   time of the match call. May provide some slight performance benefits
   for applications where the average number of events supplied per process
   function call is very small.

Mattias Rönnblom (3):
  eventdev: introduce event dispatcher
  test: add event dispatcher test suite
  doc: add event dispatcher programming guide

 app/test/meson.build                       |   1 +
 app/test/test_event_dispatcher.c           | 814 +++++++++++++++++++++
 doc/api/doxy-api-index.md                  |   1 +
 doc/guides/prog_guide/event_dispatcher.rst | 443 +++++++++++
 doc/guides/prog_guide/index.rst            |   1 +
 lib/eventdev/meson.build                   |   2 +
 lib/eventdev/rte_event_dispatcher.c        | 770 +++++++++++++++++++
 lib/eventdev/rte_event_dispatcher.h        | 448 ++++++++++++
 lib/eventdev/version.map                   |  13 +
 9 files changed, 2493 insertions(+)
 create mode 100644 app/test/test_event_dispatcher.c
 create mode 100644 doc/guides/prog_guide/event_dispatcher.rst
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [RFC v4 1/3] eventdev: introduce event dispatcher
  2023-06-09  7:08                   ` [RFC v4 0/3] Add " Mattias Rönnblom
@ 2023-06-09  7:08                     ` Mattias Rönnblom
  2023-06-09 14:34                       ` Stephen Hemminger
  2023-06-14 17:25                       ` [PATCH 0/3] Add " Mattias Rönnblom
  2023-06-09  7:08                     ` [RFC v4 2/3] test: add event dispatcher test suite Mattias Rönnblom
  2023-06-09  7:08                     ` [RFC v4 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
  2 siblings, 2 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-09  7:08 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Mattias Rönnblom

The purpose of the event dispatcher is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the event dispatcher also provides a convenient and
flexible way for the application to use service cores for
application-level processing.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>

--

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and causes
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 lib/eventdev/meson.build            |   2 +
 lib/eventdev/rte_event_dispatcher.c | 770 ++++++++++++++++++++++++++++
 lib/eventdev/rte_event_dispatcher.h | 448 ++++++++++++++++
 lib/eventdev/version.map            |  13 +
 4 files changed, 1233 insertions(+)
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 6edf98dfa5..c0edc744fe 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'rte_event_crypto_adapter.c',
         'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
+        'rte_event_dispatcher.c',
         'rte_event_ring.c',
         'rte_event_timer_adapter.c',
         'rte_eventdev.c',
@@ -27,6 +28,7 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_eth_tx_adapter.h',
+        'rte_event_dispatcher.h',
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_eventdev.h',
diff --git a/lib/eventdev/rte_event_dispatcher.c b/lib/eventdev/rte_event_dispatcher.c
new file mode 100644
index 0000000000..5e5096e2cc
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.c
@@ -0,0 +1,770 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_event_dispatcher.h>
+
+#define RED_MAX_PORTS_PER_LCORE 4
+#define RED_MAX_HANDLERS 32
+#define RED_MAX_FINALIZERS 16
+#define RED_AVG_PRIO_INTERVAL 2000
+
+struct rte_event_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_event_dispatcher_handler {
+	int id;
+	rte_event_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_event_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_event_dispatcher_finalizer {
+	int id;
+	rte_event_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_event_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_event_dispatcher_lcore_port ports[RED_MAX_PORTS_PER_LCORE];
+	struct rte_event_dispatcher_handler handlers[RED_MAX_HANDLERS];
+	struct rte_event_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_event_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_event_dispatcher_finalizer finalizers[RED_MAX_FINALIZERS];
+};
+
+static struct rte_event_dispatcher *dispatchers[UINT8_MAX + 1];
+
+static bool
+red_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_event_dispatcher *
+red_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+red_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define RED_VALID_ID_OR_RET_EINVAL(id)					\
+	do {								\
+		if (unlikely(!red_has_dispatcher(id))) {		\
+			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+static int
+red_lookup_handler_idx(struct rte_event_dispatcher_lcore *lcore,
+		       const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+red_prioritize_handler(struct rte_event_dispatcher_lcore *lcore,
+		       int handler_idx)
+{
+	struct rte_event_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+red_consider_prioritize_handler(struct rte_event_dispatcher_lcore *lcore,
+				int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		red_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follows some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(RED_AVG_PRIO_INTERVAL) +
+			RED_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
+static inline void
+red_dispatch_events(struct rte_event_dispatcher *dispatcher,
+		    struct rte_event_dispatcher_lcore *lcore,
+		    struct rte_event_dispatcher_lcore_port *port,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[RED_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[RED_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = red_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		red_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+red_port_dequeue(struct rte_event_dispatcher *dispatcher,
+		 struct rte_event_dispatcher_lcore *lcore,
+		 struct rte_event_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		red_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+red_lcore_process(struct rte_event_dispatcher *dispatcher,
+		  struct rte_event_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += red_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
+static int32_t
+red_process(void *userdata)
+{
+	struct rte_event_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_event_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = red_lcore_process(dispatcher, lcore);
+
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+red_service_register(struct rte_event_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = red_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "red_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+red_service_unregister(struct rte_event_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	if (red_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("event dispatcher",
+				  sizeof(struct rte_event_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
+				 "dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_event_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = red_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	red_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_free(uint8_t id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = red_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	red_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int
+lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	struct rte_event_dispatcher_lcore_port *port;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore = &dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == RED_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_event_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_event_dispatcher_lcore_port *port;
+	struct rte_event_dispatcher_lcore_port *last;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore = &dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_event_dispatcher_handler*
+red_lcore_get_handler_by_id(struct rte_event_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	int i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+red_alloc_handler_id(struct rte_event_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+	struct rte_event_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
+	if (reference_lcore->num_handlers == RED_MAX_HANDLERS)
+		return -1;
+
+	while (red_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+red_lcore_install_handler(struct rte_event_dispatcher_lcore *lcore,
+		    const struct rte_event_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+red_install_handler(struct rte_event_dispatcher *dispatcher,
+		    const struct rte_event_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		red_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	handler.id = red_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	red_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+red_lcore_uninstall_handler(struct rte_event_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	struct rte_event_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = red_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL)
+		return -EINVAL;
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_event_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+red_uninstall_handler(struct rte_event_dispatcher *dispatcher,
+		      int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = red_lcore_uninstall_handler(lcore, handler_id);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = red_uninstall_handler(dispatcher, handler_id);
+
+	return rc;
+}
+
+static struct rte_event_dispatcher_finalizer*
+red_get_finalizer_by_id(struct rte_event_dispatcher *dispatcher,
+		       int finalizer_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == finalizer_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+red_alloc_finalizer_id(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (red_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_event_dispatcher_finalizer *
+red_alloc_finalizer(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == RED_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = red_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			      rte_event_dispatcher_finalize_t finalize_fun,
+			      void *finalize_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	finalizer = red_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int reg_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	unreg_finalizer = red_get_finalizer_by_id(dispatcher, reg_id);
+
+	if (unreg_finalizer == NULL)
+		return -EINVAL;
+
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain finalizer order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_event_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static void
+red_aggregate_stats(struct rte_event_dispatcher_stats *result,
+		    const struct rte_event_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats)
+{
+	struct rte_event_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	*stats = (struct rte_event_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		red_aggregate_stats(stats, &lcore->stats);
+	}
+
+	return 0;
+}
+
+static int
+red_set_service_runstate(uint8_t id, int state)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_start(uint8_t id)
+{
+	return red_set_service_runstate(id, 1);
+}
+
+int
+rte_event_dispatcher_stop(uint8_t id)
+{
+	return red_set_service_runstate(id, 0);
+}
diff --git a/lib/eventdev/rte_event_dispatcher.h b/lib/eventdev/rte_event_dispatcher.h
new file mode 100644
index 0000000000..927e7e0b3c
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_EVENT_DISPATCHER_H__
+#define __RTE_EVENT_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Dispatcher
+ *
+ * The purpose of the event dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * event dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ *
+ * @return
+ *   Returns true in case this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_event_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the event dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ */
+typedef void
+(*rte_event_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				  struct rte_event *events, uint16_t num,
+				  void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the event dispatcher to notify
+ * the application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_finalize_register().
+ */
+typedef void
+(*rte_event_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				   void *cb_data);
+
+/**
+ * Event dispatcher statistics
+ */
+struct rte_event_dispatcher_stats {
+	uint64_t poll_count;
+	/**< Number of event dequeue calls made toward the event device. */
+	uint64_t ev_dispatch_count;
+	/**< Number of events dispatched to a handler.*/
+	uint64_t ev_drop_count;
+	/**< Number of events dropped because no handler was found. */
+};
+
+/**
+ * Create an event dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all event dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this event dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Free an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the event dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * event dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the event dispatcher service is mapped (with
+ * rte_service_map_lcore_set()) to an lcore for which no ports are
+ * bound, the service function will be a no-operation.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of event to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the event dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events for which no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order in which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different process callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] is dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the event dispatcher may choose to deliver first [ev0,
+ * ev2] using A's process callback, and then [ev1] to B - or vice
+ * versa.
+ *
+ * rte_event_dispatcher_register() is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_cb_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_event_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the event dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_event_dispatcher_finalize_register() is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			    rte_event_dispatcher_finalize_t finalize_fun,
+			    void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_event_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int reg_id);
+
+/**
+ * Start an event dispatcher instance.
+ *
+ * Enables the event dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_event_dispatcher_start().
+ *
+ * For the event dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. An event dispatcher's
+ * service id is retrieved using rte_event_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the event dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_event_dispatcher_bind_port_to_lcore(),
+ * prior to starting the event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_start(uint8_t id);
+
+/**
+ * Stop a running event dispatcher instance.
+ *
+ * Disables the event dispatcher service.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stop(uint8_t id);
+
+/**
+ * Retrieve statistics for an event dispatcher instance.
+ *
+ * This function is MT safe and may be called from any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_DISPATCHER_H__ */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 89068a5713..d3aa878686 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -131,6 +131,19 @@ EXPERIMENTAL {
 	rte_event_eth_tx_adapter_runtime_params_init;
 	rte_event_eth_tx_adapter_runtime_params_set;
 	rte_event_timer_remaining_ticks_get;
+
+	rte_event_dispatcher_create;
+	rte_event_dispatcher_free;
+	rte_event_dispatcher_service_id_get;
+	rte_event_dispatcher_bind_port_to_lcore;
+	rte_event_dispatcher_unbind_port_from_lcore;
+	rte_event_dispatcher_register;
+	rte_event_dispatcher_unregister;
+	rte_event_dispatcher_finalize_register;
+	rte_event_dispatcher_finalize_unregister;
+	rte_event_dispatcher_start;
+	rte_event_dispatcher_stop;
+	rte_event_dispatcher_stats_get;
 };
 
 INTERNAL {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [RFC v4 2/3] test: add event dispatcher test suite
  2023-06-09  7:08                   ` [RFC v4 0/3] Add " Mattias Rönnblom
  2023-06-09  7:08                     ` [RFC v4 1/3] eventdev: introduce " Mattias Rönnblom
@ 2023-06-09  7:08                     ` Mattias Rönnblom
  2023-06-09  7:08                     ` [RFC v4 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-09  7:08 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Mattias Rönnblom

Add unit tests for the event dispatcher.

--

RFC v4: Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 app/test/meson.build             |   1 +
 app/test/test_event_dispatcher.c | 814 +++++++++++++++++++++++++++++++
 2 files changed, 815 insertions(+)
 create mode 100644 app/test/test_event_dispatcher.c

diff --git a/app/test/meson.build b/app/test/meson.build
index b9b5432496..fac3b6b88b 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -50,6 +50,7 @@ test_sources = files(
         'test_errno.c',
         'test_ethdev_link.c',
         'test_event_crypto_adapter.c',
+        'test_event_dispatcher.c',
         'test_event_eth_rx_adapter.c',
         'test_event_ring.c',
         'test_event_timer_adapter.c',
diff --git a/app/test/test_event_dispatcher.c b/app/test/test_event_dispatcher.c
new file mode 100644
index 0000000000..a6ce430e21
--- /dev/null
+++ b/app/test/test_event_dispatcher.c
@@ -0,0 +1,814 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include "test.h"
+
+#include <stdatomic.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_event_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+
+#define NUM_WORKERS 3
+
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	uint8_t dispatcher_id;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	struct app_queue queues[NUM_QUEUES];
+
+	bool running;
+
+	atomic_int completed_events;
+	atomic_int errors;
+};
+
+#define RETURN_ON_ERROR(rc) \
+	do {					\
+		if (rc != TEST_SUCCESS)		\
+			return rc;		\
+	} while (0)
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+	TEST_ASSERT(rc >= 0, "Unable to get event device id");
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc < 0)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher_id = rte_rand_max(256);
+
+	rc = rte_event_dispatcher_create(app->dispatcher_id,
+					 app->event_dev_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to create event dispatcher");
+
+	rc = rte_event_dispatcher_service_id_get(app->dispatcher_id,
+						 &app->dispatcher_service_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to get event dispatcher service ID");
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rte_event_dispatcher_free(app->dispatcher_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_event_dispatcher_bind_port_to_lcore(
+			app->dispatcher_id, WORKER_PORT_ID(i),
+			DEQUEUE_BURST_SIZE, 0, lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_event_dispatcher_unbind_port_from_lcore(
+			app->dispatcher_id,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+		       struct rte_event *in_events, uint16_t num,
+		       void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		atomic_fetch_add_explicit(&app->completed_events, num,
+					  memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_event_dispatcher_register(app->dispatcher_id,
+						       match_queue,
+						       (void *)queue_id,
+						       test_app_process_queue,
+						       app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	int rc = rte_event_dispatcher_unregister(app->dispatcher_id, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		int rc;
+
+		rc = test_app_unregister_callback(app, i);
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dispatcher_start(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dispatcher_stop(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
+			    "Too few lcores. Needs at least %d worker lcores",
+			    NUM_SERVICE_CORES);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_dispatcher(app);
+
+	app->running = true;
+
+	return rc;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_stop_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_create_dispatcher(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_setup_service_cores(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_register_callbacks(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_bind_ports(test_app);
+
+	return rc;
+}
+
+static void test_teardown(void)
+{
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return atomic_load_explicit(&app->completed_events,
+				    memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return atomic_load_explicit(&app->errors, memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event_dispatcher_stats stats;
+	rc = rte_event_dispatcher_stats_get(test_app->dispatcher_id,
+					    &stats);
+	TEST_ASSERT_SUCCESS(rc, "Unable to retrieve dispatcher statistics");
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue = 1;
+	struct rte_event_dispatcher_stats stats;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rc = rte_event_dispatcher_stats_get(test_app->dispatcher_id,
+						    &stats);
+		RETURN_ON_ERROR(rc);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_event_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_TEST_COMMAND(event_dispatcher_autotest, test_event_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [RFC v4 3/3] doc: add event dispatcher programming guide
  2023-06-09  7:08                   ` [RFC v4 0/3] Add " Mattias Rönnblom
  2023-06-09  7:08                     ` [RFC v4 1/3] eventdev: introduce " Mattias Rönnblom
  2023-06-09  7:08                     ` [RFC v4 2/3] test: add event dispatcher test suite Mattias Rönnblom
@ 2023-06-09  7:08                     ` Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-09  7:08 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Mattias Rönnblom

Provide a programming guide for the event dispatcher.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 doc/api/doxy-api-index.md                  |   1 +
 doc/guides/prog_guide/event_dispatcher.rst | 443 +++++++++++++++++++++
 doc/guides/prog_guide/index.rst            |   1 +
 3 files changed, 445 insertions(+)
 create mode 100644 doc/guides/prog_guide/event_dispatcher.rst

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index c709fd48ad..05b22057f9 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -29,6 +29,7 @@ The public API headers are grouped by topics:
   [event_eth_tx_adapter](@ref rte_event_eth_tx_adapter.h),
   [event_timer_adapter](@ref rte_event_timer_adapter.h),
   [event_crypto_adapter](@ref rte_event_crypto_adapter.h),
+  [event_dispatcher](@ref rte_event_dispatcher.h),
   [rawdev](@ref rte_rawdev.h),
   [metrics](@ref rte_metrics.h),
   [bitrate](@ref rte_bitrate.h),
diff --git a/doc/guides/prog_guide/event_dispatcher.rst b/doc/guides/prog_guide/event_dispatcher.rst
new file mode 100644
index 0000000000..d3386bc609
--- /dev/null
+++ b/doc/guides/prog_guide/event_dispatcher.rst
@@ -0,0 +1,443 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Event Dispatcher
+================
+
+Overview
+--------
+
+The purpose of the event dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the event dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The event dispatcher replaces the conditional logic that follows an
+event device dequeue operation, where events are dispatched to
+different parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B expects events on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int n;
+    
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+                                        MAX_BURST, 0);
+    
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+    
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example, ``module_a_process()`` is
+broken into two processing stages — a module-internal affair — the
+shared conditional code must be updated to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The event dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the event dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive it, using
+  a set of application-provided, per-handler event matching callback
+  functions.
+* Provide events matching a particular handler, to that handler, using
+  its process callback.
+
+Had the above application made use of the event dispatcher, the code
+relevant for its module A might have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+           return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+    
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+    
+    /* In the module's initialization code */
+    rte_event_dispatcher_register(EVENT_DISPATCHER_ID, module_a_match,
+                                  NULL, module_a_process_events,
+                                  module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or,
+phrased differently, what replaces the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the event
+dispatcher is run as a DPDK :doc:`Service <service_cores>`.
+
+The event dispatcher is a layer between the application and the event
+device in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
+
+Event Dispatcher Creation
+-------------------------
+
+An event dispatcher is created using
+``rte_event_dispatcher_create()``.
+
+The dispatcher id is provided by the application, and must be unique.
+
+The event device must be configured before the event dispatcher is
+created.
+
+Usually, only one event dispatcher is needed per event device. An
+event dispatcher handles exactly one event device.
+
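+Below is a minimal creation sketch, assuming an application-defined
+``EVENT_DISPATCHER_ID`` constant and an ``event_dev_id`` identifying
+an already-configured event device:
+
+.. code-block:: c
+
+    rte_event_dispatcher_create(EVENT_DISPATCHER_ID, event_dev_id);
+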
+An event dispatcher is freed using the ``rte_event_dispatcher_free()``
+function. The event dispatcher's service functions must not be running
+on any lcore at the point of this call.
+
+Event Port Binding
+------------------
+
+To be able to dequeue events, the event dispatcher must know which
+event ports are to be used, on all the lcores it uses. The application
+provides this information using
+``rte_event_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
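+Below is a sketch of such initialization code. The ``num_workers``,
+``worker_port_ids`` and ``worker_lcore_ids`` variables, as well as
+``MAX_BURST``, are assumptions made for the purpose of illustration:
+
+.. code-block:: c
+
+    for (i = 0; i < num_workers; i++)
+            rte_event_dispatcher_bind_port_to_lcore(
+                    EVENT_DISPATCHER_ID, worker_port_ids[i],
+                    MAX_BURST, 0, worker_lcore_ids[i]);
+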
+The ``rte_event_dispatcher_unbind_port_from_lcore()`` function is used
+this operation.
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the event
+dispatcher's service function is running on any lcore.
+
+Event Handlers
+--------------
+
+The event dispatcher handler is an interface between the event
+dispatcher and an application module, used to route events to the
+appropriate part of the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_event_dispatcher_match_t`` callback, whose job is to
+  decide if the event is to be the property of this handler.
+* The ``rte_event_dispatcher_process_t``, which is used by the
+  event dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the event dispatcher may impose
+an upper limit on the number of handlers. In addition, installing a
+large number of handlers increases event dispatcher overhead, although
+this does not necessarily translate to a system-level performance
+degradation. See the section on :ref:`Event Clustering` for more
+information.
+
+Handler registration and unregistration cannot safely be done while
+the event dispatcher's service function is running on any lcore.
+
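+A handler may later be removed using the registration id returned by
+``rte_event_dispatcher_register()``. Below is a sketch, where the
+(assumed) module-level variable ``module_a_reg_id`` holds the id of
+the handler registered in the earlier module A example:
+
+.. code-block:: c
+
+    /* In the module's initialization code */
+    module_a_reg_id =
+            rte_event_dispatcher_register(EVENT_DISPATCHER_ID,
+                                          module_a_match, NULL,
+                                          module_a_process_events,
+                                          module_a_data);
+
+    /* In the module's teardown code */
+    rte_event_dispatcher_unregister(EVENT_DISPATCHER_ID, module_a_reg_id);
+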
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides if an event should be
+delivered to this handler, or not.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
+
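+Matching need not be based on queue id alone; a match function may
+consult any ``rte_event`` field. Below is a sketch of a match callback
+for a hypothetical module C, which also considers the event's
+``sub_event_type`` (all module C names are assumptions made for the
+purpose of illustration):
+
+.. code-block:: c
+
+    static bool
+    module_c_match(const struct rte_event *event, void *cb_data)
+    {
+            return event->queue_id == MODULE_C_QUEUE_ID &&
+                    event->sub_event_type == MODULE_C_SUB_TYPE_X;
+    }
+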
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the event dispatcher's service
+function, upon the arrival of events to the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls being
+made. For example, if the event dispatcher dequeues two events
+from the event device, it may choose to find out the destination for
+the first event, and deliver it, and then continue to find out the
+destination for the second, and then deliver that event as well. The
+event dispatcher may also choose a strategy where no event is
+delivered until the destination handlers for both events have been
+determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The event dispatcher maintains the order of events destined for the
+same handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the event dispatcher delivers these events to the
+application.
+
+The event dispatcher *does not* guarantee to maintain the order of
+events delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block::
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The event dispatcher may deliver the events in the following manner:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The event dispatcher may also choose to cluster (group) all events
+destined for ``module_b_stage_0_process()`` into one array:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The event dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the event dispatcher may not do is to reorder
+event ``e1`` so that it precedes ``e0`` in the array passed to
+module B's stage 0 process callback.
+
+Although clustering requires some extra work for the event dispatcher,
+it leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The event dispatcher may be configured to notify one or more parts of
+the application when the matching and processing of a batch of events
+has completed.
+
+The ``rte_event_dispatcher_finalize_register`` call is used to
+register a finalize callback. The function
+``rte_event_dispatcher_finalize_unregister`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+module, or a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+    
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_event_dispatcher_finalize_register(EVENT_DISPATCHER_ID,
+                                           finalize_batch,
+                                           shared_event_buffer);
+
+The event dispatcher does not track any relationship between a handler
+and a finalize callback, and all finalize callbacks will be called, if
+(and only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the event dispatcher's service function is running on any
+lcore.
+
+Service
+-------
+
+The event dispatcher is a DPDK service, and is managed in a manner
+similar to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-configured event dispatcher
+(identified by ``EVENT_DISPATCHER_ID``) to that lcore.
+
+.. code-block:: c
+
+    static void
+    launch_event_dispatcher_core(unsigned lcore_id)
+    {
+            uint32_t service_id;
+    
+            rte_service_lcore_add(lcore_id);
+    
+            rte_event_dispatcher_service_id_get(EVENT_DISPATCHER_ID,
+                                                &service_id);
+    
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+    
+            rte_service_lcore_start(lcore_id);
+    
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the event dispatcher must be started.
+
+.. code-block:: c
+
+    rte_event_dispatcher_start(EVENT_DISPATCHER_ID);
+
+
+Multi-Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In the light of this, it makes sense to have the event dispatcher
+service be the only DPDK service on all lcores used for packet
+processing — at least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the event dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the event dispatcher into
+the application, were performed on the lcore regardless of whether
+any events were received, are prime targets for being converted into
+such auxiliary services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
+
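+Below is a sketch of how such a task might be packaged as a service of
+its own, using the same service component API the event dispatcher
+uses internally. The service name, the ``RTE_SERVICE_CAP_MT_SAFE``
+capability (reasonable here, since ``rte_timer_manage()`` operates on
+per-lcore state), and the choice to always return 0 are assumptions
+made for the purpose of illustration:
+
+.. code-block:: c
+
+    static int32_t
+    timer_service_run(void *userdata __rte_unused)
+    {
+            rte_timer_manage();
+
+            return 0;
+    }
+
+    static void
+    timer_service_register(void)
+    {
+            uint32_t service_id;
+            struct rte_service_spec service = {
+                    .callback = timer_service_run,
+                    .capabilities = RTE_SERVICE_CAP_MT_SAFE
+            };
+
+            snprintf(service.name, sizeof(service.name), "timer_service");
+
+            rte_service_component_register(&service, &service_id);
+
+            /* The service must also be mapped to the appropriate
+             * lcores, and have its run states set, following the same
+             * pattern as for the event dispatcher service above.
+             */
+    }
+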
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to assure resource reclamation occurs even though some
+lcores currently do not process any events.
+
+If more services than the event dispatcher service are mapped to a
+service lcore, it's important that the other services are well-behaved
+and don't interfere with event processing to the extent that the
+system's throughput and/or latency requirements are at risk of not
+being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore-count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 87333ee84a..74fcbcee6b 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -59,6 +59,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    event_dispatcher
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [RFC v4 1/3] eventdev: introduce event dispatcher
  2023-06-09  7:08                     ` [RFC v4 1/3] eventdev: introduce " Mattias Rönnblom
@ 2023-06-09 14:34                       ` Stephen Hemminger
  2023-06-09 17:51                         ` Mattias Rönnblom
  2023-06-14 17:25                       ` [PATCH 0/3] Add " Mattias Rönnblom
  1 sibling, 1 reply; 102+ messages in thread
From: Stephen Hemminger @ 2023-06-09 14:34 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: jerinj, Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson

On Fri, 9 Jun 2023 09:08:24 +0200
Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:

> +#define RED_MAX_PORTS_PER_LCORE 4
> +#define RED_MAX_HANDLERS 32
> +#define RED_MAX_FINALIZERS 16
> +#define RED_AVG_PRIO_INTERVAL 2000

RED in networking usually means something else.
Is there an alternative TLA that could be used here?

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [RFC v4 1/3] eventdev: introduce event dispatcher
  2023-06-09 14:34                       ` Stephen Hemminger
@ 2023-06-09 17:51                         ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-09 17:51 UTC (permalink / raw)
  To: Stephen Hemminger, Mattias Rönnblom
  Cc: jerinj, Jerin Jacob, dev, harry.van.haaren, peter.j.nilsson

On 2023-06-09 16:34, Stephen Hemminger wrote:
> On Fri, 9 Jun 2023 09:08:24 +0200
> Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:
> 
>> +#define RED_MAX_PORTS_PER_LCORE 4
>> +#define RED_MAX_HANDLERS 32
>> +#define RED_MAX_FINALIZERS 16
>> +#define RED_AVG_PRIO_INTERVAL 2000
> 
> RED in networking usually means something else.
> Is there an alternative TLA that could be used here?

I'll think of something. EVD maybe.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH 0/3] Add event dispatcher
  2023-06-09  7:08                     ` [RFC v4 1/3] eventdev: introduce " Mattias Rönnblom
  2023-06-09 14:34                       ` Stephen Hemminger
@ 2023-06-14 17:25                       ` Mattias Rönnblom
  2023-06-14 17:25                         ` [PATCH 1/3] eventdev: introduce " Mattias Rönnblom
                                           ` (2 more replies)
  1 sibling, 3 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-14 17:25 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Mattias Rönnblom

The purpose of the event dispatcher is to decouple different parts of
an application (e.g., processing pipeline stages), sharing the same
underlying event device.

The event dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the event dispatcher relies on application-supplied matching
callback functions to decide where to deliver events.

An event dispatcher is configured to dequeue events from a specific
event device, and ties into the service core framework, to do its (and
the application's) work.

The event dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the event dispatcher adds some overhead, experience suggests
that the net effect on the application (both synthetic benchmarks and
more real-world applications) may well be positive. This is primarily
due to clustering (see programming guide) reducing cache misses.

Benchmarking indicates that the overhead is ~10 cc/event (on a
large core), with a handful of active handlers.

The event dispatcher does not support run-time reconfiguration.

Mattias Rönnblom (3):
  eventdev: introduce event dispatcher
  test: add event dispatcher test suite
  doc: add event dispatcher programming guide

 app/test/meson.build                       |   1 +
 app/test/test_event_dispatcher.c           | 861 +++++++++++++++++++++
 doc/api/doxy-api-index.md                  |   1 +
 doc/guides/prog_guide/event_dispatcher.rst | 443 +++++++++++
 doc/guides/prog_guide/index.rst            |   1 +
 lib/eventdev/meson.build                   |   2 +
 lib/eventdev/rte_event_dispatcher.c        | 770 ++++++++++++++++++
 lib/eventdev/rte_event_dispatcher.h        | 448 +++++++++++
 lib/eventdev/version.map                   |  13 +
 9 files changed, 2540 insertions(+)
 create mode 100644 app/test/test_event_dispatcher.c
 create mode 100644 doc/guides/prog_guide/event_dispatcher.rst
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH 1/3] eventdev: introduce event dispatcher
  2023-06-14 17:25                       ` [PATCH 0/3] Add " Mattias Rönnblom
@ 2023-06-14 17:25                         ` Mattias Rönnblom
  2023-06-14 18:13                           ` Stephen Hemminger
  2023-06-16  7:40                           ` [PATCH v2 0/3] Add " Mattias Rönnblom
  2023-06-14 17:25                         ` [PATCH 2/3] test: add event dispatcher test suite Mattias Rönnblom
  2023-06-14 17:25                         ` [PATCH 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
  2 siblings, 2 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-14 17:25 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Mattias Rönnblom

The purpose of the event dispatcher is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the event dispatcher also provides a convenient and
flexible way for the application to use service cores for
application-level processing.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
Reviewed-by: Heng Wang <heng.wang@ericsson.com>

--

PATCH:
 o Change prefix from RED to EVD, to avoid confusion with random
   early detection.

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and cause
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 lib/eventdev/meson.build            |   2 +
 lib/eventdev/rte_event_dispatcher.c | 770 ++++++++++++++++++++++++++++
 lib/eventdev/rte_event_dispatcher.h | 448 ++++++++++++++++
 lib/eventdev/version.map            |  13 +
 4 files changed, 1233 insertions(+)
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 6edf98dfa5..c0edc744fe 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'rte_event_crypto_adapter.c',
         'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
+        'rte_event_dispatcher.c',
         'rte_event_ring.c',
         'rte_event_timer_adapter.c',
         'rte_eventdev.c',
@@ -27,6 +28,7 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_eth_tx_adapter.h',
+        'rte_event_dispatcher.h',
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_eventdev.h',
diff --git a/lib/eventdev/rte_event_dispatcher.c b/lib/eventdev/rte_event_dispatcher.c
new file mode 100644
index 0000000000..8e23754782
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.c
@@ -0,0 +1,770 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_event_dispatcher.h>
+
+#define EVD_MAX_PORTS_PER_LCORE 4
+#define EVD_MAX_HANDLERS 32
+#define EVD_MAX_FINALIZERS 16
+#define EVD_AVG_PRIO_INTERVAL 2000
+
+struct rte_event_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_event_dispatcher_handler {
+	int id;
+	rte_event_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_event_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_event_dispatcher_finalizer {
+	int id;
+	rte_event_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_event_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_event_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
+	struct rte_event_dispatcher_handler handlers[EVD_MAX_HANDLERS];
+	struct rte_event_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_event_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_event_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
+};
+
+static struct rte_event_dispatcher *dispatchers[UINT8_MAX];
+
+static bool
+evd_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_event_dispatcher *
+evd_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+evd_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define EVD_VALID_ID_OR_RET_EINVAL(id)					\
+	do {								\
+		if (unlikely(!evd_has_dispatcher(id))) {		\
+			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+static int
+evd_lookup_handler_idx(struct rte_event_dispatcher_lcore *lcore,
+		       const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+evd_prioritize_handler(struct rte_event_dispatcher_lcore *lcore,
+		       int handler_idx)
+{
+	struct rte_event_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+evd_consider_prioritize_handler(struct rte_event_dispatcher_lcore *lcore,
+				int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		evd_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follows some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
+			EVD_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
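+/*
+ * Sort the dequeued events into per-handler bursts ("clustering"),
+ * deliver each burst via the handler's process callback, and finally
+ * invoke any registered finalizers.
+ */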
+static inline void
+evd_dispatch_events(struct rte_event_dispatcher *dispatcher,
+		    struct rte_event_dispatcher_lcore *lcore,
+		    struct rte_event_dispatcher_lcore_port *port,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = evd_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		evd_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+evd_port_dequeue(struct rte_event_dispatcher *dispatcher,
+		 struct rte_event_dispatcher_lcore *lcore,
+		 struct rte_event_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		evd_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+evd_lcore_process(struct rte_event_dispatcher *dispatcher,
+		  struct rte_event_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += evd_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
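+/*
+ * The service function. To avoid hogging the CPU, only one batch per
+ * port is processed per call. Returning -EAGAIN signals to the
+ * service core framework that no useful work was performed.
+ */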
+static int32_t
+evd_process(void *userdata)
+{
+	struct rte_event_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_event_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = evd_lcore_process(dispatcher, lcore);
+
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+evd_service_register(struct rte_event_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = evd_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "evd_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+evd_service_unregister(struct rte_event_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	if (evd_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("event dispatcher",
+				  sizeof(struct rte_event_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
+				 "dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_event_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = evd_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	evd_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_free(uint8_t id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = evd_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	evd_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int
+lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	struct rte_event_dispatcher_lcore_port *port;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_event_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_event_dispatcher_lcore_port *port;
+	struct rte_event_dispatcher_lcore_port *last;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_event_dispatcher_handler *
+evd_lcore_get_handler_by_id(struct rte_event_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	int i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
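+/*
+ * Allocate a free handler identifier, using lcore 0's handler list as
+ * the reference. All lcores are kept with identical handler sets, so
+ * any lcore's list would yield the same result.
+ */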
+static int
+evd_alloc_handler_id(struct rte_event_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+	struct rte_event_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
+	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
+		return -1;
+
+	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+evd_lcore_install_handler(struct rte_event_dispatcher_lcore *lcore,
+		    const struct rte_event_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
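+/*
+ * Install the handler on every lcore. Each lcore keeps a private copy
+ * of the handler list, which allows the service function to iterate
+ * over the handlers without any inter-lcore synchronization.
+ */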
+static void
+evd_install_handler(struct rte_event_dispatcher *dispatcher,
+		    const struct rte_event_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		evd_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	handler.id = evd_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	evd_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+evd_lcore_uninstall_handler(struct rte_event_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	struct rte_event_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL)
+		return -EINVAL;
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_event_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+evd_uninstall_handler(struct rte_event_dispatcher *dispatcher,
+		      int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = evd_lcore_uninstall_handler(lcore, handler_id);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = evd_uninstall_handler(dispatcher, handler_id);
+
+	return rc;
+}
+
+static struct rte_event_dispatcher_finalizer *
+evd_get_finalizer_by_id(struct rte_event_dispatcher *dispatcher,
+			int finalizer_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == finalizer_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_finalizer_id(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_event_dispatcher_finalizer *
+evd_alloc_finalizer(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = evd_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			      rte_event_dispatcher_finalize_t finalize_fun,
+			      void *finalize_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	finalizer = evd_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int reg_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, reg_id);
+
+	if (unreg_finalizer == NULL)
+		return -EINVAL;
+
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain finalizer order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_event_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static void
+evd_aggregate_stats(struct rte_event_dispatcher_stats *result,
+		    const struct rte_event_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats)
+{
+	struct rte_event_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	*stats = (struct rte_event_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		evd_aggregate_stats(stats, &lcore->stats);
+	}
+
+	return 0;
+}
+
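+/*
+ * A failure to set the service component run state indicates an
+ * inconsistency in the dispatcher's internal state (e.g., an invalid
+ * service id), which is why it is treated as an assertion failure
+ * rather than a recoverable runtime error.
+ */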
+static int
+evd_set_service_runstate(uint8_t id, int state)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_start(uint8_t id)
+{
+	return evd_set_service_runstate(id, 1);
+}
+
+int
+rte_event_dispatcher_stop(uint8_t id)
+{
+	return evd_set_service_runstate(id, 0);
+}
diff --git a/lib/eventdev/rte_event_dispatcher.h b/lib/eventdev/rte_event_dispatcher.h
new file mode 100644
index 0000000000..927e7e0b3c
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_EVENT_DISPATCHER_H__
+#define __RTE_EVENT_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Dispatcher
+ *
+ * The purpose of the event dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * event dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ *
+ * @return
+ *   Returns true if this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_event_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the event dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ */
+typedef void
+(*rte_event_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				  struct rte_event *events, uint16_t num,
+				  void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the event dispatcher to notify
+ * the application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_finalize_register().
+ */
+typedef void
+(*rte_event_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				   void *cb_data);
+
+/**
+ * Event dispatcher statistics
+ */
+struct rte_event_dispatcher_stats {
+	uint64_t poll_count;
+	/**< Number of event dequeue calls made toward the event device. */
+	uint64_t ev_dispatch_count;
+	/**< Number of events dispatched to a handler.*/
+	uint64_t ev_drop_count;
+	/**< Number of events dropped because no handler was found. */
+};
+
+/**
+ * Create an event dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all event dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this event dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Free an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the event dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * event dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the event dispatcher service is mapped (with
+ * rte_service_map_lcore_set()) to an lcore for which no ports are
+ * bound, the service function will be a no-operation.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of event to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the event dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events for which no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order in which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different process callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] is dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the event dispatcher may choose to deliver first
+ * [ev0, ev2] using A's process function, and then [ev1] to B - or
+ * vice versa.
+ * rte_event_dispatcher_register() is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_cb_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_event_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the event dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_event_dispatcher_finalize_register() is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			    rte_event_dispatcher_finalize_t finalize_fun,
+			    void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_event_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int reg_id);
+
+/**
+ * Start an event dispatcher instance.
+ *
+ * Enables the event dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_event_dispatcher_start().
+ *
+ * For the event dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. An event dispatcher's
+ * service id is retrieved using rte_event_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the event dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_event_dispatcher_bind_port_to_lcore(),
+ * prior to starting the event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_start(uint8_t id);
+
+/**
+ * Stop a running event dispatcher instance.
+ *
+ * Disables the event dispatcher service.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stop(uint8_t id);
+
+/**
+ * Retrieve statistics for an event dispatcher instance.
+ *
+ * This function is MT safe and may be called from any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_DISPATCHER_H__ */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 89068a5713..d3aa878686 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -131,6 +131,19 @@ EXPERIMENTAL {
 	rte_event_eth_tx_adapter_runtime_params_init;
 	rte_event_eth_tx_adapter_runtime_params_set;
 	rte_event_timer_remaining_ticks_get;
+
+	rte_event_dispatcher_create;
+	rte_event_dispatcher_free;
+	rte_event_dispatcher_service_id_get;
+	rte_event_dispatcher_bind_port_to_lcore;
+	rte_event_dispatcher_unbind_port_from_lcore;
+	rte_event_dispatcher_register;
+	rte_event_dispatcher_unregister;
+	rte_event_dispatcher_finalize_register;
+	rte_event_dispatcher_finalize_unregister;
+	rte_event_dispatcher_start;
+	rte_event_dispatcher_stop;
+	rte_event_dispatcher_stats_get;
 };
 
 INTERNAL {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH 2/3] test: add event dispatcher test suite
  2023-06-14 17:25                       ` [PATCH 0/3] Add " Mattias Rönnblom
  2023-06-14 17:25                         ` [PATCH 1/3] eventdev: introduce " Mattias Rönnblom
@ 2023-06-14 17:25                         ` Mattias Rönnblom
  2023-06-14 17:25                         ` [PATCH 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-14 17:25 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Mattias Rönnblom

Add unit tests for the event dispatcher.

--

PATCH:
 o Extend test to cover often-used handler optimization feature.
RFC v4:
 o Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 app/test/meson.build             |   1 +
 app/test/test_event_dispatcher.c | 861 +++++++++++++++++++++++++++++++
 2 files changed, 862 insertions(+)
 create mode 100644 app/test/test_event_dispatcher.c

diff --git a/app/test/meson.build b/app/test/meson.build
index b9b5432496..fac3b6b88b 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -50,6 +50,7 @@ test_sources = files(
         'test_errno.c',
         'test_ethdev_link.c',
         'test_event_crypto_adapter.c',
+        'test_event_dispatcher.c',
         'test_event_eth_rx_adapter.c',
         'test_event_ring.c',
         'test_event_timer_adapter.c',
diff --git a/app/test/test_event_dispatcher.c b/app/test/test_event_dispatcher.c
new file mode 100644
index 0000000000..356ef8df44
--- /dev/null
+++ b/app/test/test_event_dispatcher.c
@@ -0,0 +1,861 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include "test.h"
+
+#include <stdatomic.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_event_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+
+#define NUM_WORKERS 3
+
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	uint8_t dispatcher_id;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	int never_match_reg_id;
+	uint64_t never_match_count;
+	uint64_t never_process_count;
+
+	struct app_queue queues[NUM_QUEUES];
+
+	bool running;
+
+	atomic_int completed_events;
+	atomic_int errors;
+};
+
+#define RETURN_ON_ERROR(rc) \
+	do {					\
+		if ((rc) != TEST_SUCCESS)	\
+			return (rc);		\
+	} while (0)
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+	TEST_ASSERT(rc >= 0, "Unable to retrieve event device id");
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc < 0)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher_id = rte_rand_max(256);
+
+	rc = rte_event_dispatcher_create(app->dispatcher_id,
+					 app->event_dev_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to create event dispatcher");
+
+	rc = rte_event_dispatcher_service_id_get(app->dispatcher_id,
+						 &app->dispatcher_service_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to get event dispatcher service ID");
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Error enabling dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rc = rte_event_dispatcher_free(app->dispatcher_id);
+	TEST_ASSERT_SUCCESS(rc, "Error freeing event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_event_dispatcher_bind_port_to_lcore(
+			app->dispatcher_id, WORKER_PORT_ID(i),
+			DEQUEUE_BURST_SIZE, 0, lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_event_dispatcher_unbind_port_from_lcore(
+			app->dispatcher_id,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+		       struct rte_event *in_events, uint16_t num,
+		       void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		atomic_fetch_add_explicit(&app->completed_events, num,
+					  memory_order_relaxed);
+}
+
+static bool
+never_match(const struct rte_event *event __rte_unused, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count)++;
+
+	return false;
+}
+
+static void
+test_app_never_process(uint8_t event_dev_id __rte_unused,
+		       uint8_t event_port_id __rte_unused,
+		       struct rte_event *in_events __rte_unused,
+		       uint16_t num, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count) += num;
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
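+	/*
+	 * Register a handler which matches no events. The test later
+	 * verifies that its process function is never called, and that
+	 * the dispatcher invokes this never-matching match function
+	 * relatively rarely.
+	 */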
+	app->never_match_reg_id =
+		rte_event_dispatcher_register(app->dispatcher_id,
+					      never_match,
+					      &app->never_match_count,
+					      test_app_never_process,
+					      &app->never_process_count);
+
+	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
+		    "never-match handler");
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_event_dispatcher_register(app->dispatcher_id,
+						       match_queue,
+						       (void *)queue_id,
+						       test_app_process_queue,
+						       app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	int rc = rte_event_dispatcher_unregister(app->dispatcher_id, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		int rc;
+
+		rc = test_app_unregister_callback(app, i);
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dispatcher_start(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dispatcher_stop(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
+			    "Too few lcores. Needs at least %d worker lcores",
+			    NUM_SERVICE_CORES);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_dispatcher(app);
+
+	app->running = true;
+
+	return rc;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_stop_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+static struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_create_dispatcher(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_setup_service_cores(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_register_callbacks(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_bind_ports(test_app);
+
+	return rc;
+}
+
+static void test_teardown(void)
+{
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return atomic_load_explicit(&app->completed_events,
+				    memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return atomic_load_explicit(&app->errors, memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event_dispatcher_stats stats;
+	rc = rte_event_dispatcher_stats_get(test_app->dispatcher_id,
+					    &stats);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	TEST_ASSERT_EQUAL(test_app->never_process_count, 0, "Never-match "
+			  "handler's process function has been called");
+
+	/*
+	 * The event dispatcher should call often-matching match functions
+	 * more often, and thus this never-matching match function should
+	 * be called relatively infrequently.
+	 */
+	TEST_ASSERT(test_app->never_match_count <
+		    (stats.ev_dispatch_count / 4),
+		    "Never-matching match function called suspiciously often");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue = 1;
+	struct rte_event_dispatcher_stats stats;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
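+	/*
+	 * Wait for the event dispatcher service to pick up the event,
+	 * either dropping it (the expected outcome) or dispatching it
+	 * (a failure, caught by the assertions below).
+	 */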
+	do {
+		rc = rte_event_dispatcher_stats_get(test_app->dispatcher_id,
+						    &stats);
+		RETURN_ON_ERROR(rc);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_event_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_TEST_COMMAND(event_dispatcher_autotest, test_event_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH 3/3] doc: add event dispatcher programming guide
  2023-06-14 17:25                       ` [PATCH 0/3] Add " Mattias Rönnblom
  2023-06-14 17:25                         ` [PATCH 1/3] eventdev: introduce " Mattias Rönnblom
  2023-06-14 17:25                         ` [PATCH 2/3] test: add event dispatcher test suite Mattias Rönnblom
@ 2023-06-14 17:25                         ` Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-14 17:25 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Mattias Rönnblom

Provide a programming guide for the event dispatcher.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

PATCH:
 o Improve grammar and spelling.
RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 doc/api/doxy-api-index.md                  |   1 +
 doc/guides/prog_guide/event_dispatcher.rst | 443 +++++++++++++++++++++
 doc/guides/prog_guide/index.rst            |   1 +
 3 files changed, 445 insertions(+)
 create mode 100644 doc/guides/prog_guide/event_dispatcher.rst

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index c709fd48ad..05b22057f9 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -29,6 +29,7 @@ The public API headers are grouped by topics:
   [event_eth_tx_adapter](@ref rte_event_eth_tx_adapter.h),
   [event_timer_adapter](@ref rte_event_timer_adapter.h),
   [event_crypto_adapter](@ref rte_event_crypto_adapter.h),
+  [event_dispatcher](@ref rte_event_dispatcher.h),
   [rawdev](@ref rte_rawdev.h),
   [metrics](@ref rte_metrics.h),
   [bitrate](@ref rte_bitrate.h),
diff --git a/doc/guides/prog_guide/event_dispatcher.rst b/doc/guides/prog_guide/event_dispatcher.rst
new file mode 100644
index 0000000000..ca670ce4d4
--- /dev/null
+++ b/doc/guides/prog_guide/event_dispatcher.rst
@@ -0,0 +1,443 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Event Dispatcher
+================
+
+Overview
+--------
+
+The purpose of the event dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the event dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The event dispatcher replaces the conditional logic that follows an
+event device dequeue operation, where events are dispatched to
+different parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int i, n;
+    
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+	                                MAX_BURST, 0);
+    
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+    
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example, ``module_a_process()`` is
+broken into two processing stages (a module-internal affair), the
+shared conditional code must be updated to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The event dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the event dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive the event, using
+  a set of application-provided, per-handler event matching callback
+  functions.
+* Deliver the events matching a particular handler to that handler,
+  using its process callback.
+
+Had the above application made use of the event dispatcher, the code
+relevant for its module A might have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+           return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+    
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            struct rte_event *events,
+			    uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+    
+    /* In the module's initialization code */
+    rte_event_dispatcher_register(EVENT_DISPATCHER_ID, module_a_match,
+                                  NULL, module_a_process_events,
+				  module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or
+phrased differently, what is replacing the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the event
+dispatcher is run as a DPDK :doc:`Service <service_cores>`.
+
+The event dispatcher is a layer between the application and the event
+device in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
+
+Event Dispatcher Creation
+-------------------------
+
+An event dispatcher is created using
+``rte_event_dispatcher_create()``.
+
+The dispatcher id is provided by the application, and must be unique.
+
+The event device must be configured before the event dispatcher is
+created.
+
+Usually, only one event dispatcher is needed per event device. An
+event dispatcher handles exactly one event device.
+
+An event dispatcher is freed using the ``rte_event_dispatcher_free()``
+function. The event dispatcher's service functions must not be running
+on any lcore at the point of this call.
+
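+A minimal creation and teardown sketch follows. The dispatcher id
+``EVENT_DISPATCHER_ID`` and the event device id ``event_dev_id`` are
+application-defined names, assumed for the purpose of this example:
+
+.. code-block:: c
+
+    /* The event device must be configured at this point. */
+    rte_event_dispatcher_create(EVENT_DISPATCHER_ID, event_dev_id);
+
+    /* ... bind ports, register handlers, start, run, stop ... */
+
+    /* The dispatcher service must no longer run on any lcore. */
+    rte_event_dispatcher_free(EVENT_DISPATCHER_ID);
+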
+Event Port Binding
+------------------
+
+To be able to dequeue events, the event dispatcher must know which
+event ports to use on each of the lcores it runs on. The application
+provides this information using
+``rte_event_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
+``rte_event_dispatcher_unbind_port_from_lcore()`` is used to undo
+this operation.
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the event
+dispatcher's service function is running on any lcore.
+
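+Below is a sketch of binding one event port on each service lcore,
+where ``port_for_lcore()`` is a hypothetical application helper,
+mapping an lcore id to the event port assigned to that lcore:
+
+.. code-block:: c
+
+    /* In the application's deployment code, for each service lcore,
+     * prior to starting the event dispatcher.
+     */
+    rte_event_dispatcher_bind_port_to_lcore(EVENT_DISPATCHER_ID,
+                                            port_for_lcore(lcore_id),
+                                            MAX_BURST, 0, lcore_id);
+
+A dequeue timeout of zero, as in this sketch, makes the underlying
+``rte_event_dequeue_burst()`` calls non-blocking.
+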
+Event Handlers
+--------------
+
+The event dispatcher handler is an interface between the event
+dispatcher and an application module, used to route events to the
+appropriate part of the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_event_dispatcher_match_t`` callback, whose job is to
+  decide if this event is to be the property of this handler.
+* The ``rte_event_dispatcher_process_t``, which is used by the
+  event dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the event dispatcher may impose
+an upper limit on the number of handlers. In addition, installing a
+large number of handlers increases event dispatcher overhead, although
+this does not necessarily translate to a system-level performance
+degradation. See the section on :ref:`Event Clustering` for more
+information.
+
+Handler registration and unregistration cannot safely be done while
+the event dispatcher's service function is running on any lcore.
+
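+Below is a sketch of handler unregistration, assuming the module
+stored the registration id returned by
+``rte_event_dispatcher_register()`` in a variable named ``reg_id``:
+
+.. code-block:: c
+
+    /* In the module's teardown code. The dispatcher service must not
+     * be running on any lcore at this point.
+     */
+    rte_event_dispatcher_unregister(EVENT_DISPATCHER_ID, reg_id);
+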
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides if an event should be
+delivered to this handler, or not.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
+
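+Matching need not be based on the queue id. Below is a sketch of a
+match function selecting events on the ``sub_event_type`` field
+instead, where ``MODULE_C_SUB_EVENT_TYPE`` is a made-up application
+constant:
+
+.. code-block:: c
+
+    static bool
+    module_c_match(const struct rte_event *event, void *cb_data)
+    {
+            return event->sub_event_type == MODULE_C_SUB_EVENT_TYPE;
+    }
+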
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the event dispatcher's service
+function, upon the arrival of events to the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls
+being made. For example, if the event dispatcher dequeues two events
+from the event device, it may choose to find out the destination for
+the first event, and deliver it, and then continue to find out the
+destination for the second, and then deliver that event as well. The
+event dispatcher may also choose a strategy where no event is
+delivered until the destination handlers for both events have been
+determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The event dispatcher maintains the order of events destined for the
+same handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the event dispatcher delivers these events to the
+application.
+
+The event dispatcher *does not* guarantee to maintain the order of
+events delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block:: none
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The event dispatcher may deliver the events in the following manner:
+
+.. code-block:: none
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The event dispatcher may also choose to cluster (group) all events
+destined for ``module_b_stage_0_process()`` into one array:
+
+.. code-block:: none
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The event dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the event dispatcher may not do is to reorder
+event ``e1`` so that it precedes ``e0`` in the array passed to
+module B's stage 0 process callback.
+
+Although clustering requires some extra work for the event dispatcher,
+it leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The event dispatcher may be configured to notify one or more parts of
+the application when the matching and processing of a batch of events
+has completed.
+
+The ``rte_event_dispatcher_finalize_register()`` call is used to
+register a finalize callback. The function
+``rte_event_dispatcher_finalize_unregister()`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+module, or in a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+    
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_event_dispatcher_finalize_register(EVENT_DISPATCHER_ID,
+                                           finalize_batch,
+                                           shared_event_buffer);
+
+The event dispatcher does not track any relationship between a handler
+and a finalize callback, and all finalize callbacks will be called, if
+(and only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the event dispatcher's service function is running on any
+lcore.
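+
+A sketch of one possible way to meet this requirement follows. It
+assumes the application keeps the finalize registration id in
+``reg_id``, and that briefly pausing event processing is acceptable;
+``rte_service_may_be_active()`` is polled to make sure no lcore is
+still executing the dispatcher's service function.
+
+.. code-block:: c
+
+    uint32_t service_id;
+
+    rte_event_dispatcher_service_id_get(EVENT_DISPATCHER_ID, &service_id);
+
+    rte_event_dispatcher_stop(EVENT_DISPATCHER_ID);
+
+    /* wait until no lcore may be running the service function */
+    while (rte_service_may_be_active(service_id))
+            rte_pause();
+
+    rte_event_dispatcher_finalize_unregister(EVENT_DISPATCHER_ID, reg_id);
+
+    rte_event_dispatcher_start(EVENT_DISPATCHER_ID);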
+
+Service
+-------
+
+The event dispatcher is a DPDK service, and is managed in a manner
+similar to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-configured event dispatcher
+(identified by ``EVENT_DISPATCHER_ID``) to that lcore.
+
+.. code-block:: c
+
+    static void
+    launch_event_dispatcher_core(unsigned lcore_id)
+    {
+            uint32_t service_id;
+    
+            rte_service_lcore_add(lcore_id);
+    
+            rte_event_dispatcher_service_id_get(EVENT_DISPATCHER_ID,
+                                                &service_id);
+    
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+    
+            rte_service_lcore_start(lcore_id);
+    
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the event dispatcher must be started.
+
+.. code-block:: c
+
+    rte_event_dispatcher_start(EVENT_DISPATCHER_ID);
+
+
+Multi-Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In the light of this, it makes sense to have the event dispatcher
+service be the only DPDK service on all lcores used for packet
+processing — at least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the event dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the event dispatcher into
+the application, were performed on the lcore even when no events were
+received are prime targets for being converted into such auxiliary
+services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
+
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to assure that resource reclamation occurs even though some
+lcores currently do not process any events.
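+
+Below is a minimal sketch of such an auxiliary housekeeping service,
+here assumed to both run a per-lcore timer wheel and signal RCU
+quiescent state. The QSBR variable handed over as ``userdata``, and
+the use of the lcore id as the QSBR thread id, are
+application-specific assumptions.
+
+.. code-block:: c
+
+    static int32_t
+    housekeeping_service(void *userdata)
+    {
+            struct rte_rcu_qsbr *qsbr = userdata;
+
+            /* process any expired timers on this lcore's timer wheel */
+            rte_timer_manage();
+
+            /* flag that this lcore holds no RCU-protected references */
+            rte_rcu_qsbr_quiescent(qsbr, rte_lcore_id());
+
+            return 0;
+    }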
+
+If more services than the event dispatcher service are mapped to a
+service lcore, it's important that the other services are
+well-behaved and don't interfere with event processing to the extent
+that the system's throughput and/or latency requirements are at risk
+of not being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
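+
+For such deployments, and under the assumption that the RX adapter is
+implemented as a DPDK service (i.e., the event device lacks an
+internal port), mapping an already-created adapter (here with the
+hypothetical id ``rx_adapter_id``) to a dispatcher lcore may look
+like the following sketch:
+
+.. code-block:: c
+
+    uint32_t rx_service_id;
+
+    rte_event_eth_rx_adapter_service_id_get(rx_adapter_id,
+                                            &rx_service_id);
+
+    rte_service_map_lcore_set(rx_service_id, lcore_id, 1);
+    rte_service_runstate_set(rx_service_id, 1);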
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 87333ee84a..74fcbcee6b 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -59,6 +59,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    event_dispatcher
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH 1/3] eventdev: introduce event dispatcher
  2023-06-14 17:25                         ` [PATCH 1/3] eventdev: introduce " Mattias Rönnblom
@ 2023-06-14 18:13                           ` Stephen Hemminger
  2023-06-15  6:07                             ` Mattias Rönnblom
  2023-06-16  7:40                           ` [PATCH v2 0/3] Add " Mattias Rönnblom
  1 sibling, 1 reply; 102+ messages in thread
From: Stephen Hemminger @ 2023-06-14 18:13 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: jerinj, Jerin Jacob, hofors, dev, harry.van.haaren,
	peter.j.nilsson, Heng Wang

On Wed, 14 Jun 2023 19:25:25 +0200
Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:

> static struct rte_event_dispatcher_handler*
> +evd_lcore_get_handler_by_id(struct rte_event_dispatcher_lcore *lcore,
> +			    int handler_id)
> +{
> +	int i

in other places you used uint16_t for iterating over handles.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH 1/3] eventdev: introduce event dispatcher
  2023-06-14 18:13                           ` Stephen Hemminger
@ 2023-06-15  6:07                             ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-15  6:07 UTC (permalink / raw)
  To: Stephen Hemminger, Mattias Rönnblom
  Cc: jerinj, Jerin Jacob, dev, harry.van.haaren, peter.j.nilsson, Heng Wang

On 2023-06-14 20:13, Stephen Hemminger wrote:
> On Wed, 14 Jun 2023 19:25:25 +0200
> Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:
> 
>> static struct rte_event_dispatcher_handler*
>> +evd_lcore_get_handler_by_id(struct rte_event_dispatcher_lcore *lcore,
>> +			    int handler_id)
>> +{
>> +	int i
> 
> in other places you used uint16_t for iterating over handles.

I'll fix it. Thanks.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v2 0/3] Add event dispatcher
  2023-06-14 17:25                         ` [PATCH 1/3] eventdev: introduce " Mattias Rönnblom
  2023-06-14 18:13                           ` Stephen Hemminger
@ 2023-06-16  7:40                           ` Mattias Rönnblom
  2023-06-16  7:40                             ` [PATCH v2 1/3] eventdev: introduce " Mattias Rönnblom
                                               ` (2 more replies)
  1 sibling, 3 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-16  7:40 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Mattias Rönnblom

The purpose of the event dispatcher is to decouple different parts of
an application (e.g., processing pipeline stages), sharing the same
underlying event device.

The event dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the event dispatcher relies on application-supplied matching
callback functions to decide where to deliver events.

An event dispatcher is configured to dequeue events from a specific
event device, and ties into the service core framework, to do its (and
the application's) work.

The event dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the event dispatcher adds some overhead, experience suggests
that the net effect on the application (both synthetic benchmarks and
more real-world applications) may well be positive. This is primarily
due to clustering (see programming guide) reducing cache misses.

Benchmarking indicates that the overhead is ~10 clock cycles/event
(on a large core), with a handful of often-used handlers.

The event dispatcher does not support run-time reconfiguration.

Mattias Rönnblom (3):
  eventdev: introduce event dispatcher
  test: add event dispatcher test suite
  doc: add event dispatcher programming guide

 app/test/meson.build                       |    1 +
 app/test/test_event_dispatcher.c           | 1058 ++++++++++++++++++++
 doc/api/doxy-api-index.md                  |    1 +
 doc/guides/prog_guide/event_dispatcher.rst |  443 ++++++++
 doc/guides/prog_guide/index.rst            |    1 +
 lib/eventdev/meson.build                   |    2 +
 lib/eventdev/rte_event_dispatcher.c        |  793 +++++++++++++++
 lib/eventdev/rte_event_dispatcher.h        |  481 +++++++++
 lib/eventdev/version.map                   |   14 +
 9 files changed, 2794 insertions(+)
 create mode 100644 app/test/test_event_dispatcher.c
 create mode 100644 doc/guides/prog_guide/event_dispatcher.rst
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v2 1/3] eventdev: introduce event dispatcher
  2023-06-16  7:40                           ` [PATCH v2 0/3] Add " Mattias Rönnblom
@ 2023-06-16  7:40                             ` Mattias Rönnblom
  2023-08-18  6:09                               ` Jerin Jacob
  2023-09-04 13:03                               ` [PATCH v3 0/3] Add dispatcher library Mattias Rönnblom
  2023-06-16  7:40                             ` [PATCH v2 2/3] test: add event dispatcher test suite Mattias Rönnblom
  2023-06-16  7:40                             ` [PATCH v2 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
  2 siblings, 2 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-16  7:40 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Mattias Rönnblom

The purpose of the event dispatcher is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the event dispatcher also provides a convenient and
flexible way for the application to use service cores for
application-level processing.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
Reviewed-by: Heng Wang <heng.wang@ericsson.com>

--
PATCH v2:
 o Add dequeue batch count statistic.
 o Add statistics reset function to API.
 o Clarify MT safety guarantees (or lack thereof) in the API documentation.
 o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
   to be consistent with similar loops elsewhere in the dispatcher.
 o Fix variable names in finalizer unregister function.

PATCH:
 o Change prefix from RED to EVD, to avoid confusion with random
   early detection.

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and cause
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 lib/eventdev/meson.build            |   2 +
 lib/eventdev/rte_event_dispatcher.c | 793 ++++++++++++++++++++++++++++
 lib/eventdev/rte_event_dispatcher.h | 480 +++++++++++++++++
 lib/eventdev/version.map            |  14 +
 4 files changed, 1289 insertions(+)
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 6edf98dfa5..c0edc744fe 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'rte_event_crypto_adapter.c',
         'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
+        'rte_event_dispatcher.c',
         'rte_event_ring.c',
         'rte_event_timer_adapter.c',
         'rte_eventdev.c',
@@ -27,6 +28,7 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_eth_tx_adapter.h',
+        'rte_event_dispatcher.h',
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_eventdev.h',
diff --git a/lib/eventdev/rte_event_dispatcher.c b/lib/eventdev/rte_event_dispatcher.c
new file mode 100644
index 0000000000..d4bd39754a
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.c
@@ -0,0 +1,793 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_event_dispatcher.h>
+
+#define EVD_MAX_PORTS_PER_LCORE 4
+#define EVD_MAX_HANDLERS 32
+#define EVD_MAX_FINALIZERS 16
+#define EVD_AVG_PRIO_INTERVAL 2000
+
+struct rte_event_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_event_dispatcher_handler {
+	int id;
+	rte_event_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_event_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_event_dispatcher_finalizer {
+	int id;
+	rte_event_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_event_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_event_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
+	struct rte_event_dispatcher_handler handlers[EVD_MAX_HANDLERS];
+	struct rte_event_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_event_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_event_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
+};
+
+static struct rte_event_dispatcher *dispatchers[UINT8_MAX + 1];
+
+static bool
+evd_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_event_dispatcher *
+evd_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+evd_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define EVD_VALID_ID_OR_RET_EINVAL(id)					\
+	do {								\
+		if (unlikely(!evd_has_dispatcher(id))) {		\
+			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+static int
+evd_lookup_handler_idx(struct rte_event_dispatcher_lcore *lcore,
+		       const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+evd_prioritize_handler(struct rte_event_dispatcher_lcore *lcore,
+		       int handler_idx)
+{
+	struct rte_event_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+evd_consider_prioritize_handler(struct rte_event_dispatcher_lcore *lcore,
+				int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		evd_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follows some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
+			EVD_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
+static inline void
+evd_dispatch_events(struct rte_event_dispatcher *dispatcher,
+		    struct rte_event_dispatcher_lcore *lcore,
+		    struct rte_event_dispatcher_lcore_port *port,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
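+	/*
+	 * Sort the dequeued events into per-handler bursts, so that
+	 * each handler's process callback needs to be invoked at most
+	 * once per dequeue batch.
+	 */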
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = evd_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
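+	/*
+	 * Deliver the per-handler bursts. The loop may terminate early,
+	 * once all dequeued (and non-dropped) events have been handed
+	 * off to their handlers.
+	 */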
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		evd_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_batch_count++;
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+evd_port_dequeue(struct rte_event_dispatcher *dispatcher,
+		 struct rte_event_dispatcher_lcore *lcore,
+		 struct rte_event_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		evd_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+evd_lcore_process(struct rte_event_dispatcher *dispatcher,
+		  struct rte_event_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += evd_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
+static int32_t
+evd_process(void *userdata)
+{
+	struct rte_event_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_event_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = evd_lcore_process(dispatcher, lcore);
+
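+	/* returning -EAGAIN tells the service framework no work was done */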
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+evd_service_register(struct rte_event_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = evd_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "evd_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+evd_service_unregister(struct rte_event_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	if (evd_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("event dispatcher",
+				  sizeof(struct rte_event_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
+				 "dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_event_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = evd_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	evd_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_free(uint8_t id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = evd_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	evd_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int
+lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	struct rte_event_dispatcher_lcore_port *port;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	lcore = &dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_event_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_event_dispatcher_lcore_port *port;
+	struct rte_event_dispatcher_lcore_port *last;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	lcore = &dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_event_dispatcher_handler*
+evd_lcore_get_handler_by_id(struct rte_event_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_handler_id(struct rte_event_dispatcher *dispatcher)
+{
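+	/*
+	 * Every lcore keeps an identical copy of the handler list, so
+	 * lcore 0 may serve as the reference when searching for a free
+	 * handler id.
+	 */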
+	int handler_id = 0;
+	struct rte_event_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
+	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
+		return -1;
+
+	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+evd_lcore_install_handler(struct rte_event_dispatcher_lcore *lcore,
+		    const struct rte_event_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+evd_install_handler(struct rte_event_dispatcher *dispatcher,
+		    const struct rte_event_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		evd_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	handler.id = evd_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	evd_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+evd_lcore_uninstall_handler(struct rte_event_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	struct rte_event_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL)
+		return -EINVAL;
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_event_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+evd_uninstall_handler(struct rte_event_dispatcher *dispatcher,
+		      int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = evd_lcore_uninstall_handler(lcore, handler_id);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = evd_uninstall_handler(dispatcher, handler_id);
+
+	return rc;
+}
+
+static struct rte_event_dispatcher_finalizer*
+evd_get_finalizer_by_id(struct rte_event_dispatcher *dispatcher,
+		       int finalizer_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == finalizer_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_finalizer_id(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_event_dispatcher_finalizer *
+evd_alloc_finalizer(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = evd_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			      rte_event_dispatcher_finalize_t finalize_fun,
+			      void *finalize_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	finalizer = evd_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int reg_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, reg_id);
+
+	if (unreg_finalizer == NULL)
+		return -EINVAL;
+
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_event_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static int
+evd_set_service_runstate(uint8_t id, int state)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_start(uint8_t id)
+{
+	return evd_set_service_runstate(id, 1);
+}
+
+int
+rte_event_dispatcher_stop(uint8_t id)
+{
+	return evd_set_service_runstate(id, 0);
+}
+
+static void
+evd_aggregate_stats(struct rte_event_dispatcher_stats *result,
+		    const struct rte_event_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_batch_count += part->ev_batch_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats)
+{
+	struct rte_event_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	*stats = (struct rte_event_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		evd_aggregate_stats(stats, &lcore->stats);
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_stats_reset(uint8_t id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		lcore->stats = (struct rte_event_dispatcher_stats) {};
+	}
+
+	return 0;
+}
diff --git a/lib/eventdev/rte_event_dispatcher.h b/lib/eventdev/rte_event_dispatcher.h
new file mode 100644
index 0000000000..8847c8ac1c
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.h
@@ -0,0 +1,480 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_EVENT_DISPATCHER_H__
+#define __RTE_EVENT_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Dispatcher
+ *
+ * The purpose of the event dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * event dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ *
+ * @return
+ *   Returns true in case this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_event_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the event dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ */
+typedef void
+(*rte_event_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				  struct rte_event *events, uint16_t num,
+				  void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the event dispatcher to notify
+ * the application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_finalize_register().
+ */
+typedef void
+(*rte_event_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				   void *cb_data);
+
+/**
+ * Event dispatcher statistics
+ */
+struct rte_event_dispatcher_stats {
+	uint64_t poll_count;
+	/**< Number of event dequeue calls made toward the event device. */
+	uint64_t ev_batch_count;
+	/**< Number of non-empty event batches dequeued from event device.*/
+	uint64_t ev_dispatch_count;
+	/**< Number of events dispatched to a handler.*/
+	uint64_t ev_drop_count;
+	/**< Number of events dropped because no handler was found. */
+};
+
+/**
+ * Create an event dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all event dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this event dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Free an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the event dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * event dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the event dispatcher service is mapped (with
+ * rte_service_map_lcore_set()) to an lcore for which no ports are
+ * bound, the service function will be a no-operation.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the event dispatcher is running on
+ * the lcore specified by @c lcore_id.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the event dispatcher is running on
+ * the lcore specified by @c lcore_id.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of event to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the event dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events where no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order of which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different deliver callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] is dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * matching function for registration B for ev1. In that scenario, the
+ * event dispatcher may choose to deliver first [ev0, ev2] using A's
+ * deliver function, and then [ev1] to B - or vice versa.
+ *
+ * rte_event_dispatcher_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the event
+ * dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_cb_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the event dispatcher is running on
+ * any service lcore.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_event_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the event dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_event_dispatcher_finalize_register() may be called by any
+ * thread (including unregistered non-EAL threads), but not while the
+ * event dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			    rte_event_dispatcher_finalize_t finalize_fun,
+			    void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the event dispatcher is running on
+ * any service lcore.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_event_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int reg_id);
+
+/**
+ * Start an event dispatcher instance.
+ *
+ * Enables the event dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_event_dispatcher_start().
+ *
+ * For the event dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. An event dispatcher's
+ * service id is retrieved using rte_event_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the event dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_event_dispatcher_bind_port_to_lcore(),
+ * prior to starting the event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_start(uint8_t id);
+
+/**
+ * Stop a running event dispatcher instance.
+ *
+ * Disables the event dispatcher service.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stop(uint8_t id);
+
+/**
+ * Retrieve statistics for an event dispatcher instance.
+ *
+ * This function is MT safe and may be called by any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats);
+
+/**
+ * Reset statistics for an event dispatcher instance.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but may not produce the correct result if the
+ * event dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stats_reset(uint8_t id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_DISPATCHER_H__ */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 89068a5713..edf7ffe1b2 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -131,6 +131,20 @@ EXPERIMENTAL {
 	rte_event_eth_tx_adapter_runtime_params_init;
 	rte_event_eth_tx_adapter_runtime_params_set;
 	rte_event_timer_remaining_ticks_get;
+
+	rte_event_dispatcher_create;
+	rte_event_dispatcher_free;
+	rte_event_dispatcher_service_id_get;
+	rte_event_dispatcher_bind_port_to_lcore;
+	rte_event_dispatcher_unbind_port_from_lcore;
+	rte_event_dispatcher_register;
+	rte_event_dispatcher_unregister;
+	rte_event_dispatcher_finalize_register;
+	rte_event_dispatcher_finalize_unregister;
+	rte_event_dispatcher_start;
+	rte_event_dispatcher_stop;
+	rte_event_dispatcher_stats_get;
+	rte_event_dispatcher_stats_reset;
 };
 
 INTERNAL {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v2 2/3] test: add event dispatcher test suite
  2023-06-16  7:40                           ` [PATCH v2 0/3] Add " Mattias Rönnblom
  2023-06-16  7:40                             ` [PATCH v2 1/3] eventdev: introduce " Mattias Rönnblom
@ 2023-06-16  7:40                             ` Mattias Rönnblom
  2023-06-16  7:40                             ` [PATCH v2 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-16  7:40 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Mattias Rönnblom

Add unit tests for the event dispatcher.

--
PATCH v2:
 o Test finalize callback functionality.
 o Test handler and finalizer count upper limits.
 o Add statistics reset test.
 o Make sure dispatcher supplies the proper event dev id and port id back
   to the application.

PATCH:
 o Extend test to cover often-used handler optimization feature.

RFC v4:
 o Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 app/test/meson.build             |    1 +
 app/test/test_event_dispatcher.c | 1057 ++++++++++++++++++++++++++++++
 2 files changed, 1058 insertions(+)
 create mode 100644 app/test/test_event_dispatcher.c

diff --git a/app/test/meson.build b/app/test/meson.build
index b9b5432496..fac3b6b88b 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -50,6 +50,7 @@ test_sources = files(
         'test_errno.c',
         'test_ethdev_link.c',
         'test_event_crypto_adapter.c',
+        'test_event_dispatcher.c',
         'test_event_eth_rx_adapter.c',
         'test_event_ring.c',
         'test_event_timer_adapter.c',
diff --git a/app/test/test_event_dispatcher.c b/app/test/test_event_dispatcher.c
new file mode 100644
index 0000000000..0b0358603a
--- /dev/null
+++ b/app/test/test_event_dispatcher.c
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include "test.h"
+
+#include <stdatomic.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_event_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+
+#define NUM_WORKERS 3
+
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct cb_count {
+	uint8_t expected_event_dev_id;
+	uint8_t expected_event_port_id[RTE_MAX_LCORE];
+	atomic_int count;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	uint8_t dispatcher_id;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	int never_match_reg_id;
+	uint64_t never_match_count;
+	struct cb_count never_process_count;
+
+	struct app_queue queues[NUM_QUEUES];
+
+	int finalize_reg_id;
+	struct cb_count finalize_count;
+
+	bool running;
+
+	atomic_int completed_events;
+	atomic_int errors;
+};
+
+#define RETURN_ON_ERROR(rc) \
+	do {					\
+		if ((rc) != TEST_SUCCESS)	\
+			return (rc);		\
+	} while (0)
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc < 0)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
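+	/* pick a random dispatcher id; any value in [0, 255] should work */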
+	app->dispatcher_id = rte_rand_max(256);
+
+	rc = rte_event_dispatcher_create(app->dispatcher_id,
+					 app->event_dev_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to create event dispatcher");
+
+	rc = rte_event_dispatcher_service_id_get(app->dispatcher_id,
+						 &app->dispatcher_service_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to get event dispatcher service ID");
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rte_event_dispatcher_free(app->dispatcher_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	app->never_process_count.expected_event_dev_id =
+		app->event_dev_id;
+	app->finalize_count.expected_event_dev_id =
+		app->event_dev_id;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		uint8_t port_id = WORKER_PORT_ID(i);
+
+		int rc = rte_event_dispatcher_bind_port_to_lcore(
+			app->dispatcher_id, port_id, DEQUEUE_BURST_SIZE, 0,
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", port_id, lcore_id);
+
+		app->never_process_count.expected_event_port_id[lcore_id] =
+			port_id;
+		app->finalize_count.expected_event_port_id[lcore_id] = port_id;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_event_dispatcher_unbind_port_from_lcore(
+			app->dispatcher_id,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+		       struct rte_event *in_events, uint16_t num,
+		       void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
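+		/* The event device may accept only part of the burst;
+		 * retry until all forwarded events are enqueued. */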
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		atomic_fetch_add_explicit(&app->completed_events, num,
+					  memory_order_relaxed);
+}
+
+static bool
+never_match(const struct rte_event *event __rte_unused, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count)++;
+
+	return false;
+}
+
+static void
+test_app_never_process(uint8_t event_dev_id,
+		       uint8_t event_port_id,
+		       struct rte_event *in_events __rte_unused,
+		       uint16_t num, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		atomic_fetch_add_explicit(&count->count, num,
+					  memory_order_relaxed);
+}
+
+static void
+finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		atomic_fetch_add_explicit(&count->count, 1,
+					  memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	app->never_match_reg_id =
+		rte_event_dispatcher_register(app->dispatcher_id,
+					      never_match,
+					      &app->never_match_count,
+					      test_app_never_process,
+					      &app->never_process_count);
+
+	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
+		    "never-match handler");
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_event_dispatcher_register(app->dispatcher_id,
+						       match_queue,
+						       (void *)queue_id,
+						       test_app_process_queue,
+						       app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	app->finalize_reg_id =
+		rte_event_dispatcher_finalize_register(app->dispatcher_id,
+						       finalize,
+						       &app->finalize_count);
+	TEST_ASSERT(app->finalize_reg_id >= 0, "Error registering "
+		    "finalize callback");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+	int rc;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	rc = rte_event_dispatcher_unregister(app->dispatcher_id, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+	int rc;
+
+	if (app->never_match_reg_id >= 0) {
+		rc = rte_event_dispatcher_unregister(app->dispatcher_id,
+						     app->never_match_reg_id);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
+				    "handler");
+		app->never_match_reg_id = -1;
+	}
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		rc = test_app_unregister_callback(app, i);
+		RETURN_ON_ERROR(rc);
+	}
+
+	if (app->finalize_reg_id >= 0) {
+		rc = rte_event_dispatcher_finalize_unregister(
+			app->dispatcher_id, app->finalize_reg_id
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalize "
+				    "callback");
+		app->finalize_reg_id = -1;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dispatcher_start(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dispatcher_stop(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_reset_dispatcher_stats(struct test_app *app)
+{
+	int rc;
+	struct rte_event_dispatcher_stats stats;
+
+	rc = rte_event_dispatcher_stats_reset(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to reset event dispatcher statistics");
+
+	rc = rte_event_dispatcher_stats_get(app->dispatcher_id, &stats);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to retrieve event dispatcher "
+			    "statistics");
+
+	TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
+			  "not zero");
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
+			    "Too few lcores. Need at least %d worker lcores",
+			    NUM_SERVICE_CORES);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_dispatcher(app);
+
+	app->running = true;
+
+	return rc;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_stop_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_stop_event_dev(app);
+	RETURN_ON_ERROR(rc);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_create_dispatcher(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_setup_service_cores(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_register_callbacks(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_bind_ports(test_app);
+
+	return rc;
+}
+
+static void test_teardown(void)
+{
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return atomic_load_explicit(&app->completed_events,
+				    memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return atomic_load_explicit(&app->errors, memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
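+		/* The event device may accept only part of the burst;
+		 * retry until all events have been enqueued. */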
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event_dispatcher_stats stats;
+	rc = rte_event_dispatcher_stats_get(test_app->dispatcher_id,
+					    &stats);
+	TEST_ASSERT_SUCCESS(rc, "Unable to retrieve event dispatcher "
+			    "statistics");
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
+			  "Never-match handler's process function has "
+			  "been called");
+
+	int finalize_count =
+		atomic_load_explicit(&test_app->finalize_count.count,
+				     memory_order_relaxed);
+
+	TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
+	TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
+		    "Finalize count larger than event count");
+
+	TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
+			  "%"PRIu64" batches dequeued, but finalize called %d "
+			  "times", stats.ev_batch_count, finalize_count);
+
+	/*
+	 * The event dispatcher should call often-matching match functions
+	 * more often, and thus this never-matching match function should
+	 * be called relatively infrequently.
+	 */
+	TEST_ASSERT(test_app->never_match_count <
+		    (stats.ev_dispatch_count / 4),
+		    "Never-matching match function called suspiciously often");
+
+	rc = test_app_reset_dispatcher_stats(test_app);
+	RETURN_ON_ERROR(rc);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue;
+	struct rte_event_dispatcher_stats stats;
+
+	unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rc = rte_event_dispatcher_stats_get(test_app->dispatcher_id,
+						    &stats);
+		RETURN_ON_ERROR(rc);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+#define MORE_THAN_MAX_HANDLERS 1000
+#define MIN_HANDLERS 32
+
+static int
+test_many_handler_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_HANDLERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+
+	RETURN_ON_ERROR(rc);
+
+	for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
+		reg_id = rte_event_dispatcher_register(test_app->dispatcher_id,
+						       never_match, NULL,
+						       test_app_never_process,
+						       NULL);
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
+		    "after %d handler registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_event_dispatcher_unregister(test_app->dispatcher_id,
+						     reg_ids[i]);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+dummy_finalize(uint8_t event_dev_id __rte_unused,
+	       uint8_t event_port_id __rte_unused,
+	       void *cb_data __rte_unused)
+{
+}
+
+#define MORE_THAN_MAX_FINALIZERS 1000
+#define MIN_FINALIZERS 16
+
+static int
+test_many_finalize_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_FINALIZERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+
+	RETURN_ON_ERROR(rc);
+
+	for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
+		reg_id = rte_event_dispatcher_finalize_register(
+			test_app->dispatcher_id, dummy_finalize, NULL
+		);
+
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
+		    "already after %d registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_event_dispatcher_finalize_unregister(
+			test_app->dispatcher_id, reg_ids[i]
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_handler_registrations),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_finalize_registrations),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_event_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_TEST_COMMAND(event_dispatcher_autotest, test_event_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v2 3/3] doc: add event dispatcher programming guide
  2023-06-16  7:40                           ` [PATCH v2 0/3] Add " Mattias Rönnblom
  2023-06-16  7:40                             ` [PATCH v2 1/3] eventdev: introduce " Mattias Rönnblom
  2023-06-16  7:40                             ` [PATCH v2 2/3] test: add event dispatcher test suite Mattias Rönnblom
@ 2023-06-16  7:40                             ` Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-06-16  7:40 UTC (permalink / raw)
  To: jerinj
  Cc: Jerin Jacob, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Mattias Rönnblom

Provide a programming guide for the event dispatcher.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

PATCH:
 o Improve grammar and spelling.
RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 doc/api/doxy-api-index.md                  |   1 +
 doc/guides/prog_guide/event_dispatcher.rst | 443 +++++++++++++++++++++
 doc/guides/prog_guide/index.rst            |   1 +
 3 files changed, 445 insertions(+)
 create mode 100644 doc/guides/prog_guide/event_dispatcher.rst

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index c709fd48ad..05b22057f9 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -29,6 +29,7 @@ The public API headers are grouped by topics:
   [event_eth_tx_adapter](@ref rte_event_eth_tx_adapter.h),
   [event_timer_adapter](@ref rte_event_timer_adapter.h),
   [event_crypto_adapter](@ref rte_event_crypto_adapter.h),
+  [event_dispatcher](@ref rte_event_dispatcher.h),
   [rawdev](@ref rte_rawdev.h),
   [metrics](@ref rte_metrics.h),
   [bitrate](@ref rte_bitrate.h),
diff --git a/doc/guides/prog_guide/event_dispatcher.rst b/doc/guides/prog_guide/event_dispatcher.rst
new file mode 100644
index 0000000000..ca670ce4d4
--- /dev/null
+++ b/doc/guides/prog_guide/event_dispatcher.rst
@@ -0,0 +1,443 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Event Dispatcher
+================
+
+Overview
+--------
+
+The purpose of the event dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the event dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The event dispatcher replaces the conditional logic that follows an
+event device dequeue operation, where events are dispatched to
+different parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int n;
+    
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+	                                MAX_BURST, 0);
+    
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+    
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example, ``module_a_process()`` is
+broken into two processing stages (a module-internal affair), the
+shared conditional code must be updated to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The event dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the event dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive the event,
+  using a set of application-provided, per-handler event matching
+  callback functions.
+* Deliver the events matching a particular handler to that handler,
+  using its process callback (a simplified sketch follows below).
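+
+The following is a simplified sketch of this logic, for illustration
+only. The ``handlers`` array and its field names are hypothetical, and
+not part of the actual implementation:
+
+.. code-block:: c
+
+    uint16_t i, n;
+    struct rte_event events[MAX_BURST];
+
+    n = rte_event_dequeue_burst(dev_id, port_id, events, MAX_BURST, 0);
+
+    for (i = 0; i < n; i++) {
+            int h;
+
+            /* The first matching handler receives the event. In the
+             * real implementation, events destined for the same
+             * handler may be clustered into a single process call. */
+            for (h = 0; h < num_handlers; h++)
+                    if (handlers[h].match(&events[i],
+                                          handlers[h].match_cb_data)) {
+                            handlers[h].process(dev_id, port_id,
+                                                &events[i], 1,
+                                                handlers[h].process_cb_data);
+                            break;
+                    }
+    }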
+
+Had the above application made use of the event dispatcher, the code
+relevant for its module A might have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+           return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+    
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            const struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+    
+    /* In the module's initialization code */
+    rte_event_dispatcher_register(EVENT_DISPATCHER_ID, module_a_match,
+                                  NULL, module_a_process_events,
+                                  module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or,
+phrased differently, what replaces the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the event
+dispatcher is run as a DPDK :doc:`Service <service_cores>`.
+
+The event dispatcher is a layer between the application and the event
+device in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
+
+Event Dispatcher Creation
+-------------------------
+
+An event dispatcher is created using
+``rte_event_dispatcher_create()``.
+
+The dispatcher id is provided by the application, and must be unique.
+
+The event device must be configured before the event dispatcher is
+created.
+
+Usually, only one event dispatcher is needed per event device. An
+event dispatcher handles exactly one event device.
+
+An event dispatcher is freed using the ``rte_event_dispatcher_free()``
+function. The event dispatcher's service functions must not be running
+on any lcore at the point of this call.
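+
+Below is a brief sketch of creation and destruction, where
+``EVENT_DISPATCHER_ID`` and ``event_dev_id`` are application-defined
+values:
+
+.. code-block:: c
+
+    /* At initialization; the event device is already configured. */
+    rte_event_dispatcher_create(EVENT_DISPATCHER_ID, event_dev_id);
+
+    /* At shutdown, with no dispatcher service function running. */
+    rte_event_dispatcher_free(EVENT_DISPATCHER_ID);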
+
+Event Port Binding
+------------------
+
+To be able to dequeue events, the event dispatcher must know which
+event ports are to be used, on all the lcores it uses. The application
+provides this information using
+``rte_event_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
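+
+Below is a sketch of binding one event port to one lcore. The third
+and fourth arguments are assumed here to be the dequeue burst size and
+timeout; consult the API reference for the exact semantics:
+
+.. code-block:: c
+
+    /* In the application's deployment code, for each port/lcore
+     * pairing in use. */
+    rte_event_dispatcher_bind_port_to_lcore(EVENT_DISPATCHER_ID,
+                                            event_port_id, MAX_BURST,
+                                            0, lcore_id);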
+
+The ``rte_event_dispatcher_unbind_port_from_lcore()`` function is used
+to undo this operation.
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the event
+dispatcher's service function is running on any lcore.
+
+Event Handlers
+--------------
+
+The event dispatcher handler is an interface between the event
+dispatcher and an application module, used to route events to the
+appropriate part of the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_event_dispatcher_match_t`` callback, whose job is to
+  decide if the event is to be the property of this handler.
+* The ``rte_event_dispatcher_process_t`` callback, which is used by
+  the event dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the event dispatcher may impose
+an upper limit on the number of handlers. In addition, installing a
+large number of handlers increases event dispatcher overhead, although
+this does not necessarily translate to a system-level performance
+degradation. See the section on :ref:`Event Clustering` for more
+information.
+
+Handler registration and unregistration cannot safely be done while
+the event dispatcher's service function is running on any lcore.
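+
+A successful registration call returns a non-negative registration id,
+which may later be used to remove the handler. A sketch, reusing the
+module A callbacks from the overview:
+
+.. code-block:: c
+
+    int reg_id;
+
+    reg_id = rte_event_dispatcher_register(EVENT_DISPATCHER_ID,
+                                           module_a_match, NULL,
+                                           module_a_process_events,
+                                           module_a_data);
+
+    /* ...later, with the dispatcher service not running. */
+    rte_event_dispatcher_unregister(EVENT_DISPATCHER_ID, reg_id);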
+
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides whether an event should
+be delivered to this handler or not.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
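+
+As the overview's footnote notes, matching need not be based on queue
+id. Below is a hypothetical match function keying on the event's
+``sub_event_type`` field instead (``MODULE_C_SUB_EVENT_TYPE`` being an
+application-defined constant):
+
+.. code-block:: c
+
+    static bool
+    module_c_match(const struct rte_event *event, void *cb_data)
+    {
+            return event->sub_event_type == MODULE_C_SUB_EVENT_TYPE;
+    }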
+
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the event dispatcher's service
+function, upon the arrival of events to the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls being
+made. For example, if the event dispatcher dequeues two events
+from the event device, it may choose to find out the destination for
+the first event, and deliver it, and then continue to find out the
+destination for the second, and then deliver that event as well. The
+event dispatcher may also choose a strategy where no event is
+delivered until the destination handlers for both events have been
+determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The event dispatcher maintains the order of events destined for the
+same handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the event dispatcher delivers these events to the
+application.
+
+The event dispatcher *does not* guarantee to maintain the order of
+events delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block::
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The event dispatcher may deliver the events in the following manner:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The event dispatcher may also choose to cluster (group) all events
+destined for ``module_b_stage_0_process()`` into one array:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The event dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the event dispatcher may not do is to reorder
+event ``e1`` so that it precedes ``e0`` in the array passed to
+module B's stage 0 process callback.
+
+Although clustering requires some extra work for the event dispatcher,
+it leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The event dispatcher may be configured to notify one or more parts of
+the application when the matching and processing of a batch of events
+has completed.
+
+The ``rte_event_dispatcher_finalize_register()`` call is used to
+register a finalize callback. The function
+``rte_event_dispatcher_finalize_unregister()`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+module, or a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+    
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_event_dispatcher_finalize_register(EVENT_DISPATCHER_ID,
+                                           finalize_batch,
+                                           shared_event_buffer);
+
+The event dispatcher does not track any relationship between a handler
+and a finalize callback, and all finalize callbacks will be called if
+(and only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the event dispatcher's service function is running on any
+lcore.
+
+Service
+-------
+
+The event dispatcher is a DPDK service, and is managed in a manner
+similar to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-configured event dispatcher
+(identified by ``EVENT_DISPATCHER_ID``) to that lcore.
+
+.. code-block:: c
+
+    static void
+    launch_event_dispatcher_core(unsigned lcore_id)
+    {
+            uint32_t service_id;
+    
+            rte_service_lcore_add(lcore_id);
+    
+            rte_event_dispatcher_service_id_get(EVENT_DISPATCHER_ID,
+                                                &service_id);
+    
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+    
+            rte_service_lcore_start(lcore_id);
+    
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the event dispatcher must be started.
+
+.. code-block:: c
+
+    rte_event_dispatcher_start(EVENT_DISPATCHER_ID);
+
+
+Multi Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In the light of this, it makes sense to have the event dispatcher
+service be the only DPDK service on all lcores used for packet
+processing — at least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the event dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the event dispatcher into
+the application, were performed on the lcore even when no events were
+received, are prime targets for being converted into such auxiliary
+services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
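+
+Below is a sketch of how such a timer management task could be wrapped
+in a service of its own, using the service component API (error
+handling omitted; the service name is arbitrary):
+
+.. code-block:: c
+
+    static int32_t
+    timer_service_run(void *cb_data)
+    {
+            rte_timer_manage();
+            return 0;
+    }
+
+    /* In the application's initialization code */
+    struct rte_service_spec spec = {
+            .name = "app_timer_service",
+            .callback = timer_service_run,
+    };
+    uint32_t timer_service_id;
+
+    rte_service_component_register(&spec, &timer_service_id);
+    rte_service_component_runstate_set(timer_service_id, 1);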
+
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to ensure resource reclamation occurs even though some
+lcores currently do not process any events.
+
+If more services than the event dispatcher service are mapped to a
+service lcore, it's important that the other services are well-behaved
+and don't interfere with event processing to the extent that the
+system's throughput and/or latency requirements are at risk of not
+being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 87333ee84a..74fcbcee6b 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -59,6 +59,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    event_dispatcher
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v2 1/3] eventdev: introduce event dispatcher
  2023-06-16  7:40                             ` [PATCH v2 1/3] eventdev: introduce " Mattias Rönnblom
@ 2023-08-18  6:09                               ` Jerin Jacob
  2023-08-22  8:42                                 ` Mattias Rönnblom
  2023-09-01 10:53                                 ` Mattias Rönnblom
  2023-09-04 13:03                               ` [PATCH v3 0/3] Add dispatcher library Mattias Rönnblom
  1 sibling, 2 replies; 102+ messages in thread
From: Jerin Jacob @ 2023-08-18  6:09 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: jerinj, hofors, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Naga Harish K, S V,
	Erik Gabriel Carrillo, Gujjar, Abhinandan S, Pavan Nikhilesh,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, techboard, Zhirun Yan

On Fri, Jun 16, 2023 at 1:17 PM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> The purpose of the event dispatcher is to help reduce coupling in an
> Eventdev-based DPDK application.
>
> In addition, the event dispatcher also provides a convenient and
> flexible way for the application to use service cores for
> application-level processing.
>
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> Reviewed-by: Heng Wang <heng.wang@ericsson.com>

Adding eventdev maintainers and tech board,

Hi Mattias,

Finally, got some time to review this series, and thanks for the
excellent documentation.

I understand the use case for the dispatcher, but the following are
some of my concerns:

1) To decouple the application-specific business logic, one needs to
use two function pointers to access the per-packet (match and process)
functions.
2) The need to enforce a service core for its usage.

IMO, both are a given application's choice; not all applications need
to use this scheme. Keeping the code in lib/eventdev has the
following issues:

1) It kind of enforces the above scheme for all application modeling,
which may not be applicable to every application use case, and the
eventdev device does not dictate a specific framework model.
2) We have never kept framework code in a device class library; i.e.,
public APIs are implemented through the device class API, and the
public API doesn't have any hook into the PMD API.
For example, we never kept lib/distributor/ code in lib/ethdev.

Other than the placement of this code, I agree with the use case and
solution at a high level. The following could be options for the
placement of this library. Based on that, we can have the next level
of review.

1) It is possible to plug this into lib/graph by adding a new graph
model (@zhirun.yan@intel.com recently added
RTE_GRAPH_MODEL_MCORE_DISPATCH)

Based on my understanding, that can translate to:
a) Adding a new graph model which allows having it on graph walk
(graph walk is nothing but calling registered dispatcher routines).
b) It is possible to add model-specific APIs via
rte_graph_model_model_name_xxxx().
c) The graph library is not using a match-callback kind of scheme.
Instead, nodes will process the packet, find its downstream node and
enqueue to it, and then graph_walk() calls the downstream node's
specific process function.
With that, we can meet the original goal of business logic decoupling.
However, nodes are currently not aware of what kind of graph model
they are running in; that could be one issue here, as eventdev has
more scheduling properties, like schedule type etc. To overcome that
issue, it may be possible to introduce node-to-graph-model
compatibility (where nodes can advertise the supported graph models).
d) Currently, we are planning to make the graph API stable; if we are
taking this path, we need to hold
https://patches.dpdk.org/project/dpdk/patch/20230810180515.113700-1-stephen@networkplumber.org/
as we may need to update some public APIs.

2) Have a new library, lib/event_dispatcher

3) Move to the examples directory to showcase the framework

4) Move to the app/test-eventdev directory to showcase the framework.


Thoughts?

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v2 1/3] eventdev: introduce event dispatcher
  2023-08-18  6:09                               ` Jerin Jacob
@ 2023-08-22  8:42                                 ` Mattias Rönnblom
  2023-08-22 12:32                                   ` Jerin Jacob
  2023-09-01 10:53                                 ` Mattias Rönnblom
  1 sibling, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-08-22  8:42 UTC (permalink / raw)
  To: Jerin Jacob, Mattias Rönnblom
  Cc: jerinj, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Naga Harish K, S V,
	Erik Gabriel Carrillo, Gujjar, Abhinandan S, Pavan Nikhilesh,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, techboard, Zhirun Yan

On 2023-08-18 08:09, Jerin Jacob wrote:
> On Fri, Jun 16, 2023 at 1:17 PM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>>
>> The purpose of the event dispatcher is to help reduce coupling in an
>> Eventdev-based DPDK application.
>>
>> In addition, the event dispatcher also provides a convenient and
>> flexible way for the application to use service cores for
>> application-level processing.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
>> Reviewed-by: Heng Wang <heng.wang@ericsson.com>
> 
> Adding eventdev maintainers and tech board,
> 
> Hi Mattais,
> 
> Finally, got some time to review this series, and thanks for excellent
> documentation.
> 
> I understand the use case for the dispatcher, But following are some
> of my concern
> 
> 1) To decouple the application specific business logic, one need to
> use two function pointers to access per packet (match and process)
> function.

The API design is based on community feedback, which suggested more 
flexibility was required than the initial 
"dispatching-based-on-queue-id-only" functionality the first RFC provided.

Where I expected to land was a design where I would have something like 
the RFC v2 design with match+process callbacks, and then a supplementary 
"hard-coded" dispatch-internal match API as well, where only the process 
function would be used (much like how RFC v1 worked).

It turned out the special-case API was not performing better (rather the 
opposite) for the evaluated use cases, so I dropped that idea.

That said, it could always be a future extension to re-introduce 
dispatcher-internal matching.

> 2) Need to enforce service core for its usage.
> 

Well, Eventdev does that already, except on systems where all required 
event adapters have the appropriate INTERNAL_PORT capability.

> IMO, Both are a given application's choice, All the application does
> not need to use this scheme. Keeping the code in lib/eventdev has the
> following issue.
> 
> 1)It is kind of enforcing above scheme for all the application
> modeling, which may not applicable for application use cases and
> eventdev device does not dictate a specific framework model.
> 2) The framework code, we never kept in device class library. i.e.,
> public APIs are implemented through device class API and public API
> don't have any no hook to PMD API.
> For example, we never kept lib/distributor/ code in lib/ethdev.
> 
> Other than the placement of this code, I agree with use case and
> solution at high level . The following could option for placement of
> this library. Based on that, we can have next level review.
> 

I'm fine with keeping this as a separate library, although I also don't 
see the harm in having some utility-type functionality in eventdev itself.

> 1) It is possible to plug in this to lib/graph by adding new graph
> model(@zhirun.yan@intel.com recently added
> RTE_GRAPH_MODEL_MCORE_DISPATCH)
> 
> Based on my understanding, That can translate to
> a)  Adding new graph model which allows to have it on graph walk
> (Graph walk is nothing but calling registered dispatcher routines)
> b) It is possible to add model specific APIs via
> rte_graph_model_model_name_xxxx()
> c) Graph library is not using match callback kind of scheme. Instead,
> nodes will process the packet and find its downstream node and enqueue
> to it and then graph_walk() calls the downstream node specific process
> function.
> With that, we can meet the original goal of business logic decoupling.
> However, Currently, nodes are not aware of what kind of graph model it
> is running, that could be one issue here as eventdev has more
> scheduling properties
> like schedule type etc., to overcome that issue, it may be possible to
> introduce nodes to graph model compatibility (where nodes can
> advertise the supported graph models)
> d) Currently we are planning to make graph API as stable, if we are
> taking this path, we need to hold
> https://patches.dpdk.org/project/dpdk/patch/20230810180515.113700-1-stephen@networkplumber.org/
> as
> we may need to update some public APIs.
> 
> 2) Have new library lib/event_dispatcher
> 
> 3) Move to example directory to showcase the framework
> 
> 4) Move to app/test-eventdev directory  to show the case of the framework.
> 
> 
> Thoughts?

I'm not sure I follow. Are you suggesting rte_graph could use 
rte_event_dispatcher, or that an application could use rte_graph to 
solve the same problem as rte_event_dispatcher solves?

I didn't review rte_graph in detail, but if it's anything like fd.io 
VPP's graph model, it's not a programming model that you switch to 
without significant application code impact.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v2 1/3] eventdev: introduce event dispatcher
  2023-08-22  8:42                                 ` Mattias Rönnblom
@ 2023-08-22 12:32                                   ` Jerin Jacob
  2023-08-24 11:17                                     ` Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: Jerin Jacob @ 2023-08-22 12:32 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, jerinj, dev, harry.van.haaren,
	peter.j.nilsson, Stephen Hemminger, Heng Wang, Naga Harish K,
	S V, Erik Gabriel Carrillo, Gujjar, Abhinandan S,
	Pavan Nikhilesh, Shijith Thotton, Hemant Agrawal, Sachin Saxena,
	Liang Ma, Peter Mccarthy, techboard, Zhirun Yan

On Tue, Aug 22, 2023 at 2:12 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>
> On 2023-08-18 08:09, Jerin Jacob wrote:
> > On Fri, Jun 16, 2023 at 1:17 PM Mattias Rönnblom
> > <mattias.ronnblom@ericsson.com> wrote:
> >>
> >
> > Hi Mattais,
> >
> > Finally, got some time to review this series, and thanks for excellent
> > documentation.
> >
> > I understand the use case for the dispatcher, But following are some
> > of my concern
> >
> > 1) To decouple the application specific business logic, one need to
> > use two function pointers to access per packet (match and process)
> > function.
>
> The API design is based on community feedback, which suggested more
> flexibility was required than the initial
> "dispatching-based-on-queue-id-only" functionality the first RFC provided.
>
> Where I expected to land was a design where I would have something like
> the RFC v2 design with match+process callbacks, and then a supplementary
> "hard-coded" dispatch-internal match API as well, where only the process
> function would be used (much like how RFC v1 worked).
>
> It turned out the special-case API was not performing better (rather the
> opposite) for the evaluated use cases, so I dropped that idea.
>
> That said, it could always be a future extension to re-introduce
> dispatcher-internal matching.

Ack.

>
> > 2) Need to enforce service core for its usage.
> >
>
> Well, Eventdev does that already, except on systems where all required
> event adapters have the appropriate INTERNAL_PORT capability.

Yes. The difference is, one is for HW feature emulation and the other
one is for framework usage.


>
> > IMO, Both are a given application's choice, All the application does
> > not need to use this scheme. Keeping the code in lib/eventdev has the
> > following issue.
> >
> > 1)It is kind of enforcing above scheme for all the application
> > modeling, which may not applicable for application use cases and
> > eventdev device does not dictate a specific framework model.
> > 2) The framework code, we never kept in device class library. i.e.,
> > public APIs are implemented through device class API and public API
> > don't have any no hook to PMD API.
> > For example, we never kept lib/distributor/ code in lib/ethdev.
> >
> > Other than the placement of this code, I agree with use case and
> > solution at high level . The following could option for placement of
> > this library. Based on that, we can have next level review.
> >
>
> I'm fine with keeping this as a separate library, although I also don't
> see the harm in having some utility-type functionality in eventdev itself.

I see harm as:

1) It kind of enforces the above scheme for all application modeling,
which may not be applicable to all application use cases, and the
eventdev device does not dictate a specific framework model.

2) We have never kept framework code in a device class library; i.e.,
public APIs are implemented through the device class API, and the
public API doesn't have any hook into the PMD API.
For example, we never kept lib/distributor/ code in lib/ethdev.

I would like to keep the eventdev library's scope as abstracting event
device features. We have some common code in the library whose scope
is sharing between PMDs, not a framework on top of the eventdev public
APIs.

Having said that, I am supportive of having this framework as a new
library, and happy to review the new library.

>
> > 1) It is possible to plug in this to lib/graph by adding new graph
> > model(@zhirun.yan@intel.com recently added
> > RTE_GRAPH_MODEL_MCORE_DISPATCH)
> >
> > Based on my understanding, That can translate to
> > a)  Adding new graph model which allows to have it on graph walk
> > (Graph walk is nothing but calling registered dispatcher routines)
> > b) It is possible to add model specific APIs via
> > rte_graph_model_model_name_xxxx()
> > c) Graph library is not using match callback kind of scheme. Instead,
> > nodes will process the packet and find its downstream node and enqueue
> > to it and then graph_walk() calls the downstream node specific process
> > function.
> > With that, we can meet the original goal of business logic decoupling.
> > However, Currently, nodes are not aware of what kind of graph model it
> > is running, that could be one issue here as eventdev has more
> > scheduling properties
> > like schedule type etc., to overcome that issue, it may be possible to
> > introduce nodes to graph model compatibility (where nodes can
> > advertise the supported graph models)
> > d) Currently we are planning to make graph API as stable, if we are
> > taking this path, we need to hold
> > https://patches.dpdk.org/project/dpdk/patch/20230810180515.113700-1-stephen@networkplumber.org/
> > as
> > we may need to update some public APIs.
> >
> > 2) Have new library lib/event_dispatcher
> >
> > 3) Move to example directory to showcase the framework
> >
> > 4) Move to app/test-eventdev directory  to show the case of the framework.
> >
> >
> > Thoughts?
>
> I'm not sure I follow. Are you suggesting rte_graph could use
> rte_event_dispatcher, or that an application could use rte_graph to
> solve the same problem as rte_event_dispatcher solves?

The latter one: an application could use rte_graph to solve the same
problem as rte_event_dispatcher solves.
In fact, both are solving similar problems. See below.


>
> I didn't review rte_graph in detail, but if it's anything like fd.io
> VPP's graph model, it's not a programming model that you switch to
> without significant application code impact.

This is a new library, right? So, which existing applications?

It is not strictly like the VPP graph model. rte_graph supports
plugins for different graph models.

Following are the models available.
https://doc.dpdk.org/guides/prog_guide/graph_lib.html
See
62.4.5.1. RTC (Run-To-Completion)
62.4.5.2. Dispatch model

RTC is similar to fd.io VPP. Other model is not like VPP.

If we choose to take the new library route instead of a new rte_graph
model for eventdev, then
https://doc.dpdk.org/guides/contributing/new_library.html will be the process.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v2 1/3] eventdev: introduce event dispatcher
  2023-08-22 12:32                                   ` Jerin Jacob
@ 2023-08-24 11:17                                     ` Mattias Rönnblom
  2023-08-25  7:27                                       ` Jerin Jacob
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-08-24 11:17 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Mattias Rönnblom, jerinj, dev, harry.van.haaren,
	peter.j.nilsson, Stephen Hemminger, Heng Wang, Naga Harish K,
	S V, Erik Gabriel Carrillo, Gujjar, Abhinandan S,
	Pavan Nikhilesh, Shijith Thotton, Hemant Agrawal, Sachin Saxena,
	Liang Ma, Peter Mccarthy, techboard, Zhirun Yan

On 2023-08-22 14:32, Jerin Jacob wrote:
> On Tue, Aug 22, 2023 at 2:12 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>>
>> On 2023-08-18 08:09, Jerin Jacob wrote:
>>> On Fri, Jun 16, 2023 at 1:17 PM Mattias Rönnblom
>>> <mattias.ronnblom@ericsson.com> wrote:
>>>>
>>>
>>> Hi Mattais,
>>>
>>> Finally, got some time to review this series, and thanks for excellent
>>> documentation.
>>>
>>> I understand the use case for the dispatcher, But following are some
>>> of my concern
>>>
>>> 1) To decouple the application specific business logic, one need to
>>> use two function pointers to access per packet (match and process)
>>> function.
>>
>> The API design is based on community feedback, which suggested more
>> flexibility was required than the initial
>> "dispatching-based-on-queue-id-only" functionality the first RFC provided.
>>
>> Where I expected to land was a design where I would have something like
>> the RFC v2 design with match+process callbacks, and then a supplementary
>> "hard-coded" dispatch-internal match API as well, where only the process
>> function would be used (much like how RFC v1 worked).
>>
>> It turned out the special-case API was not performing better (rather the
>> opposite) for the evaluated use cases, so I dropped that idea.
>>
>> That said, it could always be a future extension to re-introduce
>> dispatcher-internal matching.
> 
> Ack.
> 
>>
>>> 2) Need to enforce service core for its usage.
>>>
>>
>> Well, Eventdev does that already, except on systems where all required
>> event adapters have the appropriate INTERNAL_PORT capability.
> 
> Yes. The difference is, one is for HW feature emulation and other one
> for framework usage.
> 

Can you elaborate why that difference is relevant?

Both the adapters and the event dispatcher are optional, so if you have 
an issue with service cores, you can avoid their use.

> 
>>
>>> IMO, Both are a given application's choice, All the application does
>>> not need to use this scheme. Keeping the code in lib/eventdev has the
>>> following issue.
>>>
>>> 1)It is kind of enforcing above scheme for all the application
>>> modeling, which may not applicable for application use cases and
>>> eventdev device does not dictate a specific framework model.
>>> 2) The framework code, we never kept in device class library. i.e.,
>>> public APIs are implemented through device class API and public API
>>> don't have any no hook to PMD API.
>>> For example, we never kept lib/distributor/ code in lib/ethdev.
>>>
>>> Other than the placement of this code, I agree with use case and
>>> solution at high level . The following could option for placement of
>>> this library. Based on that, we can have next level review.
>>>
>>
>> I'm fine with keeping this as a separate library, although I also don't
>> see the harm in having some utility-type functionality in eventdev itself.
> 
> I see harm as
> 
> 1)It is kind of enforcing above scheme for all the application
> modeling, which may not applicable for all application use cases and
> eventdev device does not dictate a specific framework model.
> 

What scheme is being enforced? Using this thing is optional.

> 2) The framework code, we never kept in device class library. i.e.,
> public APIs are implemented through device class API and public API
> don't have any no hook to PMD API.
> For example, we never kept lib/distributor/ code in lib/ethdev.
> 
> I would to keep eventDEV library scope as abstracting event device features.
> We have some common code in library whose scope is sharing between PMDs
> not a framework on top eventdev public APIs.
> 
> Having said that, I supportive to get this framework as new library and
> happy to review the new library.
> 

Thanks.

I'll reshape the event dispatcher as a separate library and submit a new 
patch.

>>
>>> 1) It is possible to plug this into lib/graph by adding a new graph
>>> model (@zhirun.yan@intel.com recently added
>>> RTE_GRAPH_MODEL_MCORE_DISPATCH)
>>>
>>> Based on my understanding, that can translate to:
>>> a) Adding a new graph model which allows it to run on graph walk
>>> (graph walk is nothing but calling the registered dispatcher routines)
>>> b) It is possible to add model-specific APIs via
>>> rte_graph_model_model_name_xxxx()
>>> c) The graph library is not using a match-callback kind of scheme.
>>> Instead, a node processes the packet, finds its downstream node, and
>>> enqueues to it; graph_walk() then calls the downstream node's specific
>>> process function.
>>> With that, we can meet the original goal of business logic decoupling.
>>> However, currently, nodes are not aware of what kind of graph model
>>> they are running under, which could be one issue here, as eventdev has
>>> more scheduling properties, like schedule type etc. To overcome that
>>> issue, it may be possible to introduce node-to-graph-model
>>> compatibility (where nodes can advertise the supported graph models).
>>> d) Currently, we are planning to make the graph API stable; if we are
>>> taking this path, we need to hold
>>> https://patches.dpdk.org/project/dpdk/patch/20230810180515.113700-1-stephen@networkplumber.org/
>>> as we may need to update some public APIs.
>>>
>>> 2) Have new library lib/event_dispatcher
>>>
>>> 3) Move to example directory to showcase the framework
>>>
>>> 4) Move to app/test-eventdev directory  to show the case of the framework.
>>>
>>>
>>> Thoughts?
>>
>> I'm not sure I follow. Are you suggesting rte_graph could use
>> rte_event_dispatcher, or that an application could use rte_graph to
>> solve the same problem as rte_event_dispatcher solves?
> 
> The latter one: an application could use rte_graph to solve the same
> problem as rte_event_dispatcher solves.
> In fact, both are solving similar problems. See below.
> 
> 
>>
>> I didn't review rte_graph in detail, but if it's anything like fd.io
>> VPP's graph model, it's not a programming model that you switch to
>> without significant application code impact.
> 
> This is a new library, right? So, which existing applications?
> 

Existing DPDK applications whose domain logic is not organized as a
graph. Which, I'm guessing, are many.

Moving from "raw" event device dequeue to the event dispatcher model is
a trivial, non-intrusive operation.
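
To give a rough idea, the per-event switch statement is essentially
replaced by handler registrations. A minimal sketch, using the
rte_event_dispatcher naming of this version of the patch set (the
module_a_* functions, MODULE_A_QUEUE_ID and dispatcher_id are made-up
application code):

static bool
module_a_match(const struct rte_event *event, void *cb_data)
{
	return event->queue_id == MODULE_A_QUEUE_ID;
}

static void
module_a_process(uint8_t event_dev_id, uint8_t event_port_id,
		 struct rte_event *events, uint16_t num, void *cb_data)
{
	/* What used to be the switch statement's "case" branch. */
}

/* ... */
	rte_event_dispatcher_register(dispatcher_id, module_a_match, NULL,
				      module_a_process, NULL);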

> It is not strictly like the VPP graph model. rte_graph supports plugging
> in different graph models.
> 
> Following are the models available.
> https://doc.dpdk.org/guides/prog_guide/graph_lib.html
> See
> 62.4.5.1. RTC (Run-To-Completion)
> 62.4.5.2. Dispatch model
> 
> RTC is similar to fd.io VPP. The other model is not like VPP.
> 
> If we choose to take the new library route instead of a new rte_graph
> model for eventdev, then
> https://doc.dpdk.org/guides/contributing/new_library.html will be the process.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v2 1/3] eventdev: introduce event dispatcher
  2023-08-24 11:17                                     ` Mattias Rönnblom
@ 2023-08-25  7:27                                       ` Jerin Jacob
  0 siblings, 0 replies; 102+ messages in thread
From: Jerin Jacob @ 2023-08-25  7:27 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, jerinj, dev, harry.van.haaren,
	peter.j.nilsson, Stephen Hemminger, Heng Wang, Naga Harish K,
	S V, Erik Gabriel Carrillo, Gujjar, Abhinandan S,
	Pavan Nikhilesh, Shijith Thotton, Hemant Agrawal, Sachin Saxena,
	Liang Ma, Peter Mccarthy, techboard, Zhirun Yan

On Thu, Aug 24, 2023 at 4:47 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>
> On 2023-08-22 14:32, Jerin Jacob wrote:

> >> Well, Eventdev does that already, except on systems where all required
> >> event adapters have the appropriate INTERNAL_PORT capability.
> >
> > Yes. The difference is, one is for HW feature emulation and the other
> > one is for framework usage.
> >
>
> Can you elaborate why that difference is relevant?
>
> Both the adapters and the event dispatcher are optional, so if you have
> an issue with service cores, you can avoid their use.

The adapter's service core is not optional if the HW doesn't have that
feature via the adapter API.


> >
> > 1) It kind of enforces the above scheme for all application
> > modeling, which may not be applicable to all application use cases,
> > and the eventdev device does not dictate a specific framework model.
> >
>
> What scheme is being enforced? Using this thing is optional.

Yes. With it exposed in the rte_event_* namespace and the framework in
lib/eventdev, one can think it is a layered SW model where the topmost
event dispatcher needs to be used. Changing the namespace and moving it
to a different library will fix that problem.


>
> > 2) We have never kept framework code in a device class library, i.e.,
> > public APIs are implemented through the device class API, and the
> > public API has no hook into the PMD API.
> > For example, we never kept lib/distributor/ code in lib/ethdev.
> >
> > I would like to keep the eventdev library scope as abstracting event
> > device features. We have some common code in the library whose scope is
> > sharing between PMDs, not a framework on top of the eventdev public APIs.
> >
> > Having said that, I am supportive of having this framework as a new
> > library, and happy to review it.
> >
>
> Thanks.
>
> I'll reshape the event dispatcher as a separate library and submit a new
> patch.

Ack

> >>
> >> I didn't review rte_graph in detail, but if it's anything like fd.io
> >> VPP's graph model, it's not a programming model that you switch to
> >> without significant application code impact.
> >
> > This is a new library, right? So, which existing applications?
> >
>
> Existing DPDK applications whose domain logic is not organized as a
> graph. Which, I'm guessing, are many.

Yes. But I was comparing a new application based on graph vs. the new
event dispatch model,
not graph vs. a "raw" event device.

Nevertheless, if there are some in-house applications following the
event dispatch model, and one wants to upstream that model as a new
library, no objections from my side.


>
> Moving from "raw" event device dequeue to the event dispatcher model is
> a trivial, non-intrusive operation.
>
> > It is not strictly like the VPP graph model. rte_graph supports plugging
> > in different graph models.
> >
> > Following are the models available.
> > https://doc.dpdk.org/guides/prog_guide/graph_lib.html
> > See
> > 62.4.5.1. RTC (Run-To-Completion)
> > 62.4.5.2. Dispatch model
> >
> > RTC is similar to fd.io VPP. The other model is not like VPP.
> >
> > If we choose to take the new library route instead of a new rte_graph
> > model for eventdev, then
> > https://doc.dpdk.org/guides/contributing/new_library.html will be the process.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v2 1/3] eventdev: introduce event dispatcher
  2023-08-18  6:09                               ` Jerin Jacob
  2023-08-22  8:42                                 ` Mattias Rönnblom
@ 2023-09-01 10:53                                 ` Mattias Rönnblom
  2023-09-01 10:56                                   ` Jerin Jacob
  1 sibling, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-01 10:53 UTC (permalink / raw)
  To: Jerin Jacob, Mattias Rönnblom
  Cc: jerinj, dev, harry.van.haaren, peter.j.nilsson,
	Stephen Hemminger, Heng Wang, Naga Harish K, S V,
	Erik Gabriel Carrillo, Gujjar, Abhinandan S, Pavan Nikhilesh,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, techboard, Zhirun Yan, techboard

On 2023-08-18 08:09, Jerin Jacob wrote:

<snip>

> 2) Have new library lib/event_dispatcher
> 

Should the library be named event_dispatcher, or simply dispatcher?

An underscore in a library name isn't exactly aesthetically pleasing, and
shorter is better. Also, the rte_event_* namespace is avoided altogether.

On the other hand, "dispatcher" is a little too generic, and a somewhat
grandiose name, for a relatively simple thing. "event_dispatcher" makes
the relation to eventdev obvious.

<snip>

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v2 1/3] eventdev: introduce event dispatcher
  2023-09-01 10:53                                 ` Mattias Rönnblom
@ 2023-09-01 10:56                                   ` Jerin Jacob
  0 siblings, 0 replies; 102+ messages in thread
From: Jerin Jacob @ 2023-09-01 10:56 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, jerinj, dev, harry.van.haaren,
	peter.j.nilsson, Stephen Hemminger, Heng Wang, Naga Harish K,
	S V, Erik Gabriel Carrillo, Gujjar, Abhinandan S,
	Pavan Nikhilesh, Shijith Thotton, Hemant Agrawal, Sachin Saxena,
	Liang Ma, Peter Mccarthy, Zhirun Yan, techboard

On Fri, Sep 1, 2023 at 4:23 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>
> On 2023-08-18 08:09, Jerin Jacob wrote:
>
> <snip>
>
> > 2) Have new library lib/event_dispatcher
> >
>
> Should the library be named event_dispatcher, or simply dispatcher?

Looks good to me.

>
> An underscore in a library name isn't exactly aesthetically pleasing, and shorter

> is better. Also, the rte_event_* namespace is avoided altogether.

+1

>
> On the other hand, "dispatcher" is a little too generic, and a somewhat
> grandiose name, for a relatively simple thing. "event_dispatcher" makes
> the relation to eventdev obvious.
>
> <snip>

^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v3 0/3] Add dispatcher library
  2023-06-16  7:40                             ` [PATCH v2 1/3] eventdev: introduce " Mattias Rönnblom
  2023-08-18  6:09                               ` Jerin Jacob
@ 2023-09-04 13:03                               ` Mattias Rönnblom
  2023-09-04 13:03                                 ` [PATCH v3 1/3] lib: introduce " Mattias Rönnblom
                                                   ` (3 more replies)
  1 sibling, 4 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-04 13:03 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to decouple different parts
of an eventdev-based application (e.g., processing pipeline stages),
sharing the same underlying event device.

The dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event metadata, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the dispatcher relies on application-supplied matching callback
functions to decide where to deliver events.

A dispatcher is configured to dequeue events from a specific event
device, and ties into the service core framework, to do its (and the
application's) work.

The dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the dispatcher adds some overhead, experience suggests that
the net effect on the application (in both synthetic benchmarks and
more real-world applications) may well be positive. This is primarily
due to clustering (see the programming guide) reducing cache misses.

Benchmarking indicates that the overhead is ~10 cc/event (on a
large core), with a handful of often-used handlers.

The dispatcher does not support run-time reconfiguration.

The use of the dispatcher library is optional, and an eventdev-based
application may still opt to access the event device using direct
eventdev API calls, or by some other means.
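
Below is a minimal usage sketch (error handling omitted; DISPATCHER_ID,
RX_QUEUE_ID and the callback functions are made-up application code,
and lcore_id is assumed to be an already-added and started service
lcore):

static bool
match_rx(const struct rte_event *event, void *cb_data)
{
	return event->queue_id == RX_QUEUE_ID;
}

static void
process_rx(uint8_t event_dev_id, uint8_t event_port_id,
	   struct rte_event *events, uint16_t num, void *cb_data)
{
	/* Application-level processing of the matched events. */
}

static void
flush_output(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
{
	/* E.g., flush an event output buffer shared by several handlers. */
}

static void
setup_dispatcher(uint8_t event_dev_id, uint8_t event_port_id,
		 unsigned int lcore_id)
{
	uint32_t service_id;

	rte_dispatcher_create(DISPATCHER_ID, event_dev_id);

	rte_dispatcher_register(DISPATCHER_ID, match_rx, NULL,
				process_rx, NULL);
	rte_dispatcher_finalize_register(DISPATCHER_ID, flush_output, NULL);

	/* Have the dispatcher service dequeue from this event port,
	 * when run on this lcore. */
	rte_dispatcher_bind_port_to_lcore(DISPATCHER_ID, event_port_id,
					  32, 0, lcore_id);

	rte_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
	rte_service_map_lcore_set(service_id, lcore_id, 1);
	rte_service_runstate_set(service_id, 1);

	rte_dispatcher_start(DISPATCHER_ID);
}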

Mattias Rönnblom (3):
  lib: introduce dispatcher library
  test: add dispatcher test suite
  doc: add dispatcher programming guide

 MAINTAINERS                              |    5 +
 app/test/meson.build                     |    1 +
 app/test/test_dispatcher.c               | 1054 ++++++++++++++++++++++
 doc/api/doxy-api-index.md                |    1 +
 doc/api/doxy-api.conf.in                 |    1 +
 doc/guides/prog_guide/dispatcher_lib.rst |  434 +++++++++
 doc/guides/prog_guide/index.rst          |    1 +
 lib/dispatcher/meson.build               |   17 +
 lib/dispatcher/rte_dispatcher.c          |  791 ++++++++++++++++
 lib/dispatcher/rte_dispatcher.h          |  480 ++++++++++
 lib/dispatcher/version.map               |   20 +
 lib/meson.build                          |    2 +
 12 files changed, 2807 insertions(+)
 create mode 100644 app/test/test_dispatcher.c
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-04 13:03                               ` [PATCH v3 0/3] Add dispatcher library Mattias Rönnblom
@ 2023-09-04 13:03                                 ` Mattias Rönnblom
  2023-09-17 16:46                                   ` Naga Harish K, S V
                                                     ` (3 more replies)
  2023-09-04 13:03                                 ` [PATCH v3 2/3] test: add dispatcher test suite Mattias Rönnblom
                                                   ` (2 subsequent siblings)
  3 siblings, 4 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-04 13:03 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the dispatcher also provides a convenient and flexible
way for the application to use service cores for application-level
processing.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
Reviewed-by: Heng Wang <heng.wang@ericsson.com>

--

PATCH v3:
 o To underline its optional character and since it does not provide
   hardware abstraction, the event dispatcher is now a separate
   library.
 o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
   shorter and to avoid the rte_event_* namespace.

PATCH v2:
 o Add dequeue batch count statistic.
 o Add statistics reset function to API.
 o Clarify MT safety guarantees (or lack thereof) in the API documentation.
 o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
   to be consistent with similar loops elsewhere in the dispatcher.
 o Fix variable names in finalizer unregister function.

PATCH:
 o Change prefix from RED to EVD, to avoid confusion with random
   early detection.

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and causes
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 MAINTAINERS                     |   3 +
 lib/dispatcher/meson.build      |  17 +
 lib/dispatcher/rte_dispatcher.c | 791 ++++++++++++++++++++++++++++++++
 lib/dispatcher/rte_dispatcher.h | 480 +++++++++++++++++++
 lib/dispatcher/version.map      |  20 +
 lib/meson.build                 |   2 +
 6 files changed, 1313 insertions(+)
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index a926155f26..6704cd5b2c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1726,6 +1726,9 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
 M: Pavan Nikhilesh <pbhagavatula@marvell.com>
 F: lib/node/
 
+Dispatcher - EXPERIMENTAL
+M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
+F: lib/dispatcher/
 
 Test Applications
 -----------------
diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
new file mode 100644
index 0000000000..c6054a3a5d
--- /dev/null
+++ b/lib/dispatcher/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023 Ericsson AB
+
+if is_windows
+    build = false
+    reason = 'not supported on Windows'
+    subdir_done()
+endif
+
+sources = files(
+        'rte_dispatcher.c',
+)
+headers = files(
+        'rte_dispatcher.h',
+)
+
+deps += ['eventdev']
diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
new file mode 100644
index 0000000000..3319fe09f2
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.c
@@ -0,0 +1,791 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_dispatcher.h>
+
+#define EVD_MAX_PORTS_PER_LCORE 4
+#define EVD_MAX_HANDLERS 32
+#define EVD_MAX_FINALIZERS 16
+#define EVD_AVG_PRIO_INTERVAL 2000
+
+struct rte_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_dispatcher_handler {
+	int id;
+	rte_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_dispatcher_finalizer {
+	int id;
+	rte_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
+	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
+	struct rte_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
+};
+
+static struct rte_dispatcher *dispatchers[UINT8_MAX];
+
+static bool
+evd_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_dispatcher *
+evd_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+evd_set_dispatcher(uint8_t id, struct rte_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define EVD_VALID_ID_OR_RET_EINVAL(id)					\
+	do {								\
+		if (unlikely(!evd_has_dispatcher(id))) {		\
+			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+static int
+evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
+		       const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+		       int handler_idx)
+{
+	struct rte_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+				int handler_idx, uint16_t handler_events)
+{
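+	/*
+	 * Spend a budget of (on average) EVD_AVG_PRIO_INTERVAL dispatched
+	 * events between promotions; when it is exhausted, move this
+	 * handler one step toward the front of the handler list.
+	 */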
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		evd_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follow some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
+			EVD_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
+static inline void
+evd_dispatch_events(struct rte_dispatcher *dispatcher,
+		    struct rte_dispatcher_lcore *lcore,
+		    struct rte_dispatcher_lcore_port *port,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
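+	/*
+	 * First pass: run the match callbacks and sort the dequeued
+	 * events into per-handler bursts. Events matching no handler
+	 * are dropped.
+	 */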
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = evd_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
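+	/*
+	 * Second pass: deliver each non-empty burst to its handler's
+	 * process callback, so events of a particular kind are
+	 * processed back-to-back.
+	 */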
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		evd_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_batch_count++;
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+evd_port_dequeue(struct rte_dispatcher *dispatcher,
+		 struct rte_dispatcher_lcore *lcore,
+		 struct rte_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		evd_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+evd_lcore_process(struct rte_dispatcher *dispatcher,
+		  struct rte_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += evd_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
+static int32_t
+evd_process(void *userdata)
+{
+	struct rte_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = evd_lcore_process(dispatcher, lcore);
+
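+	/* Report back to the service framework that no work was done. */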
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+evd_service_register(struct rte_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = evd_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "evd_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+evd_service_unregister(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	if (evd_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = evd_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	evd_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_dispatcher_free(uint8_t id)
+{
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = evd_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	evd_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_dispatcher *dispatcher;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int
+lcore_port_index(struct rte_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_lcore *lcore;
+	struct rte_dispatcher_lcore_port *port;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_dispatcher_lcore_port *port;
+	struct rte_dispatcher_lcore_port *last;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_dispatcher_handler*
+evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+	struct rte_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
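+	/*
+	 * All lcores hold identical handler lists, so lcore 0 serves as
+	 * the reference when looking for a free handler id.
+	 */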
+	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
+		return -1;
+
+	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
+		    const struct rte_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+evd_install_handler(struct rte_dispatcher *dispatcher,
+		    const struct rte_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		evd_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_dispatcher_register(uint8_t id,
+			      rte_dispatcher_match_t match_fun,
+			      void *match_data,
+			      rte_dispatcher_process_t process_fun,
+			      void *process_data)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	handler.id = evd_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	evd_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	struct rte_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL)
+		return -EINVAL;
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+evd_uninstall_handler(struct rte_dispatcher *dispatcher,
+		      int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = evd_lcore_uninstall_handler(lcore, handler_id);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_unregister(uint8_t id, int handler_id)
+{
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = evd_uninstall_handler(dispatcher, handler_id);
+
+	return rc;
+}
+
+static struct rte_dispatcher_finalizer*
+evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
+		       int handler_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == handler_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_dispatcher_finalizer *
+evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = evd_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_dispatcher_finalize_register(uint8_t id,
+			      rte_dispatcher_finalize_t finalize_fun,
+			      void *finalize_data)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_finalizer *finalizer;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	finalizer = evd_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_dispatcher_finalize_unregister(uint8_t id, int handler_id)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, handler_id);
+
+	if (unreg_finalizer == NULL)
+		return -EINVAL;
+
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static int
+evd_set_service_runstate(uint8_t id, int state)
+{
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_start(uint8_t id)
+{
+	return evd_set_service_runstate(id, 1);
+}
+
+int
+rte_dispatcher_stop(uint8_t id)
+{
+	return evd_set_service_runstate(id, 0);
+}
+
+static void
+evd_aggregate_stats(struct rte_dispatcher_stats *result,
+		    const struct rte_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_batch_count += part->ev_batch_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+int
+rte_dispatcher_stats_get(uint8_t id,
+			       struct rte_dispatcher_stats *stats)
+{
+	struct rte_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	*stats = (struct rte_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		evd_aggregate_stats(stats, &lcore->stats);
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_stats_reset(uint8_t id)
+{
+	struct rte_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		lcore->stats = (struct rte_dispatcher_stats) {};
+	}
+
+	return 0;
+}
diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
new file mode 100644
index 0000000000..6712687a08
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.h
@@ -0,0 +1,480 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_DISPATCHER_H__
+#define __RTE_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Dispatcher
+ *
+ * The purpose of the dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ *
+ * @return
+ *   Returns true in case this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ */
+
+typedef void
+(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				  struct rte_event *events, uint16_t num,
+				  void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the dispatcher to notify the
+ * application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_finalize_register().
+ */
+
+typedef void
+(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				   void *cb_data);
+
+/**
+ * Dispatcher statistics
+ */
+struct rte_dispatcher_stats {
+	uint64_t poll_count;
+	/**< Number of event dequeue calls made toward the event device. */
+	uint64_t ev_batch_count;
+	/**< Number of non-empty event batches dequeued from event device.*/
+	uint64_t ev_dispatch_count;
+	/**< Number of events dispatched to a handler.*/
+	uint64_t ev_drop_count;
+	/**< Number of events dropped because no handler was found. */
+};
+
+/**
+ * Create a dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Free a dispatcher.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of a dispatcher.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the dispatcher service is mapped (with rte_service_map_lcore_set())
+ * to a lcore to which no ports are bound, the service function will be a
+ * no-operation.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the
+ * lcore specified by @c lcore_id.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the
+ * lcore specified by @c lcore_id.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of events to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events where no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order of which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different process callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] are dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the dispatcher may choose to deliver first [ev0, ev2]
+ * using A's process callback, and then [ev1] to B's - or vice versa.
+ *
+ * rte_dispatcher_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the event
+ * dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_register(uint8_t id,
+			      rte_dispatcher_match_t match_fun,
+			      void *match_cb_data,
+			      rte_dispatcher_process_t process_fun,
+			      void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_unregister(uint8_t id, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_dispatcher_finalize_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the
+ * dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_register(uint8_t id,
+				 rte_dispatcher_finalize_t finalize_fun,
+				 void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_unregister(uint8_t id, int reg_id);
+
+/**
+ * Start a dispatcher instance.
+ *
+ * Enables the dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_dispatcher_start().
+ *
+ * For the dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. A dispatcher's
+ * service is retrieved using rte_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
+ * starting the dispatcher.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_dispatcher_start(uint8_t id);
+
+/**
+ * Stop a running dispatcher instance.
+ *
+ * Disables the dispatcher service.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_dispatcher_stop(uint8_t id);
+
+/**
+ * Retrieve statistics for a dispatcher instance.
+ *
+ * This function is MT safe and may be called by any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param id
+ *  The dispatcher identifier.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_stats_get(uint8_t id,
+			       struct rte_dispatcher_stats *stats);
+
+/**
+ * Reset statistics for a dispatcher instance.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but may not produce the correct result if the
+ * dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_stats_reset(uint8_t id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DISPATCHER_H__ */
diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
new file mode 100644
index 0000000000..8f9ad96522
--- /dev/null
+++ b/lib/dispatcher/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	# added in 23.11
+	rte_dispatcher_create;
+	rte_dispatcher_free;
+	rte_dispatcher_service_id_get;
+	rte_dispatcher_bind_port_to_lcore;
+	rte_dispatcher_unbind_port_from_lcore;
+	rte_dispatcher_register;
+	rte_dispatcher_unregister;
+	rte_dispatcher_finalize_register;
+	rte_dispatcher_finalize_unregister;
+	rte_dispatcher_start;
+	rte_dispatcher_stop;
+	rte_dispatcher_stats_get;
+	rte_dispatcher_stats_reset;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 099b0ed18a..3093b338d2 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -35,6 +35,7 @@ libraries = [
         'distributor',
         'efd',
         'eventdev',
+        'dispatcher', # dispatcher depends on eventdev
         'gpudev',
         'gro',
         'gso',
@@ -81,6 +82,7 @@ optional_libs = [
         'cfgfile',
         'compressdev',
         'cryptodev',
+        'dispatcher',
         'distributor',
         'dmadev',
         'efd',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v3 2/3] test: add dispatcher test suite
  2023-09-04 13:03                               ` [PATCH v3 0/3] Add dispatcher library Mattias Rönnblom
  2023-09-04 13:03                                 ` [PATCH v3 1/3] lib: introduce " Mattias Rönnblom
@ 2023-09-04 13:03                                 ` Mattias Rönnblom
  2023-09-04 13:03                                 ` [PATCH v3 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2023-09-06 19:32                                 ` [PATCH v3 0/3] Add dispatcher library Stephen Hemminger
  3 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-04 13:03 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Add unit tests for the dispatcher.

--
PATCH v3:
 o Adapt the test suite to dispatcher API name changes.

PATCH v2:
 o Test finalize callback functionality.
 o Test handler and finalizer count upper limits.
 o Add statistics reset test.
 o Make sure the dispatcher supplies the proper event dev id and port id
   back to the application.

PATCH:
 o Extend test to cover often-used handler optimization feature.

RFC v4:
 o Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 MAINTAINERS                |    1 +
 app/test/meson.build       |    1 +
 app/test/test_dispatcher.c | 1054 ++++++++++++++++++++++++++++++++++++
 3 files changed, 1056 insertions(+)
 create mode 100644 app/test/test_dispatcher.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 6704cd5b2c..43890cad0e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1729,6 +1729,7 @@ F: lib/node/
 Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
+F: app/test/test_dispatcher.c
 
 Test Applications
 -----------------
diff --git a/app/test/meson.build b/app/test/meson.build
index 05bae9216d..3303c73817 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -55,6 +55,7 @@ source_file_deps = {
     'test_cycles.c': [],
     'test_debug.c': [],
     'test_devargs.c': ['kvargs'],
+    'test_dispatcher.c': ['dispatcher'],
     'test_distributor.c': ['distributor'],
     'test_distributor_perf.c': ['distributor'],
     'test_dmadev.c': ['dmadev', 'bus_vdev'],
diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c
new file mode 100644
index 0000000000..b64103c48e
--- /dev/null
+++ b/app/test/test_dispatcher.c
@@ -0,0 +1,1054 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include "test.h"
+
+#include <stdatomic.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+
+#define NUM_WORKERS 3
+
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct cb_count {
+	uint8_t expected_event_dev_id;
+	uint8_t expected_event_port_id[RTE_MAX_LCORE];
+	atomic_int count;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	uint8_t dispatcher_id;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	int never_match_reg_id;
+	uint64_t never_match_count;
+	struct cb_count never_process_count;
+
+	struct app_queue queues[NUM_QUEUES];
+
+	int finalize_reg_id;
+	struct cb_count finalize_count;
+
+	bool running;
+
+	atomic_int completed_events;
+	atomic_int errors;
+};
+
+#define RETURN_ON_ERROR(rc) \
+	do {					\
+		if (rc != TEST_SUCCESS)		\
+			return rc;		\
+	} while (0)
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+	TEST_ASSERT(rc >= 0, "Unable to get event dev id for " DSW_VDEV);
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc < 0)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher_id = rte_rand_max(256);
+
+	rc = rte_dispatcher_create(app->dispatcher_id, app->event_dev_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to create event dispatcher");
+
+	rc = rte_dispatcher_service_id_get(app->dispatcher_id,
+					   &app->dispatcher_service_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to get event dispatcher service ID");
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Error enabling dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rte_dispatcher_free(app->dispatcher_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	app->never_process_count.expected_event_dev_id =
+		app->event_dev_id;
+	app->finalize_count.expected_event_dev_id =
+		app->event_dev_id;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		uint8_t port_id = WORKER_PORT_ID(i);
+
+		int rc = rte_dispatcher_bind_port_to_lcore(
+			app->dispatcher_id, port_id, DEQUEUE_BURST_SIZE, 0,
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", port_id, lcore_id);
+
+		app->never_process_count.expected_event_port_id[lcore_id] =
+			port_id;
+		app->finalize_count.expected_event_port_id[lcore_id] = port_id;
+	}
+
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_dispatcher_unbind_port_from_lcore(
+			app->dispatcher_id,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
+}
+
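+/*
+ * Process callback for a particular queue. Verifies that the events
+ * arrive on the expected event device and event port, belong to the
+ * expected queue, and are in per-flow order. Events from intermediate
+ * queues are forwarded to the next queue in the pipeline; events from
+ * the last queue are counted as completed.
+ */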
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+		       struct rte_event *in_events, uint16_t num,
+		       void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		atomic_fetch_add_explicit(&app->completed_events, num,
+					  memory_order_relaxed);
+}
+
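+/*
+ * A match function which never matches any event. The call count is
+ * used to verify that the dispatcher prioritizes often-matching
+ * handlers, and thus rarely needs to consult this handler (see
+ * test_basic()).
+ */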
+static bool
+never_match(const struct rte_event *event __rte_unused, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count)++;
+
+	return false;
+}
+
+static void
+test_app_never_process(uint8_t event_dev_id,
+		       uint8_t event_port_id,
+		       struct rte_event *in_events __rte_unused,
+		       uint16_t num, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		atomic_fetch_add_explicit(&count->count, num,
+					  memory_order_relaxed);
+}
+
+static void
+finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		atomic_fetch_add_explicit(&count->count, 1,
+					  memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	app->never_match_reg_id =
+		rte_dispatcher_register(app->dispatcher_id, never_match,
+					&app->never_match_count,
+					test_app_never_process,
+					&app->never_process_count);
+
+	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
+		    "never-match handler");
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_dispatcher_register(app->dispatcher_id,
+						 match_queue, (void *)queue_id,
+						 test_app_process_queue,
+						 app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	app->finalize_reg_id =
+		rte_dispatcher_finalize_register(app->dispatcher_id,
+						       finalize,
+						       &app->finalize_count);
+	TEST_ASSERT(app->finalize_reg_id >= 0, "Error registering "
+		    "finalize callback");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+	int rc;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	rc = rte_dispatcher_unregister(app->dispatcher_id, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+	int rc;
+
+	if (app->never_match_reg_id >= 0) {
+		rc = rte_dispatcher_unregister(app->dispatcher_id,
+						     app->never_match_reg_id);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
+				    "handler");
+		app->never_match_reg_id = -1;
+	}
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		rc = test_app_unregister_callback(app, i);
+		RETURN_ON_ERROR(rc);
+	}
+
+	if (app->finalize_reg_id >= 0) {
+		rc = rte_dispatcher_finalize_unregister(
+			app->dispatcher_id, app->finalize_reg_id
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalize "
+				    "callback");
+		app->finalize_reg_id = -1;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_start(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_stop(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_reset_dispatcher_stats(struct test_app *app)
+{
+	int rc;
+	struct rte_dispatcher_stats stats;
+
+	rc = rte_dispatcher_stats_reset(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to reset event dispatcher statistics");
+
+	rc = rte_dispatcher_stats_get(app->dispatcher_id, &stats);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to retrieve event dispatcher "
+			    "statistics");
+
+	TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
+			  "not zero");
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
+			    "Too few lcores. Needs at least %d worker lcores",
+			    NUM_SERVICE_CORES);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_dispatcher(app);
+
+	app->running = true;
+
+	return rc;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_stop_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_create_dispatcher(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_setup_service_cores(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_register_callbacks(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_bind_ports(test_app);
+
+	return rc;
+}
+
+static void test_teardown(void)
+{
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return atomic_load_explicit(&app->completed_events,
+				    memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return atomic_load_explicit(&app->errors, memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_dispatcher_stats stats;
+	rc = rte_dispatcher_stats_get(test_app->dispatcher_id,
+					    &stats);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
+			  "Never-match handler's process function has "
+			  "been called");
+
+	int finalize_count =
+		atomic_load_explicit(&test_app->finalize_count.count,
+				     memory_order_relaxed);
+
+	TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
+	TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
+		    "Finalize count larger than event count");
+
+	TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
+			  "%"PRIu64" batches dequeued, but finalize called %d "
+			  "times", stats.ev_batch_count, finalize_count);
+
+	/*
+	 * The event dispatcher should call often-matching match functions
+	 * more often, and thus this never-matching match function should
+	 * be called relatively infrequently.
+	 */
+	TEST_ASSERT(test_app->never_match_count <
+		    (stats.ev_dispatch_count / 4),
+		    "Never-matching match function called suspiciously often");
+
+	rc = test_app_reset_dispatcher_stats(test_app);
+	RETURN_ON_ERROR(rc);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue;
+	struct rte_dispatcher_stats stats;
+
+	unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rc = rte_dispatcher_stats_get(test_app->dispatcher_id,
+						    &stats);
+		RETURN_ON_ERROR(rc);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+#define MORE_THAN_MAX_HANDLERS 1000
+#define MIN_HANDLERS 32
+
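+/*
+ * Verify that the dispatcher rejects handler registrations beyond its
+ * capacity with -ENOMEM, while accommodating at least MIN_HANDLERS
+ * registrations.
+ */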
+static int
+test_many_handler_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_HANDLERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+
+	RETURN_ON_ERROR(rc);
+
+	for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
+		reg_id = rte_dispatcher_register(test_app->dispatcher_id,
+						       never_match, NULL,
+						       test_app_never_process,
+						       NULL);
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
+		    "after %d handler registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_unregister(test_app->dispatcher_id,
+						     reg_ids[i]);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+dummy_finalize(uint8_t event_dev_id __rte_unused,
+	       uint8_t event_port_id __rte_unused,
+	       void *cb_data __rte_unused)
+{
+}
+
+#define MORE_THAN_MAX_FINALIZERS 1000
+#define MIN_FINALIZERS 16
+
+static int
+test_many_finalize_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_FINALIZERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+
+	RETURN_ON_ERROR(rc);
+
+	for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
+		reg_id = rte_dispatcher_finalize_register(
+			test_app->dispatcher_id, dummy_finalize, NULL
+		);
+
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
+		    "already after %d registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_finalize_unregister(
+			test_app->dispatcher_id, reg_ids[i]
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_handler_registrations),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_finalize_registrations),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_TEST_COMMAND(dispatcher_autotest, test_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v3 3/3] doc: add dispatcher programming guide
  2023-09-04 13:03                               ` [PATCH v3 0/3] Add dispatcher library Mattias Rönnblom
  2023-09-04 13:03                                 ` [PATCH v3 1/3] lib: introduce " Mattias Rönnblom
  2023-09-04 13:03                                 ` [PATCH v3 2/3] test: add dispatcher test suite Mattias Rönnblom
@ 2023-09-04 13:03                                 ` Mattias Rönnblom
  2023-09-06 19:32                                 ` [PATCH v3 0/3] Add dispatcher library Stephen Hemminger
  3 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-04 13:03 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Provide programming guide for the dispatcher library.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--
PATCH v3:
 o Adapt guide to the dispatcher API name changes.

PATCH:
 o Improve grammar and spelling.

RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 MAINTAINERS                              |   1 +
 doc/api/doxy-api-index.md                |   1 +
 doc/api/doxy-api.conf.in                 |   1 +
 doc/guides/prog_guide/dispatcher_lib.rst | 434 +++++++++++++++++++++++
 doc/guides/prog_guide/index.rst          |   1 +
 5 files changed, 438 insertions(+)
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst

diff --git a/MAINTAINERS b/MAINTAINERS
index 43890cad0e..ab35498204 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1730,6 +1730,7 @@ Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
 F: app/test/test_dispatcher.c
+F: doc/guides/prog_guide/dispatcher_lib.rst
 
 Test Applications
 -----------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index fdeda13932..7d0cad9fed 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -155,6 +155,7 @@ The public API headers are grouped by topics:
 
 - **classification**
   [reorder](@ref rte_reorder.h),
+  [dispatcher](@ref rte_dispatcher.h),
   [distributor](@ref rte_distributor.h),
   [EFD](@ref rte_efd.h),
   [ACL](@ref rte_acl.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a88accd907..59c679e621 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dispatcher \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst
new file mode 100644
index 0000000000..d4f29ce7ba
--- /dev/null
+++ b/doc/guides/prog_guide/dispatcher_lib.rst
@@ -0,0 +1,434 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Dispatcher
+==========
+
+Overview
+--------
+
+The purpose of the dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The dispatcher replaces the conditional logic that follows an event
+device dequeue operation, where events are dispatched to different
+parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B expects events on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int i, n;
+    
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+	                                MAX_BURST, 0);
+    
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+    
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example, ``module_a_process()`` is
+broken into two processing stages (a module-internal affair), the
+shared conditional code must be updated to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive the event,
+  using a set of application-provided, per-handler event matching
+  callback functions.
+* Deliver the events matching a particular handler to that handler,
+  using its process callback.
+
+Had the above application made use of the dispatcher, the code
+relevant for its module A may have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+           return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+    
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+    
+    /* In the module's initialization code */
+    rte_dispatcher_register(DISPATCHER_ID, module_a_match, NULL,
+			    module_a_process_events, module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or,
+phrased differently, what is replacing the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is
+run as a DPDK :doc:`Service <service_cores>`.
+
+The dispatcher is a layer between the application and the event device
+in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
+
+Dispatcher Creation
+-------------------
+
+A dispatcher is created using ``rte_dispatcher_create()``.
+
+The dispatcher id is provided by the application, and must be unique.
+
+The event device must be configured before the dispatcher is created.
+
+Usually, only one dispatcher is needed per event device. A dispatcher
+handles exactly one event device.
+
+A dispatcher is freed using the ``rte_dispatcher_free()``
+function. The dispatcher's service functions must not be running on
+any lcore at the point of this call.
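+
+Below is a brief sketch of dispatcher creation and teardown, assuming
+an application-defined ``DISPATCHER_ID`` and an already-configured
+event device ``event_dev_id``:
+
+.. code-block:: c
+
+    /* In the application's initialization code */
+    rte_dispatcher_create(DISPATCHER_ID, event_dev_id);
+
+    /* In the application's termination code, with the dispatcher's
+     * service function no longer running on any lcore
+     */
+    rte_dispatcher_free(DISPATCHER_ID);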
+
+Event Port Binding
+------------------
+
+To be able to dequeue events, the dispatcher must know which event
+ports are to be used, on all the lcores it uses. The application
+provides this information using
+``rte_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
+The ``rte_dispatcher_unbind_port_from_lcore()`` function is used to
+undo this operation.
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the dispatcher's
+service function is running on any lcore.
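+
+Below is a sketch of a deployment-time binding loop, assuming one
+dedicated event port per lcore, and application-defined
+``DISPATCHER_ID``, ``MAX_BURST`` and ``FIRST_PORT_ID`` constants:
+
+.. code-block:: c
+
+    uint8_t port_id = FIRST_PORT_ID;
+    unsigned int lcore_id;
+
+    RTE_LCORE_FOREACH(lcore_id) {
+            rte_dispatcher_bind_port_to_lcore(DISPATCHER_ID, port_id,
+                                              MAX_BURST, 0, lcore_id);
+            port_id++;
+    }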
+
+Event Handlers
+--------------
+
+The dispatcher handler is an interface between the dispatcher and an
+application module, used to route events to the appropriate part of
+the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_dispatcher_match_t`` callback, whose job is to
+  decide if this event is to be the property of this handler.
+* The ``rte_dispatcher_process_t`` callback, which is used by the
+  dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the dispatcher may impose an
+upper limit on the number of handlers. In addition, installing a large
+number of handlers increases dispatcher overhead, although this does
+not necessarily translate to a system-level performance degradation. See
+the section on :ref:`Event Clustering` for more information.
+
+Handler registration and unregistration cannot safely be done while
+the dispatcher's service function is running on any lcore.
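+
+Below is a sketch of how a registration id may be retained, allowing
+the handler to later be removed using ``rte_dispatcher_unregister()``:
+
+.. code-block:: c
+
+    int reg_id;
+
+    reg_id = rte_dispatcher_register(DISPATCHER_ID, module_a_match, NULL,
+                                     module_a_process_events,
+                                     module_a_data);
+
+    /* ... later, with the dispatcher service no longer running ... */
+
+    rte_dispatcher_unregister(DISPATCHER_ID, reg_id);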
+
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides if an event should be
+delivered to this handler, or not.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
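+
+A match function may use any combination of ``rte_event`` fields (or
+even event user data). Below is a sketch of a match function for a
+hypothetical module which owns one queue, but handles only a
+particular sub event type (``MODULE_C_QUEUE_ID`` and
+``MODULE_C_SUB_TYPE`` being application-defined):
+
+.. code-block:: c
+
+    static bool
+    module_c_match(const struct rte_event *event, void *cb_data)
+    {
+            return event->queue_id == MODULE_C_QUEUE_ID &&
+                    event->sub_event_type == MODULE_C_SUB_TYPE;
+    }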
+
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the dispatcher's service
+function, upon the arrival of events to the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls
+being made. For example, if the dispatcher dequeues two events from
+the event device, it may choose to find out the destination for the
+first event, and deliver it, and then continue to find out the
+destination for the second, and then deliver that event as well. The
+dispatcher may also choose a strategy where no event is delivered
+until the destination handlers for both events have been determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The dispatcher maintains the order of events destined for the same
+handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the dispatcher delivers these events to the application.
+
+The dispatcher *does not* guarantee to maintain the order of events
+delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block::
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The dispatcher may deliver the events in the following manner:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The dispatcher may also choose to cluster (group) all events destined
+for ``module_b_stage_0_process()`` into one array:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the dispatcher may not do is to reorder event
+``e1`` so that it precedes ``e0`` in the array passed to module
+B's stage 0 process callback.
+
+Although clustering requires some extra work for the dispatcher, it
+leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The dispatcher may be configured to notify one or more parts of the
+application when the matching and processing of a batch of events has
+completed.
+
+The ``rte_dispatcher_finalize_register()`` call is used to
+register a finalize callback. The function
+``rte_dispatcher_finalize_unregister()`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+modules, or a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+    
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_finalize_register(DISPATCHER_ID, finalize_batch,
+                                     shared_event_buffer);
+
+The dispatcher does not track any relationship between a handler and a
+finalize callback, and all finalize callbacks will be called, if (and
+only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the dispatcher's service function is running on any lcore.
+
+Service
+-------
+
+The dispatcher is a DPDK service, and is managed in a manner similar
+to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-configured dispatcher
+(identified by ``DISPATCHER_ID``) to that lcore.
+
+.. code-block:: c
+
+    static void
+    launch_dispatcher_core(unsigned lcore_id)
+    {
+            uint32_t service_id;
+    
+            rte_service_lcore_add(lcore_id);
+    
+            rte_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
+    
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+    
+            rte_service_lcore_start(lcore_id);
+    
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the dispatcher must be started.
+
+.. code-block:: c
+
+    rte_dispatcher_start(DISPATCHER_ID);
+
+
+Multi Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where CPU cycles spent on application domain logic is the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In the light of this, it makes sense to have the dispatcher service be
+the only DPDK service on all lcores used for packet processing — at
+least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the dispatcher into the
+application, were performed on the lcore even when no events were
+received, are prime targets for being converted into such auxiliary
+services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
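+
+Below is a sketch of how such a timer management task may be packaged
+as a DPDK service, using the service component API. Like the
+dispatcher service, the resulting service must also be mapped to the
+appropriate lcores and have its run state set.
+
+.. code-block:: c
+
+    static int32_t
+    timer_service_run(void *userdata)
+    {
+            rte_timer_manage();
+
+            return 0;
+    }
+
+    /* In the application's initialization code */
+    struct rte_service_spec service = {
+            .name = "timer_service",
+            .callback = timer_service_run,
+    };
+    uint32_t service_id;
+
+    rte_service_component_register(&service, &service_id);
+    rte_service_component_runstate_set(service_id, 1);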
+
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to ensure resource reclamation occurs even though some
+lcores currently do not process any events.
+
+If more services than the dispatcher service are mapped to a service
+lcore, it's important that the other services are well-behaved and
+don't interfere with event processing to the extent that the system's
+throughput and/or latency requirements are at risk of not being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 52a6d9e7aa..ab05bd6074 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -60,6 +60,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    dispatcher_lib
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 0/3] Add dispatcher library
  2023-09-04 13:03                               ` [PATCH v3 0/3] Add dispatcher library Mattias Rönnblom
                                                   ` (2 preceding siblings ...)
  2023-09-04 13:03                                 ` [PATCH v3 3/3] doc: add dispatcher programming guide Mattias Rönnblom
@ 2023-09-06 19:32                                 ` Stephen Hemminger
  2023-09-06 20:28                                   ` Mattias Rönnblom
  3 siblings, 1 reply; 102+ messages in thread
From: Stephen Hemminger @ 2023-09-06 19:32 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On Mon, 4 Sep 2023 15:03:10 +0200
Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:

> The purpose of the dispatcher library is to decouple different parts
> of an eventdev-based application (e.g., processing pipeline stages),
> sharing the same underlying event device.
> 
> The dispatcher replaces the conditional logic (often, a switch
> statement) that typically follows an event device dequeue operation,
> where events are dispatched to different parts of the application
> based on event meta data, such as the queue id or scheduling type.
> 
> The concept is similar to a UNIX file descriptor event loop library.
> Instead of tying callback functions to fds as for example libevent
> does, the dispatcher relies on application-supplied matching callback
> functions to decide where to deliver events.
> 
> A dispatcher is configured to dequeue events from a specific event
> device, and ties into the service core framework, to do its (and the
> application's) work.
> 
> The dispatcher provides a convenient way for an eventdev-based
> application to use service cores for application-level processing, and
> thus for sharing those cores with other DPDK services.
> 
> Although the dispatcher adds some overhead, experience suggests that
> the net effect on the application (both synthetic benchmarks and more
> real-world applications) may well be positive. This is primarily due
> to clustering (see programming guide) reducing cache misses.
> 
> Benchmarking indicates that the overhead is ~10 cc/event (on a
> large core), with a handful of often-used handlers.
> 
> The dispatcher does not support run-time reconfiguration.
> 
> The use of the dispatcher library is optional, and an eventdev-based
> application may still opt to access the event device using direct
> eventdev API calls, or by some other means.

My experience with event libraries is mixed.
There are already multiple choices libevent, libev, and libsystemd as
well as rte_epoll().  Not sure if adding another one is helpful.

The main issue is dealing with external (non DPDK) events which usually
are handled as file descriptors (signalfd, epoll, etc). The other issue
is the thread safety. Most event libraries are not thread safe which
makes handling one event waking up another difficult.

With DPDK, there is the additional questions about use from non-EAL threads.

For the test suite, you should look at what libsystemd does.


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 0/3] Add dispatcher library
  2023-09-06 19:32                                 ` [PATCH v3 0/3] Add dispatcher library Stephen Hemminger
@ 2023-09-06 20:28                                   ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-06 20:28 UTC (permalink / raw)
  To: Stephen Hemminger, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-09-06 21:32, Stephen Hemminger wrote:
> On Mon, 4 Sep 2023 15:03:10 +0200
> Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:
> 
>> The purpose of the dispatcher library is to decouple different parts
>> of an eventdev-based application (e.g., processing pipeline stages),
>> sharing the same underlying event device.
>>
>> The dispatcher replaces the conditional logic (often, a switch
>> statement) that typically follows an event device dequeue operation,
>> where events are dispatched to different parts of the application
>> based on event meta data, such as the queue id or scheduling type.
>>
>> The concept is similar to a UNIX file descriptor event loop library.
>> Instead of tying callback functions to fds as for example libevent
>> does, the dispatcher relies on application-supplied matching callback
>> functions to decide where to deliver events.
>>
>> A dispatcher is configured to dequeue events from a specific event
>> device, and ties into the service core framework, to do its (and the
>> application's) work.
>>
>> The dispatcher provides a convenient way for an eventdev-based
>> application to use service cores for application-level processing, and
>> thus for sharing those cores with other DPDK services.
>>
>> Although the dispatcher adds some overhead, experience suggests that
>> the net effect on the application (both synthetic benchmarks and more
>> real-world applications) may well be positive. This is primarily due
>> to clustering (see programming guide) reducing cache misses.
>>
>> Benchmarking indicates that the overhead is ~10 cc/event (on a
>> large core), with a handful of often-used handlers.
>>
>> The dispatcher does not support run-time reconfiguration.
>>
>> The use of the dispatcher library is optional, and an eventdev-based
>> application may still opt to access the event device using direct
>> eventdev API calls, or by some other means.
> 
> My experience with event libraries is mixed.
> There are already multiple choices libevent, libev, and libsystemd as
> well as rte_epoll().  Not sure if adding another one is helpful.
> 

This library *conceptually* provides the same kind of functionality as 
libevent, but has nothing to do with file descriptor events. These are 
eventdev events, and thus are tied to the arrival of a packet, a 
notification some kind of hardware offload, a timeout, or something else 
DPDK PMD-related.

> The main issue is dealing with external (non DPDK) events which usually
> are handled as file descriptors (signalfd, epoll, etc). The other issue
> is the thread safety. Most event libraries are not thread safe which
> makes handling one event waking up another difficult.
> 
This machinery is for use exclusively by EAL threads, for fast-path 
packet processing. No syscalls, no non-DPDK events.

> With DPDK, there is the additional questions about use from non-EAL threads.
> 

See above.

> For the test suite, you should look at what libsystemd does.
> 

^ permalink raw reply	[flat|nested] 102+ messages in thread

* RE: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-04 13:03                                 ` [PATCH v3 1/3] lib: introduce " Mattias Rönnblom
@ 2023-09-17 16:46                                   ` Naga Harish K, S V
  2023-09-19  9:20                                     ` Mattias Rönnblom
  2023-09-20  9:32                                     ` Jerin Jacob
  2023-09-19 10:58                                   ` Jerin Jacob
                                                     ` (2 subsequent siblings)
  3 siblings, 2 replies; 102+ messages in thread
From: Naga Harish K, S V @ 2023-09-17 16:46 UTC (permalink / raw)
  To: mattias.ronnblom, dev
  Cc: Jerin Jacob, techboard, Van Haaren, Harry, hofors, Nilsson,
	Peter, Heng Wang, Pavan Nikhilesh, Gujjar, Abhinandan S,
	Carrillo, Erik G, Shijith Thotton, Hemant Agrawal, Sachin Saxena,
	Liang Ma, Mccarthy, Peter, Yan, Zhirun, mattias.ronnblom



> -----Original Message-----
> From: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> Sent: Monday, September 4, 2023 6:33 PM
> To: dev@dpdk.org
> Cc: Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van Haaren,
> Harry <harry.van.haaren@intel.com>; hofors@lysator.liu.se; Nilsson, Peter
> <peter.j.nilsson@ericsson.com>; Heng Wang <heng.wang@ericsson.com>;
> Naga Harish K, S V <s.v.naga.harish.k@intel.com>; Pavan Nikhilesh
> <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Carrillo, Erik G <Erik.G.Carrillo@intel.com>;
> Shijith Thotton <sthotton@marvell.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Sachin Saxena <sachin.saxena@oss.nxp.com>;
> Liang Ma <liangma@liangbit.com>; Mccarthy, Peter
> <Peter.Mccarthy@intel.com>; Yan, Zhirun <Zhirun.Yan@intel.com>;
> mattias.ronnblom <mattias.ronnblom@ericsson.com>
> Subject: [PATCH v3 1/3] lib: introduce dispatcher library
> 
> The purpose of the dispatcher library is to help reduce coupling in an
> Eventdev-based DPDK application.
> 
> In addition, the dispatcher also provides a convenient and flexible way for the
> application to use service cores for application-level processing.
> 
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> Reviewed-by: Heng Wang <heng.wang@ericsson.com>
> 
> --
> 
> PATCH v3:
>  o To underline its optional character and since it does not provide
>    hardware abstraction, the event dispatcher is now a separate
>    library.
>  o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
>    shorter and to avoid the rte_event_* namespace.
> 

Rte_dispatcher is basically dispatching events but it feels like the name does not convey that.
Also, it is like any other adapter service that can reside within the eventdev directory.

I can see some discussion in previous threads related to the placement of the dispatcher library.

It is an optional eventdev application service, not enforcing this programming model on the application.
The documentation may need to be updated and mention that this is optional.

If any hardware comes up with the dispatcher feature, then this library may need to be moved inside the eventdev library later.

So, It makes sense to keep this optional service in the eventdev folder as an optional feature.

> PATCH v2:
>  o Add dequeue batch count statistic.
>  o Add statistics reset function to API.
>  o Clarify MT safety guarantees (or lack thereof) in the API documentation.
>  o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
>    to be consistent with similar loops elsewhere in the dispatcher.
>  o Fix variable names in finalizer unregister function.
> 
> PATCH:
>  o Change prefix from RED to EVD, to avoid confusion with random
>    early detection.
> 
> RFC v4:
>  o Move handlers to per-lcore data structures.
>  o Introduce mechanism which rearranges handlers so that often-used
>    handlers tend to be tried first.
>  o Terminate dispatch loop in case all events are delivered.
>  o To avoid the dispatcher's service function hogging the CPU, process
>    only one batch per call.
>  o Have service function return -EAGAIN if no work is performed.
>  o Events delivered in the process function is no longer marked 'const',
>    since modifying them may be useful for the application and cause
>    no difficulties for the dispatcher.
>  o Various minor API documentation improvements.
> 
> RFC v3:
>  o Add stats_get() function to the version.map file.
> ---
>  MAINTAINERS                     |   3 +
>  lib/dispatcher/meson.build      |  17 +
>  lib/dispatcher/rte_dispatcher.c | 791 ++++++++++++++++++++++++++++++++
>  lib/dispatcher/rte_dispatcher.h | 480 +++++++++++++++++++
>  lib/dispatcher/version.map      |  20 +
>  lib/meson.build                 |   2 +
>  6 files changed, 1313 insertions(+)
>  create mode 100644 lib/dispatcher/meson.build
>  create mode 100644 lib/dispatcher/rte_dispatcher.c
>  create mode 100644 lib/dispatcher/rte_dispatcher.h
>  create mode 100644 lib/dispatcher/version.map
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index a926155f26..6704cd5b2c 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -1726,6 +1726,9 @@ M: Nithin Dabilpuram
> <ndabilpuram@marvell.com>
>  M: Pavan Nikhilesh <pbhagavatula@marvell.com>
>  F: lib/node/
> 
> +Dispatcher - EXPERIMENTAL
> +M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> +F: lib/dispatcher/
> 
>  Test Applications
>  -----------------
> diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
> new file mode 100644
> index 0000000000..c6054a3a5d
> --- /dev/null
> +++ b/lib/dispatcher/meson.build
> @@ -0,0 +1,17 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2023 Ericsson AB
> +
> +if is_windows
> +    build = false
> +    reason = 'not supported on Windows'
> +    subdir_done()
> +endif
> +
> +sources = files(
> +        'rte_dispatcher.c',
> +)
> +headers = files(
> +        'rte_dispatcher.h',
> +)
> +
> +deps += ['eventdev']
> diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
> new file mode 100644
> index 0000000000..3319fe09f2
> --- /dev/null
> +++ b/lib/dispatcher/rte_dispatcher.c
> @@ -0,0 +1,791 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Ericsson AB
> + */
> +
> +#include <stdbool.h>
> +#include <stdint.h>
> +
> +#include <rte_branch_prediction.h>
> +#include <rte_common.h>
> +#include <rte_lcore.h>
> +#include <rte_random.h>
> +#include <rte_service_component.h>
> +
> +#include "eventdev_pmd.h"
> +
> +#include <rte_dispatcher.h>
> +
> +#define EVD_MAX_PORTS_PER_LCORE 4
> +#define EVD_MAX_HANDLERS 32
> +#define EVD_MAX_FINALIZERS 16
> +#define EVD_AVG_PRIO_INTERVAL 2000
> +
> +struct rte_dispatcher_lcore_port {
> +	uint8_t port_id;
> +	uint16_t batch_size;
> +	uint64_t timeout;
> +};
> +
> +struct rte_dispatcher_handler {
> +	int id;
> +	rte_dispatcher_match_t match_fun;
> +	void *match_data;
> +	rte_dispatcher_process_t process_fun;
> +	void *process_data;
> +};
> +
> +struct rte_dispatcher_finalizer {
> +	int id;
> +	rte_dispatcher_finalize_t finalize_fun;
> +	void *finalize_data;
> +};
> +
> +struct rte_dispatcher_lcore {
> +	uint8_t num_ports;
> +	uint16_t num_handlers;
> +	int32_t prio_count;
> +	struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
> +	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
> +	struct rte_dispatcher_stats stats;
> +} __rte_cache_aligned;
> +
> +struct rte_dispatcher {
> +	uint8_t id;
> +	uint8_t event_dev_id;
> +	int socket_id;
> +	uint32_t service_id;
> +	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
> +	uint16_t num_finalizers;
> +	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
> +};
> +
> +static struct rte_dispatcher *dispatchers[UINT8_MAX];
> +
> +static bool
> +evd_has_dispatcher(uint8_t id)
> +{
> +	return dispatchers[id] != NULL;
> +}
> +
> +static struct rte_dispatcher *
> +evd_get_dispatcher(uint8_t id)
> +{
> +	return dispatchers[id];
> +}
> +
> +static void
> +evd_set_dispatcher(uint8_t id, struct rte_dispatcher *dispatcher)
> +{
> +	dispatchers[id] = dispatcher;
> +}
> +
> +#define EVD_VALID_ID_OR_RET_EINVAL(id)					\
> +	do {								\
> +		if (unlikely(!evd_has_dispatcher(id))) {		\
> +			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
> +			return -EINVAL;					\
> +			return -EINVAL;					\
> +		}							\
> +	} while (0)
> +
> +static int
> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
> +		       const struct rte_event *event)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < lcore->num_handlers; i++) {
> +		struct rte_dispatcher_handler *handler =
> +			&lcore->handlers[i];
> +
> +		if (handler->match_fun(event, handler->match_data))
> +			return i;
> +	}
> +
> +	return -1;
> +}
> +
> +static void
> +evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
> +		       int handler_idx)
> +{
> +	struct rte_dispatcher_handler tmp;
> +
> +	if (handler_idx == 0)
> +		return;
> +
> +	/* Let the lucky handler "bubble" up the list */
> +
> +	tmp = lcore->handlers[handler_idx - 1];
> +
> +	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
> +
> +	lcore->handlers[handler_idx] = tmp;
> +}
> +
> +static inline void
> +evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
> +				int handler_idx, uint16_t handler_events)
> +{
> +	lcore->prio_count -= handler_events;
> +
> +	if (unlikely(lcore->prio_count <= 0)) {
> +		evd_prioritize_handler(lcore, handler_idx);
> +
> +		/*
> +		 * Randomize the interval in the unlikely case
> +		 * the traffic follows some very strict pattern.
> +		 */
> +		lcore->prio_count =
> +			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
> +			EVD_AVG_PRIO_INTERVAL / 2;
> +	}
> +}
> +
> +static inline void
> +evd_dispatch_events(struct rte_dispatcher *dispatcher,
> +		    struct rte_dispatcher_lcore *lcore,
> +		    struct rte_dispatcher_lcore_port *port,
> +		    struct rte_event *events, uint16_t num_events)
> +{
> +	int i;
> +	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
> +	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
> +	uint16_t drop_count = 0;
> +	uint16_t dispatch_count;
> +	uint16_t dispatched = 0;
> +
> +	for (i = 0; i < num_events; i++) {
> +		struct rte_event *event = &events[i];
> +		int handler_idx;
> +
> +		handler_idx = evd_lookup_handler_idx(lcore, event);
> +
> +		if (unlikely(handler_idx < 0)) {
> +			drop_count++;
> +			continue;
> +		}
> +
> +		bursts[handler_idx][burst_lens[handler_idx]] = *event;
> +		burst_lens[handler_idx]++;
> +	}
> +
> +	dispatch_count = num_events - drop_count;
> +
> +	for (i = 0; i < lcore->num_handlers &&
> +		 dispatched < dispatch_count; i++) {
> +		struct rte_dispatcher_handler *handler =
> +			&lcore->handlers[i];
> +		uint16_t len = burst_lens[i];
> +
> +		if (len == 0)
> +			continue;
> +
> +		handler->process_fun(dispatcher->event_dev_id, port->port_id,
> +				     bursts[i], len, handler->process_data);
> +
> +		dispatched += len;
> +
> +		/*
> +		 * Safe, since any reshuffling will only involve
> +		 * already-processed handlers.
> +		 */
> +		evd_consider_prioritize_handler(lcore, i, len);
> +	}
> +
> +	lcore->stats.ev_batch_count++;
> +	lcore->stats.ev_dispatch_count += dispatch_count;
> +	lcore->stats.ev_drop_count += drop_count;
> +
> +	for (i = 0; i < dispatcher->num_finalizers; i++) {
> +		struct rte_dispatcher_finalizer *finalizer =
> +			&dispatcher->finalizers[i];
> +
> +		finalizer->finalize_fun(dispatcher->event_dev_id,
> +					port->port_id,
> +					finalizer->finalize_data);
> +	}
> +}
> +
> +static __rte_always_inline uint16_t
> +evd_port_dequeue(struct rte_dispatcher *dispatcher,
> +		 struct rte_dispatcher_lcore *lcore,
> +		 struct rte_dispatcher_lcore_port *port)
> +{
> +	uint16_t batch_size = port->batch_size;
> +	struct rte_event events[batch_size];
> +	uint16_t n;
> +
> +	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
> +				    events, batch_size, port->timeout);
> +
> +	if (likely(n > 0))
> +		evd_dispatch_events(dispatcher, lcore, port, events, n);
> +
> +	lcore->stats.poll_count++;
> +
> +	return n;
> +}
> +
> +static __rte_always_inline uint16_t
> +evd_lcore_process(struct rte_dispatcher *dispatcher,
> +		  struct rte_dispatcher_lcore *lcore)
> +{
> +	uint16_t i;
> +	uint16_t event_count = 0;
> +
> +	for (i = 0; i < lcore->num_ports; i++) {
> +		struct rte_dispatcher_lcore_port *port =
> +			&lcore->ports[i];
> +
> +		event_count += evd_port_dequeue(dispatcher, lcore, port);
> +	}
> +
> +	return event_count;
> +}
> +
> +static int32_t
> +evd_process(void *userdata)
> +{
> +	struct rte_dispatcher *dispatcher = userdata;
> +	unsigned int lcore_id = rte_lcore_id();
> +	struct rte_dispatcher_lcore *lcore =
> +		&dispatcher->lcores[lcore_id];
> +	uint64_t event_count;
> +
> +	event_count = evd_lcore_process(dispatcher, lcore);
> +
> +	if (unlikely(event_count == 0))
> +		return -EAGAIN;
> +
> +	return 0;
> +}
> +
> +static int
> +evd_service_register(struct rte_dispatcher *dispatcher)
> +{
> +	struct rte_service_spec service = {
> +		.callback = evd_process,
> +		.callback_userdata = dispatcher,
> +		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
> +		.socket_id = dispatcher->socket_id
> +	};
> +	int rc;
> +
> +	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "evd_%d",
> +		 dispatcher->id);
> +
> +	rc = rte_service_component_register(&service, &dispatcher->service_id);
> +
> +	if (rc)
> +		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
> +				 "%s failed with error code %d\n",
> +				 service.name, rc);
> +
> +	return rc;
> +}
> +
> +static int
> +evd_service_unregister(struct rte_dispatcher *dispatcher)
> +{
> +	int rc;
> +
> +	rc = rte_service_component_unregister(dispatcher->service_id);
> +
> +	if (rc)
> +		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
> +				 "failed with error code %d\n", rc);
> +
> +	return rc;
> +}
> +
> +int
> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
> +{
> +	int socket_id;
> +	struct rte_dispatcher *dispatcher;
> +	int rc;
> +
> +	if (evd_has_dispatcher(id)) {
> +		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
> +				 id);
> +		return -EEXIST;
> +	}
> +
> +	socket_id = rte_event_dev_socket_id(event_dev_id);
> +
> +	dispatcher =
> +		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
> +				  RTE_CACHE_LINE_SIZE, socket_id);
> +
> +	if (dispatcher == NULL) {
> +		RTE_EDEV_LOG_ERR("Unable to allocate memory for
> dispatcher\n");
> +		return -ENOMEM;
> +	}
> +
> +	*dispatcher = (struct rte_dispatcher) {
> +		.id = id,
> +		.event_dev_id = event_dev_id,
> +		.socket_id = socket_id
> +	};
> +
> +	rc = evd_service_register(dispatcher);
> +
> +	if (rc < 0) {
> +		rte_free(dispatcher);
> +		return rc;
> +	}
> +
> +	evd_set_dispatcher(id, dispatcher);
> +
> +	return 0;
> +}
> +
> +int
> +rte_dispatcher_free(uint8_t id)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	int rc;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	rc = evd_service_unregister(dispatcher);
> +
> +	if (rc)
> +		return rc;
> +
> +	evd_set_dispatcher(id, NULL);
> +
> +	rte_free(dispatcher);
> +
> +	return 0;
> +}
> +
> +int
> +rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
> +{
> +	struct rte_dispatcher *dispatcher;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +

The service_id pointer needs to be validated against NULL before being dereferenced.
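
A minimal sketch of such a check (assuming -EINVAL is the appropriate
return value, as for an invalid id):

	if (service_id == NULL)
		return -EINVAL;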


> +	*service_id = dispatcher->service_id;
> +
> +	return 0;
> +}
> +
> +static int
> +lcore_port_index(struct rte_dispatcher_lcore *lcore,
> +		 uint8_t event_port_id)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < lcore->num_ports; i++) {
> +		struct rte_dispatcher_lcore_port *port =
> +			&lcore->ports[i];
> +
> +		if (port->port_id == event_port_id)
> +			return i;
> +	}
> +
> +	return -1;
> +}
> +
> +int
> +rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
> +					uint16_t batch_size, uint64_t timeout,
> +					unsigned int lcore_id)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	struct rte_dispatcher_lcore *lcore;
> +	struct rte_dispatcher_lcore_port *port;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	lcore =	&dispatcher->lcores[lcore_id];
> +
> +	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
> +		return -ENOMEM;
> +
> +	if (lcore_port_index(lcore, event_port_id) >= 0)
> +		return -EEXIST;
> +
> +	port = &lcore->ports[lcore->num_ports];
> +
> +	*port = (struct rte_dispatcher_lcore_port) {
> +		.port_id = event_port_id,
> +		.batch_size = batch_size,
> +		.timeout = timeout
> +	};
> +
> +	lcore->num_ports++;
> +
> +	return 0;
> +}
> +
> +int
> +rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
> +					    unsigned int lcore_id)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	struct rte_dispatcher_lcore *lcore;
> +	int port_idx;
> +	struct rte_dispatcher_lcore_port *port;
> +	struct rte_dispatcher_lcore_port *last;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	lcore =	&dispatcher->lcores[lcore_id];
> +
> +	port_idx = lcore_port_index(lcore, event_port_id);
> +
> +	if (port_idx < 0)
> +		return -ENOENT;
> +
> +	port = &lcore->ports[port_idx];
> +	last = &lcore->ports[lcore->num_ports - 1];
> +
> +	if (port != last)
> +		*port = *last;
> +
> +	lcore->num_ports--;
> +
> +	return 0;
> +}
> +
> +static struct rte_dispatcher_handler *
> +evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore,
> +			    int handler_id)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < lcore->num_handlers; i++) {
> +		struct rte_dispatcher_handler *handler =
> +			&lcore->handlers[i];
> +
> +		if (handler->id == handler_id)
> +			return handler;
> +	}
> +
> +	return NULL;
> +}
> +
> +static int
> +evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
> +{
> +	int handler_id = 0;
> +	struct rte_dispatcher_lcore *reference_lcore =
> +		&dispatcher->lcores[0];
> +
> +	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
> +		return -1;
> +
> +	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
> +		handler_id++;
> +
> +	return handler_id;
> +}
> +
> +static void
> +evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
> +		    const struct rte_dispatcher_handler *handler)
> +{
> +	int handler_idx = lcore->num_handlers;
> +
> +	lcore->handlers[handler_idx] = *handler;
> +	lcore->num_handlers++;
> +}
> +
> +static void
> +evd_install_handler(struct rte_dispatcher *dispatcher,
> +		    const struct rte_dispatcher_handler *handler)
> +{
> +	int i;
> +
> +	for (i = 0; i < RTE_MAX_LCORE; i++) {
> +		struct rte_dispatcher_lcore *lcore =
> +			&dispatcher->lcores[i];
> +		evd_lcore_install_handler(lcore, handler);
> +	}
> +}
> +
> +int
> +rte_dispatcher_register(uint8_t id,
> +			      rte_dispatcher_match_t match_fun,
> +			      void *match_data,
> +			      rte_dispatcher_process_t process_fun,
> +			      void *process_data)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	struct rte_dispatcher_handler handler = {
> +		.match_fun = match_fun,
> +		.match_data = match_data,

We could provide a default match function that uses the queue_id as the
matching criterion. That would reduce the application's burden of providing
two callbacks, one for matching and one for processing the event.
The application could pass NULL for "match_fun", in which case the default
function pointer would be used here.
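
As a sketch of the idea (the match-data layout below is invented purely
for illustration, and is not part of the proposed API):

	static bool
	evd_default_match(const struct rte_event *event, void *cb_data)
	{
		/* Assumes the per-registration match data holds the
		 * queue id to compare against.
		 */
		uint8_t queue_id = *(uint8_t *)cb_data;

		return event->queue_id == queue_id;
	}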


> +		.process_fun = process_fun,
> +		.process_data = process_data
> +	};
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	handler.id = evd_alloc_handler_id(dispatcher);
> +
> +	if (handler.id < 0)
> +		return -ENOMEM;
> +
> +	evd_install_handler(dispatcher, &handler);
> +
> +	return handler.id;
> +}
> +
> +static int
> +evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
> +			    int handler_id)
> +{
> +	struct rte_dispatcher_handler *unreg_handler;
> +	int handler_idx;
> +	uint16_t last_idx;
> +
> +	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
> +
> +	if (unreg_handler == NULL)
> +		return -EINVAL;
> +

Shouldn't the logic be "handler_idx = unreg_handler - &lcore->handlers[0];"?
unreg_handler is an address higher than or equal to the handler base address
(&lcore->handlers[0]), so with the operands as written the index comes out
negative (e.g., for the element at index 2, the expression yields -2 rather
than 2).


> +	handler_idx = &lcore->handlers[0] - unreg_handler;
> +
> +	last_idx = lcore->num_handlers - 1;
> +
> +	if (handler_idx != last_idx) {
> +		/* move all handlers to maintain handler order */
> +		int n = last_idx - handler_idx;
> +		memmove(unreg_handler, unreg_handler + 1,
> +			sizeof(struct rte_dispatcher_handler) * n);
> +	}
> +
> +	lcore->num_handlers--;
> +
> +	return 0;
> +}
> +
> +static int
> +evd_uninstall_handler(struct rte_dispatcher *dispatcher,
> +		      int handler_id)
> +{
> +	unsigned int lcore_id;
> +
> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +		struct rte_dispatcher_lcore *lcore =
> +			&dispatcher->lcores[lcore_id];
> +		int rc;
> +
> +		rc = evd_lcore_uninstall_handler(lcore, handler_id);
> +
> +		if (rc < 0)
> +			return rc;
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +rte_dispatcher_unregister(uint8_t id, int handler_id)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	int rc;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	rc = evd_uninstall_handler(dispatcher, handler_id);
> +
> +	return rc;
> +}
> +
> +static struct rte_dispatcher_finalizer *
> +evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
> +		       int handler_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < dispatcher->num_finalizers; i++) {
> +		struct rte_dispatcher_finalizer *finalizer =
> +			&dispatcher->finalizers[i];
> +
> +		if (finalizer->id == handler_id)
> +			return finalizer;
> +	}
> +
> +	return NULL;
> +}
> +
> +static int
> +evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
> +{
> +	int finalizer_id = 0;
> +
> +	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
> +		finalizer_id++;
> +
> +	return finalizer_id;
> +}
> +
> +static struct rte_dispatcher_finalizer *
> +evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
> +{
> +	int finalizer_idx;
> +	struct rte_dispatcher_finalizer *finalizer;
> +
> +	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
> +		return NULL;
> +
> +	finalizer_idx = dispatcher->num_finalizers;
> +	finalizer = &dispatcher->finalizers[finalizer_idx];
> +
> +	finalizer->id = evd_alloc_finalizer_id(dispatcher);
> +
> +	dispatcher->num_finalizers++;
> +
> +	return finalizer;
> +}
> +
> +int
> +rte_dispatcher_finalize_register(uint8_t id,
> +			      rte_dispatcher_finalize_t finalize_fun,
> +			      void *finalize_data)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	struct rte_dispatcher_finalizer *finalizer;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	finalizer = evd_alloc_finalizer(dispatcher);
> +
> +	if (finalizer == NULL)
> +		return -ENOMEM;
> +
> +	finalizer->finalize_fun = finalize_fun;
> +	finalizer->finalize_data = finalize_data;
> +
> +	return finalizer->id;
> +}
> +
> +int
> +rte_dispatcher_finalize_unregister(uint8_t id, int handler_id)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	struct rte_dispatcher_finalizer *unreg_finalizer;
> +	int finalizer_idx;
> +	uint16_t last_idx;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, handler_id);
> +
> +	if (unreg_finalizer == NULL)
> +		return -EINVAL;
> +

Same as the comment above on rte_dispatcher_unregister: the base address
needs to be subtracted from unreg_finalizer, not the other way around.


> +	finalizer_idx = &dispatcher->finalizers[0] - unreg_finalizer;
> +
> +	last_idx = dispatcher->num_finalizers - 1;
> +
> +	if (finalizer_idx != last_idx) {
> +		/* move all finalizers to maintain order */
> +		int n = last_idx - finalizer_idx;
> +		memmove(unreg_finalizer, unreg_finalizer + 1,
> +			sizeof(struct rte_dispatcher_finalizer) * n);
> +	}
> +
> +	dispatcher->num_finalizers--;
> +
> +	return 0;
> +}
> +
> +static int
> +evd_set_service_runstate(uint8_t id, int state)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	int rc;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	rc = rte_service_component_runstate_set(dispatcher->service_id,
> +						state);
> +
> +	if (rc != 0) {
> +		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while
> setting "
> +				 "service component run state to %d\n", rc,
> +				 state);
> +		RTE_ASSERT(0);
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +rte_dispatcher_start(uint8_t id)
> +{
> +	return evd_set_service_runstate(id, 1);
> +}
> +
> +int
> +rte_dispatcher_stop(uint8_t id)
> +{
> +	return evd_set_service_runstate(id, 0);
> +}
> +
> +static void
> +evd_aggregate_stats(struct rte_dispatcher_stats *result,
> +		    const struct rte_dispatcher_stats *part)
> +{
> +	result->poll_count += part->poll_count;
> +	result->ev_batch_count += part->ev_batch_count;
> +	result->ev_dispatch_count += part->ev_dispatch_count;
> +	result->ev_drop_count += part->ev_drop_count;
> +}
> +
> +int
> +rte_dispatcher_stats_get(uint8_t id,
> +			       struct rte_dispatcher_stats *stats)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	unsigned int lcore_id;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +

The stats pointer needs to be validated against NULL before being dereferenced, just like service_id above.


> +	*stats = (struct rte_dispatcher_stats) {};
> +
> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +		struct rte_dispatcher_lcore *lcore =
> +			&dispatcher->lcores[lcore_id];
> +
> +		evd_aggregate_stats(stats, &lcore->stats);
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +rte_dispatcher_stats_reset(uint8_t id)
> +{
> +	struct rte_dispatcher *dispatcher;
> +	unsigned int lcore_id;
> +
> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> +	dispatcher = evd_get_dispatcher(id);
> +
> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +		struct rte_dispatcher_lcore *lcore =
> +			&dispatcher->lcores[lcore_id];
> +
> +		lcore->stats = (struct rte_dispatcher_stats) {};
> +	}
> +
> +	return 0;
> +}
> diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
> new file mode 100644
> index 0000000000..6712687a08
> --- /dev/null
> +++ b/lib/dispatcher/rte_dispatcher.h
> @@ -0,0 +1,480 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Ericsson AB
> + */
> +
> +#ifndef __RTE_DISPATCHER_H__
> +#define __RTE_DISPATCHER_H__
> +
> +/**
> + * @file
> + *
> + * RTE Dispatcher
> + *
> + * The purpose of the dispatcher is to help decouple different parts
> + * of an application (e.g., modules), sharing the same underlying
> + * event device.
> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <rte_eventdev.h>
> +
> +/**
> + * Function prototype for match callbacks.
> + *
> + * Match callbacks are used by an application to decide how the
> + * dispatcher distributes events to different parts of the
> + * application.
> + *
> + * The application is not expected to process the event at the point
> + * of the match call. Such matters should be deferred to the process
> + * callback invocation.
> + *
> + * The match callback may be used as an opportunity to prefetch data.
> + *
> + * @param event
> + *  Pointer to event
> + *
> + * @param cb_data
> + *  The pointer supplied by the application in
> + *  rte_dispatcher_register().
> + *
> + * @return
> + *   Returns true if this event should be delivered (via
> + *   the process callback), and false otherwise.
> + */
> +typedef bool
> +(*rte_dispatcher_match_t)(const struct rte_event *event, void
> +*cb_data);
> +
> +/**
> + * Function prototype for process callbacks.
> + *
> + * The process callbacks are used by the dispatcher to deliver
> + * events for processing.
> + *
> + * @param event_dev_id
> + *  The originating event device id.
> + *
> + * @param event_port_id
> + *  The originating event port.
> + *
> + * @param events
> + *  Pointer to an array of events.
> + *
> + * @param num
> + *  The number of events in the @p events array.
> + *
> + * @param cb_data
> + *  The pointer supplied by the application in
> + *  rte_dispatcher_register().
> + */
> +
> +typedef void
> +(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
> +				  struct rte_event *events, uint16_t num,
> +				  void *cb_data);
> +
> +/**
> + * Function prototype for finalize callbacks.
> + *
> + * The finalize callbacks are used by the dispatcher to notify the
> + * application it has delivered all events from a particular batch
> + * dequeued from the event device.
> + *
> + * @param event_dev_id
> + *  The originating event device id.
> + *
> + * @param event_port_id
> + *  The originating event port.
> + *
> + * @param cb_data
> + *  The pointer supplied by the application in
> + *  rte_dispatcher_finalize_register().
> + */
> +
> +typedef void
> +(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
> +				   void *cb_data);
> +
> +/**
> + * Dispatcher statistics
> + */
> +struct rte_dispatcher_stats {
> +	uint64_t poll_count;
> +	/**< Number of event dequeue calls made toward the event device. */
> +	uint64_t ev_batch_count;
> +	/**< Number of non-empty event batches dequeued from event device. */
> +	uint64_t ev_dispatch_count;
> +	/**< Number of events dispatched to a handler.*/
> +	uint64_t ev_drop_count;
> +	/**< Number of events dropped because no handler was found. */
> +};
> +
> +/**
> + * Create a dispatcher with the specified id.
> + *
> + * @param id
> + *  An application-specified, unique (across all dispatcher
> + *  instances) identifier.
> + *
> + * @param event_dev_id
> + *  The identifier of the event device from which this dispatcher
> + *  will dequeue events.
> + *
> + * @return
> + *   - 0: Success
> + *   - <0: Error code on failure
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id);
> +
> +/**
> + * Free a dispatcher.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @return
> + *  - 0: Success
> + *  - <0: Error code on failure
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_free(uint8_t id);
> +
> +/**
> + * Retrieve the service identifier of a dispatcher.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @param [out] service_id
> + *  A pointer to a caller-supplied buffer where the dispatcher's
> + *  service id will be stored.
> + *
> + * @return
> + *  - 0: Success
> + *  - <0: Error code on failure.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
> +
> +/**
> + * Binds an event device port to a specific lcore on the specified
> + * dispatcher.
> + *
> + * This function configures the event port id to be used by the event
> + * dispatcher service, if run on the specified lcore.
> + *
> + * Multiple event device ports may be bound to the same lcore. A
> + * particular port must not be bound to more than one lcore.
> + *
> + * If the dispatcher service is mapped (with rte_service_map_lcore_set())
> + * to a lcore to which no ports are bound, the service function will be a
> + * no-operation.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but not while the dispatcher is running on the lcore
> + * specified by @c lcore_id.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @param event_port_id
> + *  The event device port identifier.
> + *
> + * @param batch_size
> + *  The batch size to use in rte_event_dequeue_burst(), for the
> + *  configured event device port and lcore.
> + *
> + * @param timeout
> + *  The timeout parameter to use in rte_event_dequeue_burst(), for the
> + *  configured event device port and lcore.
> + *
> + * @param lcore_id
> + *  The lcore by which this event port will be used.
> + *
> + * @return
> + *  - 0: Success
> + *  - -ENOMEM: Unable to allocate sufficient resources.
> + *  - -EEXIST: Event port is already configured.
> + *  - -EINVAL: Invalid arguments.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
> +					uint16_t batch_size, uint64_t timeout,
> +					unsigned int lcore_id);
> +
> +/**
> + * Unbind an event device port from a specific lcore.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but not while the dispatcher is running on
> + * the lcore specified by @c lcore_id.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @param event_port_id
> + *  The event device port identifier.
> + *
> + * @param lcore_id
> + *  The lcore which was using this event port.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: Invalid @c id.
> + *  - -ENOENT: Event port id not bound to this @c lcore_id.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
> +					    unsigned int lcore_id);
> +
> +/**
> + * Register an event handler.
> + *
> + * The match callback function is used to select if a particular event
> + * should be delivered, using the corresponding process callback
> + * function.
> + *
> + * The reason for having two distinct steps is to allow the dispatcher
> + * to deliver all events as a batch. This in turn will cause
> + * processing of a particular kind of events to happen in a
> + * back-to-back manner, improving cache locality.
> + *
> + * The list of handler callback functions is shared among all lcores,
> + * but will only be executed on lcores which have an eventdev port
> + * bound to them, and which are running the dispatcher service.
> + *
> + * An event is delivered to at most one handler. Events where no
> + * handler is found are dropped.
> + *
> + * The application must not depend on the order of which the match
> + * functions are invoked.
> + *
> + * Ordering of events is not guaranteed to be maintained between
> + * different deliver callbacks. For example, suppose there are two
> + * callbacks registered, matching different subsets of events arriving
> + * on an atomic queue. A batch of events [ev0, ev1, ev2] are dequeued
> + * on a particular port, all pertaining to the same flow. The match
> + * callback for registration A returns true for ev0 and ev2, and the
> + * matching function for registration B for ev1. In that scenario, the
> + * dispatcher may choose to deliver first [ev0, ev2] using A's deliver
> + * function, and then [ev1] to B - or vice versa.
> + *
> + * rte_dispatcher_register() may be called by any thread
> + * (including unregistered non-EAL threads), but not while the event
> + * dispatcher is running on any service lcore.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @param match_fun
> + *  The match callback function.
> + *
> + * @param match_cb_data
> + *  A pointer to some application-specific opaque data (or NULL),
> + *  which is supplied back to the application when match_fun is
> + *  called.
> + *
> + * @param process_fun
> + *  The process callback function.
> + *
> + * @param process_cb_data
> + *  A pointer to some application-specific opaque data (or NULL),
> + *  which is supplied back to the application when process_fun is
> + *  called.
> + *
> + * @return
> + *  - >= 0: The identifier for this registration.
> + *  - -ENOMEM: Unable to allocate sufficient resources.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_register(uint8_t id,
> +			      rte_dispatcher_match_t match_fun,
> +			      void *match_cb_data,
> +			      rte_dispatcher_process_t process_fun,
> +			      void *process_cb_data);
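
For context, a minimal usage sketch of this API (the application-side
names, the queue id, and DISPATCHER_ID are invented for illustration):

	static bool
	app_match_rx(const struct rte_event *event, void *cb_data)
	{
		RTE_SET_USED(cb_data);

		/* select events arriving on the (example) Rx stage queue */
		return event->queue_id == 0;
	}

	static void
	app_process_rx(uint8_t event_dev_id, uint8_t event_port_id,
		       struct rte_event *events, uint16_t num, void *cb_data)
	{
		/* application-level processing of the matched event burst */
	}

	/* in the application's setup code: */
	int handler_id = rte_dispatcher_register(DISPATCHER_ID, app_match_rx,
						 NULL, app_process_rx, NULL);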
> +
> +/**
> + * Unregister an event handler.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but not while the dispatcher is running on
> + * any service lcore.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @param handler_id
> + *  The handler registration id returned by the original
> + *  rte_dispatcher_register() call.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_unregister(uint8_t id, int handler_id);
> +
> +/**
> + * Register a finalize callback function.
> + *
> + * An application may optionally install one or more finalize
> + * callbacks.
> + *
> + * All finalize callbacks are invoked by the dispatcher when a
> + * complete batch of events (retrieved using rte_event_dequeue_burst())
> + * has been delivered to the application (or has been dropped).
> + *
> + * The finalize callback is not tied to any particular handler.
> + *
> + * The finalize callback provides an opportunity for the application
> + * to do per-batch processing. One case where this may be useful is if
> + * an event output buffer is used, and is shared among several
> + * handlers. In such a case, proper output buffer flushing may be
> + * assured using a finalize callback.
> + *
> + * rte_dispatcher_finalize_register() may be called by any thread
> + * (including unregistered non-EAL threads), but not while the
> + * dispatcher is running on any service lcore.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @param finalize_fun
> + *  The function called after completing the processing of a
> + *  dequeue batch.
> + *
> + * @param finalize_data
> + *  A pointer to some application-specific opaque data (or NULL),
> + *  which is supplied back to the application when @c finalize_fun is
> + *  called.
> + *
> + * @return
> + *  - >= 0: The identifier for this registration.
> + *  - -ENOMEM: Unable to allocate sufficient resources.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_finalize_register(uint8_t id,
> +				 rte_dispatcher_finalize_t finalize_fun,
> +				 void *finalize_data);
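
A sketch of the output-buffer case described above (the buffer type, the
flush helper, app_buffer and DISPATCHER_ID are hypothetical,
application-defined constructs):

	static void
	app_flush_events(uint8_t event_dev_id, uint8_t event_port_id,
			 void *cb_data)
	{
		struct app_event_buffer *buf = cb_data;

		/* enqueue whatever the handlers buffered during this batch */
		app_event_buffer_flush(buf, event_dev_id, event_port_id);
	}

	rte_dispatcher_finalize_register(DISPATCHER_ID, app_flush_events,
					 &app_buffer);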
> +
> +/**
> + * Unregister a finalize callback.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but not while the dispatcher is running on
> + * any service lcore.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @param reg_id
> + *  The finalize registration id returned by the original
> + *  rte_dispatcher_finalize_register() call.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_finalize_unregister(uint8_t id, int reg_id);
> +
> +/**
> + * Start a dispatcher instance.
> + *
> + * Enables the dispatcher service.
> + *
> + * The underlying event device must have been started prior to calling
> + * rte_dispatcher_start().
> + *
> + * For the dispatcher to actually perform work (i.e., dispatch
> + * events), its service must have been mapped to one or more service
> + * lcores, and its service run state set to '1'. A dispatcher's
> + * service is retrieved using rte_dispatcher_service_id_get().
> + *
> + * Each service lcore to which the dispatcher is mapped should
> + * have at least one event port configured. Such configuration is
> + * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
> + * starting the dispatcher.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: Invalid @c id.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_start(uint8_t id);
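
Putting the pieces together, a minimal setup sequence might look like the
following sketch (error handling omitted; DISPATCHER_ID and the port,
batch size, timeout and lcore values are example assumptions):

	uint32_t service_id;

	rte_event_dev_start(event_dev_id);

	rte_dispatcher_create(DISPATCHER_ID, event_dev_id);
	rte_dispatcher_bind_port_to_lcore(DISPATCHER_ID, event_port_id,
					  32, 0, lcore_id);

	rte_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
	rte_service_map_lcore_set(service_id, lcore_id, 1);
	rte_service_runstate_set(service_id, 1);

	rte_dispatcher_start(DISPATCHER_ID);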
> +
> +/**
> + * Stop a running dispatcher instance.
> + *
> + * Disables the dispatcher service.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: Invalid @c id.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_stop(uint8_t id);
> +
> +/**
> + * Retrieve statistics for a dispatcher instance.
> + *
> + * This function is MT safe and may be called by any thread
> + * (including unregistered non-EAL threads).
> + *
> + * @param id
> + *  The dispatcher identifier.
> + * @param[out] stats
> + *   A pointer to a structure to fill with statistics.
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: The @c id parameter was invalid.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_stats_get(uint8_t id,
> +			       struct rte_dispatcher_stats *stats);
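
For example, a monitoring thread might poll the counters along these
lines (sketch; DISPATCHER_ID is an example assumption):

	struct rte_dispatcher_stats stats;

	/* requires <stdio.h> and <inttypes.h> */
	if (rte_dispatcher_stats_get(DISPATCHER_ID, &stats) == 0)
		printf("dispatched %" PRIu64 " events over %" PRIu64 " polls\n",
		       stats.ev_dispatch_count, stats.poll_count);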
> +
> +/**
> + * Reset statistics for a dispatcher instance.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but may not produce the correct result if the
> + * dispatcher is running on any service lcore.
> + *
> + * @param id
> + *  The dispatcher identifier.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: The @c id parameter was invalid.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_stats_reset(uint8_t id);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* __RTE_DISPATCHER_H__ */
> diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
> new file mode 100644
> index 0000000000..8f9ad96522
> --- /dev/null
> +++ b/lib/dispatcher/version.map
> @@ -0,0 +1,20 @@
> +EXPERIMENTAL {
> +	global:
> +
> +	# added in 23.11
> +	rte_dispatcher_create;
> +	rte_dispatcher_free;
> +	rte_dispatcher_service_id_get;
> +	rte_dispatcher_bind_port_to_lcore;
> +	rte_dispatcher_unbind_port_from_lcore;
> +	rte_dispatcher_register;
> +	rte_dispatcher_unregister;
> +	rte_dispatcher_finalize_register;
> +	rte_dispatcher_finalize_unregister;
> +	rte_dispatcher_start;
> +	rte_dispatcher_stop;
> +	rte_dispatcher_stats_get;
> +	rte_dispatcher_stats_reset;
> +
> +	local: *;
> +};
> diff --git a/lib/meson.build b/lib/meson.build
> index 099b0ed18a..3093b338d2 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -35,6 +35,7 @@ libraries = [
>          'distributor',
>          'efd',
>          'eventdev',
> +        'dispatcher', # dispatcher depends on eventdev
>          'gpudev',
>          'gro',
>          'gso',
> @@ -81,6 +82,7 @@ optional_libs = [
>          'cfgfile',
>          'compressdev',
>          'cryptodev',
> +        'dispatcher',
>          'distributor',
>          'dmadev',
>          'efd',
> --
> 2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-17 16:46                                   ` Naga Harish K, S V
@ 2023-09-19  9:20                                     ` Mattias Rönnblom
  2023-09-20  9:11                                       ` Naga Harish K, S V
  2023-09-20  9:32                                     ` Jerin Jacob
  1 sibling, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-19  9:20 UTC (permalink / raw)
  To: Naga Harish K, S V, mattias.ronnblom, dev
  Cc: Jerin Jacob, techboard, Van Haaren, Harry, Nilsson, Peter,
	Heng Wang, Pavan Nikhilesh, Gujjar, Abhinandan S, Carrillo,
	Erik G, Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Mccarthy, Peter, Yan, Zhirun

On 2023-09-17 18:46, Naga Harish K, S V wrote:
> 
> 
>> -----Original Message-----
>> From: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> Sent: Monday, September 4, 2023 6:33 PM
>> To: dev@dpdk.org
>> Cc: Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van Haaren,
>> Harry <harry.van.haaren@intel.com>; hofors@lysator.liu.se; Nilsson, Peter
>> <peter.j.nilsson@ericsson.com>; Heng Wang <heng.wang@ericsson.com>;
>> Naga Harish K, S V <s.v.naga.harish.k@intel.com>; Pavan Nikhilesh
>> <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
>> <abhinandan.gujjar@intel.com>; Carrillo, Erik G <Erik.G.Carrillo@intel.com>;
>> Shijith Thotton <sthotton@marvell.com>; Hemant Agrawal
>> <hemant.agrawal@nxp.com>; Sachin Saxena <sachin.saxena@oss.nxp.com>;
>> Liang Ma <liangma@liangbit.com>; Mccarthy, Peter
>> <Peter.Mccarthy@intel.com>; Yan, Zhirun <Zhirun.Yan@intel.com>;
>> mattias.ronnblom <mattias.ronnblom@ericsson.com>
>> Subject: [PATCH v3 1/3] lib: introduce dispatcher library
>>
>> The purpose of the dispatcher library is to help reduce coupling in an
>> Eventdev-based DPDK application.
>>
>> In addition, the dispatcher also provides a convenient and flexible way for the
>> application to use service cores for application-level processing.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
>> Reviewed-by: Heng Wang <heng.wang@ericsson.com>
>>
>> --
>>
>> PATCH v3:
>>   o To underline its optional character and since it does not provide
>>     hardware abstraction, the event dispatcher is now a separate
>>     library.
>>   o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
>>     shorter and to avoid the rte_event_* namespace.
>>
> 
> Rte_dispatcher basically dispatches events, but it feels like the name does not convey that.
> Also, it is like any other adapter service that can reside within the eventdev directory.
> 
> I can see some discussion in previous threads related to the placement of the dispatcher library.
> 
> It is an optional eventdev application service; it does not enforce this programming model on the application.
> The documentation may need to be updated to mention that this is optional.
> 
> If any hardware comes up with the dispatcher feature, then this library may need to be moved inside the eventdev library later.
> 

It seems to me that the deciding factor for what functionality goes into 
a DPDK library or not is not so much whether it's implemented in hardware, 
in software, or some combination thereof. The important thing is that the 
library is able to present a coherent API to the application (or other 
libraries).

That said, as I've mentioned before, I have no strong opinion on this 
subject.

> So, it makes sense to keep this optional service in the eventdev folder as an optional feature.
> 
>> PATCH v2:
>>   o Add dequeue batch count statistic.
>>   o Add statistics reset function to API.
>>   o Clarify MT safety guarantees (or lack thereof) in the API documentation.
>>   o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
>>     to be consistent with similar loops elsewhere in the dispatcher.
>>   o Fix variable names in finalizer unregister function.
>>
>> PATCH:
>>   o Change prefix from RED to EVD, to avoid confusion with random
>>     early detection.
>>
>> RFC v4:
>>   o Move handlers to per-lcore data structures.
>>   o Introduce mechanism which rearranges handlers so that often-used
>>     handlers tend to be tried first.
>>   o Terminate dispatch loop in case all events are delivered.
>>   o To avoid the dispatcher's service function hogging the CPU, process
>>     only one batch per call.
>>   o Have service function return -EAGAIN if no work is performed.
>>   o Events delivered in the process function is no longer marked 'const',
>>     since modifying them may be useful for the application and cause
>>     no difficulties for the dispatcher.
>>   o Various minor API documentation improvements.
>>
>> RFC v3:
>>   o Add stats_get() function to the version.map file.
>> ---
>>   MAINTAINERS                     |   3 +
>>   lib/dispatcher/meson.build      |  17 +
>>   lib/dispatcher/rte_dispatcher.c | 791 ++++++++++++++++++++++++++++++++
>>   lib/dispatcher/rte_dispatcher.h | 480 +++++++++++++++++++
>>   lib/dispatcher/version.map      |  20 +
>>   lib/meson.build                 |   2 +
>>   6 files changed, 1313 insertions(+)
>>   create mode 100644 lib/dispatcher/meson.build
>>   create mode 100644 lib/dispatcher/rte_dispatcher.c
>>   create mode 100644 lib/dispatcher/rte_dispatcher.h
>>   create mode 100644 lib/dispatcher/version.map
>>
>> diff --git a/MAINTAINERS b/MAINTAINERS
>> index a926155f26..6704cd5b2c 100644
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -1726,6 +1726,9 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
>>   M: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>   F: lib/node/
>>
>> +Dispatcher - EXPERIMENTAL
>> +M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> +F: lib/dispatcher/
>>
>>   Test Applications
>>   -----------------
>> diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
>> new file mode 100644
>> index 0000000000..c6054a3a5d
>> --- /dev/null
>> +++ b/lib/dispatcher/meson.build
>> @@ -0,0 +1,17 @@
>> +# SPDX-License-Identifier: BSD-3-Clause
>> +# Copyright(c) 2023 Ericsson AB
>> +
>> +if is_windows
>> +    build = false
>> +    reason = 'not supported on Windows'
>> +    subdir_done()
>> +endif
>> +
>> +sources = files(
>> +        'rte_dispatcher.c',
>> +)
>> +headers = files(
>> +        'rte_dispatcher.h',
>> +)
>> +
>> +deps += ['eventdev']
>> diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
>> new file mode 100644
>> index 0000000000..3319fe09f2
>> --- /dev/null
>> +++ b/lib/dispatcher/rte_dispatcher.c
>> @@ -0,0 +1,791 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2023 Ericsson AB
>> + */
>> +
>> +#include <stdbool.h>
>> +#include <stdint.h>
>> +
>> +#include <rte_branch_prediction.h>
>> +#include <rte_common.h>
>> +#include <rte_lcore.h>
>> +#include <rte_random.h>
>> +#include <rte_service_component.h>
>> +
>> +#include "eventdev_pmd.h"
>> +
>> +#include <rte_dispatcher.h>
>> +
>> +#define EVD_MAX_PORTS_PER_LCORE 4
>> +#define EVD_MAX_HANDLERS 32
>> +#define EVD_MAX_FINALIZERS 16
>> +#define EVD_AVG_PRIO_INTERVAL 2000
>> +
>> +struct rte_dispatcher_lcore_port {
>> +	uint8_t port_id;
>> +	uint16_t batch_size;
>> +	uint64_t timeout;
>> +};
>> +
>> +struct rte_dispatcher_handler {
>> +	int id;
>> +	rte_dispatcher_match_t match_fun;
>> +	void *match_data;
>> +	rte_dispatcher_process_t process_fun;
>> +	void *process_data;
>> +};
>> +
>> +struct rte_dispatcher_finalizer {
>> +	int id;
>> +	rte_dispatcher_finalize_t finalize_fun;
>> +	void *finalize_data;
>> +};
>> +
>> +struct rte_dispatcher_lcore {
>> +	uint8_t num_ports;
>> +	uint16_t num_handlers;
>> +	int32_t prio_count;
>> +	struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
>> +	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
>> +	struct rte_dispatcher_stats stats;
>> +} __rte_cache_aligned;
>> +
>> +struct rte_dispatcher {
>> +	uint8_t id;
>> +	uint8_t event_dev_id;
>> +	int socket_id;
>> +	uint32_t service_id;
>> +	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
>> +	uint16_t num_finalizers;
>> +	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS]; };
>> +
>> +static struct rte_dispatcher *dispatchers[UINT8_MAX];
>> +
>> +static bool
>> +evd_has_dispatcher(uint8_t id)
>> +{
>> +	return dispatchers[id] != NULL;
>> +}
>> +
>> +static struct rte_dispatcher *
>> +evd_get_dispatcher(uint8_t id)
>> +{
>> +	return dispatchers[id];
>> +}
>> +
>> +static void
>> +evd_set_dispatcher(uint8_t id, struct rte_dispatcher *dispatcher) {
>> +	dispatchers[id] = dispatcher;
>> +}
>> +
>> +#define EVD_VALID_ID_OR_RET_EINVAL(id)					\
>> +	do {								\
>> +		if (unlikely(!evd_has_dispatcher(id))) {		\
>> +			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
>> +			return -EINVAL;					\
>> +		}							\
>> +	} while (0)
>> +
>> +static int
>> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
>> +		       const struct rte_event *event) {
>> +	uint16_t i;
>> +
>> +	for (i = 0; i < lcore->num_handlers; i++) {
>> +		struct rte_dispatcher_handler *handler =
>> +			&lcore->handlers[i];
>> +
>> +		if (handler->match_fun(event, handler->match_data))
>> +			return i;
>> +	}
>> +
>> +	return -1;
>> +}
>> +
>> +static void
>> +evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
>> +		       int handler_idx)
>> +{
>> +	struct rte_dispatcher_handler tmp;
>> +
>> +	if (handler_idx == 0)
>> +		return;
>> +
>> +	/* Let the lucky handler "bubble" up the list */
>> +
>> +	tmp = lcore->handlers[handler_idx - 1];
>> +
>> +	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
>> +
>> +	lcore->handlers[handler_idx] = tmp;
>> +}
>> +
>> +static inline void
>> +evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
>> +				int handler_idx, uint16_t handler_events) {
>> +	lcore->prio_count -= handler_events;
>> +
>> +	if (unlikely(lcore->prio_count <= 0)) {
>> +		evd_prioritize_handler(lcore, handler_idx);
>> +
>> +		/*
>> +		 * Randomize the interval in the unlikely case
>> +		 * the traffic follow some very strict pattern.
>> +		 */
>> +		lcore->prio_count =
>> +			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
>> +			EVD_AVG_PRIO_INTERVAL / 2;
>> +	}
>> +}
>> +
>> +static inline void
>> +evd_dispatch_events(struct rte_dispatcher *dispatcher,
>> +		    struct rte_dispatcher_lcore *lcore,
>> +		    struct rte_dispatcher_lcore_port *port,
>> +		    struct rte_event *events, uint16_t num_events) {
>> +	int i;
>> +	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
>> +	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
>> +	uint16_t drop_count = 0;
>> +	uint16_t dispatch_count;
>> +	uint16_t dispatched = 0;
>> +
>> +	for (i = 0; i < num_events; i++) {
>> +		struct rte_event *event = &events[i];
>> +		int handler_idx;
>> +
>> +		handler_idx = evd_lookup_handler_idx(lcore, event);
>> +
>> +		if (unlikely(handler_idx < 0)) {
>> +			drop_count++;
>> +			continue;
>> +		}
>> +
>> +		bursts[handler_idx][burst_lens[handler_idx]] = *event;
>> +		burst_lens[handler_idx]++;
>> +	}
>> +
>> +	dispatch_count = num_events - drop_count;
>> +
>> +	for (i = 0; i < lcore->num_handlers &&
>> +		 dispatched < dispatch_count; i++) {
>> +		struct rte_dispatcher_handler *handler =
>> +			&lcore->handlers[i];
>> +		uint16_t len = burst_lens[i];
>> +
>> +		if (len == 0)
>> +			continue;
>> +
>> +		handler->process_fun(dispatcher->event_dev_id, port->port_id,
>> +				     bursts[i], len, handler->process_data);
>> +
>> +		dispatched += len;
>> +
>> +		/*
>> +		 * Safe, since any reshuffling will only involve
>> +		 * already-processed handlers.
>> +		 */
>> +		evd_consider_prioritize_handler(lcore, i, len);
>> +	}
>> +
>> +	lcore->stats.ev_batch_count++;
>> +	lcore->stats.ev_dispatch_count += dispatch_count;
>> +	lcore->stats.ev_drop_count += drop_count;
>> +
>> +	for (i = 0; i < dispatcher->num_finalizers; i++) {
>> +		struct rte_dispatcher_finalizer *finalizer =
>> +			&dispatcher->finalizers[i];
>> +
>> +		finalizer->finalize_fun(dispatcher->event_dev_id,
>> +					port->port_id,
>> +					finalizer->finalize_data);
>> +	}
>> +}
>> +
>> +static __rte_always_inline uint16_t
>> +evd_port_dequeue(struct rte_dispatcher *dispatcher,
>> +		 struct rte_dispatcher_lcore *lcore,
>> +		 struct rte_dispatcher_lcore_port *port) {
>> +	uint16_t batch_size = port->batch_size;
>> +	struct rte_event events[batch_size];
>> +	uint16_t n;
>> +
>> +	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
>> +				    events, batch_size, port->timeout);
>> +
>> +	if (likely(n > 0))
>> +		evd_dispatch_events(dispatcher, lcore, port, events, n);
>> +
>> +	lcore->stats.poll_count++;
>> +
>> +	return n;
>> +}
>> +
>> +static __rte_always_inline uint16_t
>> +evd_lcore_process(struct rte_dispatcher *dispatcher,
>> +		  struct rte_dispatcher_lcore *lcore) {
>> +	uint16_t i;
>> +	uint16_t event_count = 0;
>> +
>> +	for (i = 0; i < lcore->num_ports; i++) {
>> +		struct rte_dispatcher_lcore_port *port =
>> +			&lcore->ports[i];
>> +
>> +		event_count += evd_port_dequeue(dispatcher, lcore, port);
>> +	}
>> +
>> +	return event_count;
>> +}
>> +
>> +static int32_t
>> +evd_process(void *userdata)
>> +{
>> +	struct rte_dispatcher *dispatcher = userdata;
>> +	unsigned int lcore_id = rte_lcore_id();
>> +	struct rte_dispatcher_lcore *lcore =
>> +		&dispatcher->lcores[lcore_id];
>> +	uint64_t event_count;
>> +
>> +	event_count = evd_lcore_process(dispatcher, lcore);
>> +
>> +	if (unlikely(event_count == 0))
>> +		return -EAGAIN;
>> +
>> +	return 0;
>> +}
>> +
>> +static int
>> +evd_service_register(struct rte_dispatcher *dispatcher) {
>> +	struct rte_service_spec service = {
>> +		.callback = evd_process,
>> +		.callback_userdata = dispatcher,
>> +		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
>> +		.socket_id = dispatcher->socket_id
>> +	};
>> +	int rc;
>> +
>> +	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "evd_%d",
>> +		 dispatcher->id);
>> +
>> +	rc = rte_service_component_register(&service, &dispatcher->service_id);
>> +
>> +	if (rc)
>> +		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
>> +				 "%s failed with error code %d\n",
>> +				 service.name, rc);
>> +
>> +	return rc;
>> +}
>> +
>> +static int
>> +evd_service_unregister(struct rte_dispatcher *dispatcher) {
>> +	int rc;
>> +
>> +	rc = rte_service_component_unregister(dispatcher->service_id);
>> +
>> +	if (rc)
>> +		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
>> +				 "failed with error code %d\n", rc);
>> +
>> +	return rc;
>> +}
>> +
>> +int
>> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id) {
>> +	int socket_id;
>> +	struct rte_dispatcher *dispatcher;
>> +	int rc;
>> +
>> +	if (evd_has_dispatcher(id)) {
>> +		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
>> +				 id);
>> +		return -EEXIST;
>> +	}
>> +
>> +	socket_id = rte_event_dev_socket_id(event_dev_id);
>> +
>> +	dispatcher =
>> +		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
>> +				  RTE_CACHE_LINE_SIZE, socket_id);
>> +
>> +	if (dispatcher == NULL) {
>> +		RTE_EDEV_LOG_ERR("Unable to allocate memory for
>> dispatcher\n");
>> +		return -ENOMEM;
>> +	}
>> +
>> +	*dispatcher = (struct rte_dispatcher) {
>> +		.id = id,
>> +		.event_dev_id = event_dev_id,
>> +		.socket_id = socket_id
>> +	};
>> +
>> +	rc = evd_service_register(dispatcher);
>> +
>> +	if (rc < 0) {
>> +		rte_free(dispatcher);
>> +		return rc;
>> +	}
>> +
>> +	evd_set_dispatcher(id, dispatcher);
>> +
>> +	return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_free(uint8_t id)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	int rc;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +	rc = evd_service_unregister(dispatcher);
>> +
>> +	if (rc)
>> +		return rc;
>> +
>> +	evd_set_dispatcher(id, NULL);
>> +
>> +	rte_free(dispatcher);
>> +
>> +	return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id) {
>> +	struct rte_dispatcher *dispatcher;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
> 
> The service_id pointer needs to be validated against NULL before being dereferenced.
> 
> 

Noted.

Returning error codes on API violations is bad practice (such violations 
should be caught by assertions instead), but it is DPDK standard practice 
and how Eventdev does things as well, so I'll change it.

It would also be consistent with allowing invalid 'id' parameter values.

>> +	*service_id = dispatcher->service_id;
>> +
>> +	return 0;
>> +}
>> +
>> +static int
>> +lcore_port_index(struct rte_dispatcher_lcore *lcore,
>> +		 uint8_t event_port_id)
>> +{
>> +	uint16_t i;
>> +
>> +	for (i = 0; i < lcore->num_ports; i++) {
>> +		struct rte_dispatcher_lcore_port *port =
>> +			&lcore->ports[i];
>> +
>> +		if (port->port_id == event_port_id)
>> +			return i;
>> +	}
>> +
>> +	return -1;
>> +}
>> +
>> +int
>> +rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
>> +					uint16_t batch_size, uint64_t timeout,
>> +					unsigned int lcore_id)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	struct rte_dispatcher_lcore *lcore;
>> +	struct rte_dispatcher_lcore_port *port;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +	lcore =	&dispatcher->lcores[lcore_id];
>> +
>> +	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
>> +		return -ENOMEM;
>> +
>> +	if (lcore_port_index(lcore, event_port_id) >= 0)
>> +		return -EEXIST;
>> +
>> +	port = &lcore->ports[lcore->num_ports];
>> +
>> +	*port = (struct rte_dispatcher_lcore_port) {
>> +		.port_id = event_port_id,
>> +		.batch_size = batch_size,
>> +		.timeout = timeout
>> +	};
>> +
>> +	lcore->num_ports++;
>> +
>> +	return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
>> +					    unsigned int lcore_id)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	struct rte_dispatcher_lcore *lcore;
>> +	int port_idx;
>> +	struct rte_dispatcher_lcore_port *port;
>> +	struct rte_dispatcher_lcore_port *last;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +	lcore =	&dispatcher->lcores[lcore_id];
>> +
>> +	port_idx = lcore_port_index(lcore, event_port_id);
>> +
>> +	if (port_idx < 0)
>> +		return -ENOENT;
>> +
>> +	port = &lcore->ports[port_idx];
>> +	last = &lcore->ports[lcore->num_ports - 1];
>> +
>> +	if (port != last)
>> +		*port = *last;
>> +
>> +	lcore->num_ports--;
>> +
>> +	return 0;
>> +}
>> +
>> +static struct rte_dispatcher_handler*
>> +evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore,
>> +			    int handler_id)
>> +{
>> +	uint16_t i;
>> +
>> +	for (i = 0; i < lcore->num_handlers; i++) {
>> +		struct rte_dispatcher_handler *handler =
>> +			&lcore->handlers[i];
>> +
>> +		if (handler->id == handler_id)
>> +			return handler;
>> +	}
>> +
>> +	return NULL;
>> +}
>> +
>> +static int
>> +evd_alloc_handler_id(struct rte_dispatcher *dispatcher) {
>> +	int handler_id = 0;
>> +	struct rte_dispatcher_lcore *reference_lcore =
>> +		&dispatcher->lcores[0];
>> +
>> +	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
>> +		return -1;
>> +
>> +	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
>> +		handler_id++;
>> +
>> +	return handler_id;
>> +}
>> +
>> +static void
>> +evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
>> +		    const struct rte_dispatcher_handler *handler) {
>> +	int handler_idx = lcore->num_handlers;
>> +
>> +	lcore->handlers[handler_idx] = *handler;
>> +	lcore->num_handlers++;
>> +}
>> +
>> +static void
>> +evd_install_handler(struct rte_dispatcher *dispatcher,
>> +		    const struct rte_dispatcher_handler *handler) {
>> +	int i;
>> +
>> +	for (i = 0; i < RTE_MAX_LCORE; i++) {
>> +		struct rte_dispatcher_lcore *lcore =
>> +			&dispatcher->lcores[i];
>> +		evd_lcore_install_handler(lcore, handler);
>> +	}
>> +}
>> +
>> +int
>> +rte_dispatcher_register(uint8_t id,
>> +			      rte_dispatcher_match_t match_fun,
>> +			      void *match_data,
>> +			      rte_dispatcher_process_t process_fun,
>> +			      void *process_data)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	struct rte_dispatcher_handler handler = {
>> +		.match_fun = match_fun,
>> +		.match_data = match_data,
> 
> We could provide a default match function that uses the queue_id as the
> matching criterion. That would reduce the application's burden of providing
> two callbacks, one for matching and one for processing the event.
> The application could pass NULL for "match_fun", in which case the default
> function pointer would be used here.
> 

But which queue id would this default function pointer match?

I think you need more API calls to allow for something like this. I've 
discussed this kind of API in some previous messages on this list, if I 
recall correctly.
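
For illustration only, such an extended convenience API might look
something like the following sketch (all names are hypothetical and not
part of the proposed API; the queue id would have to be carried in
per-registration match data behind the scenes, which is why extra API
surface is needed):

	int
	rte_dispatcher_register_queue(uint8_t id, uint8_t queue_id,
				      rte_dispatcher_process_t process_fun,
				      void *process_data);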

> 
>> +		.process_fun = process_fun,
>> +		.process_data = process_data
>> +	};
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +	handler.id = evd_alloc_handler_id(dispatcher);
>> +
>> +	if (handler.id < 0)
>> +		return -ENOMEM;
>> +
>> +	evd_install_handler(dispatcher, &handler);
>> +
>> +	return handler.id;
>> +}
>> +
>> +static int
>> +evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
>> +			    int handler_id)
>> +{
>> +	struct rte_dispatcher_handler *unreg_handler;
>> +	int handler_idx;
>> +	uint16_t last_idx;
>> +
>> +	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
>> +
>> +	if (unreg_handler == NULL)
>> +		return -EINVAL;
>> +
> 
> Shouldn't the logic be "handler_idx = unreg_handler - &lcore->handlers[0];"?
> unreg_handler is an address higher than or equal to the handler base address
> (&lcore->handlers[0]), so with the operands as written the index comes out
> negative.
> 

True. Will fix.

> 
>> +	handler_idx = &lcore->handlers[0] - unreg_handler;
>> +
>> +	last_idx = lcore->num_handlers - 1;
>> +
>> +	if (handler_idx != last_idx) {
>> +		/* move all handlers to maintain handler order */
>> +		int n = last_idx - handler_idx;
>> +		memmove(unreg_handler, unreg_handler + 1,
>> +			sizeof(struct rte_dispatcher_handler) * n);
>> +	}
>> +
>> +	lcore->num_handlers--;
>> +
>> +	return 0;
>> +}
>> +
>> +static int
>> +evd_uninstall_handler(struct rte_dispatcher *dispatcher,
>> +		      int handler_id)
>> +{
>> +	unsigned int lcore_id;
>> +
>> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
>> +		struct rte_dispatcher_lcore *lcore =
>> +			&dispatcher->lcores[lcore_id];
>> +		int rc;
>> +
>> +		rc = evd_lcore_uninstall_handler(lcore, handler_id);
>> +
>> +		if (rc < 0)
>> +			return rc;
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_unregister(uint8_t id, int handler_id) {
>> +	struct rte_dispatcher *dispatcher;
>> +	int rc;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +	rc = evd_uninstall_handler(dispatcher, handler_id);
>> +
>> +	return rc;
>> +}
>> +
>> +static struct rte_dispatcher_finalizer *
>> +evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
>> +		       int handler_id)
>> +{
>> +	int i;
>> +
>> +	for (i = 0; i < dispatcher->num_finalizers; i++) {
>> +		struct rte_dispatcher_finalizer *finalizer =
>> +			&dispatcher->finalizers[i];
>> +
>> +		if (finalizer->id == handler_id)
>> +			return finalizer;
>> +	}
>> +
>> +	return NULL;
>> +}
>> +
>> +static int
>> +evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher) {
>> +	int finalizer_id = 0;
>> +
>> +	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
>> +		finalizer_id++;
>> +
>> +	return finalizer_id;
>> +}
>> +
>> +static struct rte_dispatcher_finalizer *
>> +evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
>> +{
>> +	int finalizer_idx;
>> +	struct rte_dispatcher_finalizer *finalizer;
>> +
>> +	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
>> +		return NULL;
>> +
>> +	finalizer_idx = dispatcher->num_finalizers;
>> +	finalizer = &dispatcher->finalizers[finalizer_idx];
>> +
>> +	finalizer->id = evd_alloc_finalizer_id(dispatcher);
>> +
>> +	dispatcher->num_finalizers++;
>> +
>> +	return finalizer;
>> +}
>> +
>> +int
>> +rte_dispatcher_finalize_register(uint8_t id,
>> +			      rte_dispatcher_finalize_t finalize_fun,
>> +			      void *finalize_data)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	struct rte_dispatcher_finalizer *finalizer;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +	finalizer = evd_alloc_finalizer(dispatcher);
>> +
>> +	if (finalizer == NULL)
>> +		return -ENOMEM;
>> +
>> +	finalizer->finalize_fun = finalize_fun;
>> +	finalizer->finalize_data = finalize_data;
>> +
>> +	return finalizer->id;
>> +}
>> +
>> +int
>> +rte_dispatcher_finalize_unregister(uint8_t id, int handler_id)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	struct rte_dispatcher_finalizer *unreg_finalizer;
>> +	int finalizer_idx;
>> +	uint16_t last_idx;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, handler_id);
>> +
>> +	if (unreg_finalizer == NULL)
>> +		return -EINVAL;
>> +
> 
> Same as above comment in rte_dispatcher_unregister, base address needs to be subtracted from unreg_finalizer
> 

Yes.
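
I.e.:

	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];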

> 
>> +	finalizer_idx = &dispatcher->finalizers[0] - unreg_finalizer;
>> +
>> +	last_idx = dispatcher->num_finalizers - 1;
>> +
>> +	if (finalizer_idx != last_idx) {
>> +		/* move all finalizers to maintain order */
>> +		int n = last_idx - finalizer_idx;
>> +		memmove(unreg_finalizer, unreg_finalizer + 1,
>> +			sizeof(struct rte_dispatcher_finalizer) * n);
>> +	}
>> +
>> +	dispatcher->num_finalizers--;
>> +
>> +	return 0;
>> +}
>> +
>> +static int
>> +evd_set_service_runstate(uint8_t id, int state)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	int rc;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +	rc = rte_service_component_runstate_set(dispatcher->service_id,
>> +						state);
>> +
>> +	if (rc != 0) {
>> +		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
>> +				 "service component run state to %d\n", rc,
>> +				 state);
>> +		RTE_ASSERT(0);
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_start(uint8_t id)
>> +{
>> +	return evd_set_service_runstate(id, 1);
>> +}
>> +
>> +int
>> +rte_dispatcher_stop(uint8_t id)
>> +{
>> +	return evd_set_service_runstate(id, 0);
>> +}
>> +
>> +static void
>> +evd_aggregate_stats(struct rte_dispatcher_stats *result,
>> +		    const struct rte_dispatcher_stats *part)
>> +{
>> +	result->poll_count += part->poll_count;
>> +	result->ev_batch_count += part->ev_batch_count;
>> +	result->ev_dispatch_count += part->ev_dispatch_count;
>> +	result->ev_drop_count += part->ev_drop_count;
>> +}
>> +
>> +int
>> +rte_dispatcher_stats_get(uint8_t id,
>> +			       struct rte_dispatcher_stats *stats)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	unsigned int lcore_id;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
> 
> Stats pointer needs to be validated for NULL before accessing
> 
> 

Yes.
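
A minimal guard at the top of the function would be (sketch):

	if (stats == NULL)
		return -EINVAL;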

Thanks a lot for your review comments.

>> +	*stats = (struct rte_dispatcher_stats) {};
>> +
>> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
>> +		struct rte_dispatcher_lcore *lcore =
>> +			&dispatcher->lcores[lcore_id];
>> +
>> +		evd_aggregate_stats(stats, &lcore->stats);
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_stats_reset(uint8_t id)
>> +{
>> +	struct rte_dispatcher *dispatcher;
>> +	unsigned int lcore_id;
>> +
>> +	EVD_VALID_ID_OR_RET_EINVAL(id);
>> +	dispatcher = evd_get_dispatcher(id);
>> +
>> +
>> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
>> +		struct rte_dispatcher_lcore *lcore =
>> +			&dispatcher->lcores[lcore_id];
>> +
>> +		lcore->stats = (struct rte_dispatcher_stats) {};
>> +	}
>> +
>> +	return 0;
>> +
>> +}
>> diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
>> new file mode 100644
>> index 0000000000..6712687a08
>> --- /dev/null
>> +++ b/lib/dispatcher/rte_dispatcher.h
>> @@ -0,0 +1,480 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2023 Ericsson AB
>> + */
>> +
>> +#ifndef __RTE_DISPATCHER_H__
>> +#define __RTE_DISPATCHER_H__
>> +
>> +/**
>> + * @file
>> + *
>> + * RTE Dispatcher
>> + *
>> + * The purpose of the dispatcher is to help decouple different parts
>> + * of an application (e.g., modules), sharing the same underlying
>> + * event device.
>> + */
>> +
>> +#ifdef __cplusplus
>> +extern "C" {
>> +#endif
>> +
>> +#include <rte_eventdev.h>
>> +
>> +/**
>> + * Function prototype for match callbacks.
>> + *
>> + * Match callbacks are used by an application to decide how the
>> + * dispatcher distributes events to different parts of the
>> + * application.
>> + *
>> + * The application is not expected to process the event at the point
>> + * of the match call. Such matters should be deferred to the process
>> + * callback invocation.
>> + *
>> + * The match callback may be used as an opportunity to prefetch data.
>> + *
>> + * @param event
>> + *  Pointer to event
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_dispatcher_register().
>> + *
>> + * @return
>> + *   Returns true in case this event should be delivered (via
>> + *   the process callback), and false otherwise.
>> + */
>> +typedef bool
>> +(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
>> +
>> +/**
>> + * Function prototype for process callbacks.
>> + *
>> + * The process callbacks are used by the dispatcher to deliver
>> + * events for processing.
>> + *
>> + * @param event_dev_id
>> + *  The originating event device id.
>> + *
>> + * @param event_port_id
>> + *  The originating event port.
>> + *
>> + * @param events
>> + *  Pointer to an array of events.
>> + *
>> + * @param num
>> + *  The number of events in the @p events array.
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_dispatcher_register().
>> + */
>> +
>> +typedef void
>> +(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
>> +				  struct rte_event *events, uint16_t num,
>> +				  void *cb_data);
>> +
>> +/**
>> + * Function prototype for finalize callbacks.
>> + *
>> + * The finalize callbacks are used by the dispatcher to notify the
>> + * application it has delivered all events from a particular batch
>> + * dequeued from the event device.
>> + *
>> + * @param event_dev_id
>> + *  The originating event device id.
>> + *
>> + * @param event_port_id
>> + *  The originating event port.
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_dispatcher_finalize_register().
>> + */
>> +
>> +typedef void
>> +(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
>> +				   void *cb_data);
>> +
>> +/**
>> + * Dispatcher statistics
>> + */
>> +struct rte_dispatcher_stats {
>> +	uint64_t poll_count;
>> +	/**< Number of event dequeue calls made toward the event device. */
>> +	uint64_t ev_batch_count;
>> +	/**< Number of non-empty event batches dequeued from event device. */
>> +	uint64_t ev_dispatch_count;
>> +	/**< Number of events dispatched to a handler.*/
>> +	uint64_t ev_drop_count;
>> +	/**< Number of events dropped because no handler was found. */
>> +};
>> +
>> +/**
>> + * Create a dispatcher with the specified id.
>> + *
>> + * @param id
>> + *  An application-specified, unique (across all dispatcher
>> + *  instances) identifier.
>> + *
>> + * @param event_dev_id
>> + *  The identifier of the event device from which this dispatcher
>> + *  will dequeue events.
>> + *
>> + * @return
>> + *   - 0: Success
>> + *   - <0: Error code on failure
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id);
>> +
>> +/**
>> + * Free a dispatcher.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_free(uint8_t id);
>> +
>> +/**
>> + * Retrieve the service identifier of a dispatcher.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @param [out] service_id
>> + *  A pointer to a caller-supplied buffer where the dispatcher's
>> + *  service id will be stored.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
>> +
>> +/**
>> + * Binds an event device port to a specific lcore on the specified
>> + * dispatcher.
>> + *
>> + * This function configures the event port id to be used by the event
>> + * dispatcher service, if run on the specified lcore.
>> + *
>> + * Multiple event device ports may be bound to the same lcore. A
>> + * particular port must not be bound to more than one lcore.
>> + *
>> + * If the dispatcher service is mapped (with rte_service_map_lcore_set())
>> + * to an lcore to which no ports are bound, the service function will be a
>> + * no-operation.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but not while the dispatcher is running on the
>> + * lcore specified by @c lcore_id.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @param event_port_id
>> + *  The event device port identifier.
>> + *
>> + * @param batch_size
>> + *  The batch size to use in rte_event_dequeue_burst(), for the
>> + *  configured event device port and lcore.
>> + *
>> + * @param timeout
>> + *  The timeout parameter to use in rte_event_dequeue_burst(), for the
>> + *  configured event device port and lcore.
>> + *
>> + * @param lcore_id
>> + *  The lcore by which this event port will be used.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -ENOMEM: Unable to allocate sufficient resources.
>> + *  - -EEXIST: Event port is already configured.
>> + *  - -EINVAL: Invalid arguments.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
>> +					uint16_t batch_size, uint64_t timeout,
>> +					unsigned int lcore_id);
>> +
>> +/**
>> + * Unbind an event device port from a specific lcore.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but not while the dispatcher is running on the
>> + * lcore specified by @c lcore_id.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @param event_port_id
>> + *  The event device port identifier.
>> + *
>> + * @param lcore_id
>> + *  The lcore which was using this event port.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: Invalid @c id.
>> + *  - -ENOENT: Event port id not bound to this @c lcore_id.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
>> +					    unsigned int lcore_id);
>> +
>> +/**
>> + * Register an event handler.
>> + *
>> + * The match callback function is used to select if a particular event
>> + * should be delivered, using the corresponding process callback
>> + * function.
>> + *
>> + * The reason for having two distinct steps is to allow the dispatcher
>> + * to deliver all events as a batch. This in turn will cause
>> + * processing of a particular kind of event to happen in a
>> + * back-to-back manner, improving cache locality.
>> + *
>> + * The list of handler callback functions is shared among all lcores,
>> + * but will only be executed on lcores which have an eventdev port
>> + * bound to them, and which are running the dispatcher service.
>> + *
>> + * An event is delivered to at most one handler. Events where no
>> + * handler is found are dropped.
>> + *
>> + * The application must not depend on the order of which the match
>> + * functions are invoked.
>> + *
>> + * Ordering of events is not guaranteed to be maintained between
>> + * different deliver callbacks. For example, suppose there are two
>> + * callbacks registered, matching different subsets of events arriving
>> + * on an atomic queue. A batch of events [ev0, ev1, ev2] is dequeued
>> + * on a particular port, all pertaining to the same flow. The match
>> + * callback for registration A returns true for ev0 and ev2, and the
>> + * matching function for registration B for ev1. In that scenario, the
>> + * dispatcher may choose to deliver first [ev0, ev2] using A's deliver
>> + * function, and then [ev1] to B - or vice versa.
>> + *
>> + * rte_dispatcher_register() may be called by any thread
>> + * (including unregistered non-EAL threads), but not while the event
>> + * dispatcher is running on any service lcore.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @param match_fun
>> + *  The match callback function.
>> + *
>> + * @param match_cb_data
>> + *  A pointer to some application-specific opaque data (or NULL),
>> + *  which is supplied back to the application when match_fun is
>> + *  called.
>> + *
>> + * @param process_fun
>> + *  The process callback function.
>> + *
>> + * @param process_cb_data
>> + *  A pointer to some application-specific opaque data (or NULL),
>> + *  which is supplied back to the application when process_fun is
>> + *  called.
>> + *
>> + * @return
>> + *  - >= 0: The identifier for this registration.
>> + *  - -ENOMEM: Unable to allocate sufficient resources.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_register(uint8_t id,
>> +			      rte_dispatcher_match_t match_fun,
>> +			      void *match_cb_data,
>> +			      rte_dispatcher_process_t process_fun,
>> +			      void *process_cb_data);
>> +
>> +/**
>> + * Unregister an event handler.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but not while the dispatcher is running on
>> + * any service lcore.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @param handler_id
>> + *  The handler registration id returned by the original
>> + *  rte_dispatcher_register() call.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_unregister(uint8_t id, int handler_id);
>> +
>> +/**
>> + * Register a finalize callback function.
>> + *
>> + * An application may optionally install one or more finalize
>> + * callbacks.
>> + *
>> + * All finalize callbacks are invoked by the dispatcher when a
>> + * complete batch of events (retrieved using rte_event_dequeue_burst())
>> + * has been delivered to the application (or has been dropped).
>> + *
>> + * The finalize callback is not tied to any particular handler.
>> + *
>> + * The finalize callback provides an opportunity for the application
>> + * to do per-batch processing. One case where this may be useful is if
>> + * an event output buffer is used, and is shared among several
>> + * handlers. In such a case, proper output buffer flushing may be
>> + * assured using a finalize callback.
>> + *
>> + * rte_dispatcher_finalize_register() may be called by any thread
>> + * (including unregistered non-EAL threads), but not while the
>> + * dispatcher is running on any service lcore.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @param finalize_fun
>> + *  The function called after completing the processing of a
>> + *  dequeue batch.
>> + *
>> + * @param finalize_data
>> + *  A pointer to some application-specific opaque data (or NULL),
>> + *  which is supplied back to the application when @c finalize_fun is
>> + *  called.
>> + *
>> + * @return
>> + *  - >= 0: The identifier for this registration.
>> + *  - -ENOMEM: Unable to allocate sufficient resources.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_finalize_register(uint8_t id,
>> +				 rte_dispatcher_finalize_t finalize_fun,
>> +				 void *finalize_data);
>> +
>> +/**
>> + * Unregister a finalize callback.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but not while the dispatcher is running on
>> + * any service lcore.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @param reg_id
>> + *  The finalize registration id returned by the original
>> + *  rte_dispatcher_finalize_register() call.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_finalize_unregister(uint8_t id, int reg_id);
>> +
>> +/**
>> + * Start a dispatcher instance.
>> + *
>> + * Enables the dispatcher service.
>> + *
>> + * The underlying event device must have been started prior to calling
>> + * rte_dispatcher_start().
>> + *
>> + * For the dispatcher to actually perform work (i.e., dispatch
>> + * events), its service must have been mapped to one or more service
>> + * lcores, and its service run state set to '1'. A dispatcher's
>> + * service is retrieved using rte_dispatcher_service_id_get().
>> + *
>> + * Each service lcore to which the dispatcher is mapped should
>> + * have at least one event port configured. Such configuration is
>> + * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
>> + * starting the dispatcher.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: Invalid @c id.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_start(uint8_t id);
>> +
>> +/**
>> + * Stop a running dispatcher instance.
>> + *
>> + * Disables the dispatcher service.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: Invalid @c id.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_stop(uint8_t id);
>> +
>> +/**
>> + * Retrieve statistics for a dispatcher instance.
>> + *
>> + * This function is MT safe and may be called by any thread
>> + * (including unregistered non-EAL threads).
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + * @param[out] stats
>> + *   A pointer to a structure to fill with statistics.
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: The @c id parameter was invalid.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_stats_get(uint8_t id,
>> +			       struct rte_dispatcher_stats *stats);
>> +
>> +/**
>> + * Reset statistics for a dispatcher instance.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but may not produce the correct result if the
>> + * dispatcher is running on any service lcore.
>> + *
>> + * @param id
>> + *  The dispatcher identifier.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: The @c id parameter was invalid.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_stats_reset(uint8_t id);
>> +
>> +#ifdef __cplusplus
>> +}
>> +#endif
>> +
>> +#endif /* __RTE_DISPATCHER_H__ */
>> diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
>> new file mode 100644
>> index 0000000000..8f9ad96522
>> --- /dev/null
>> +++ b/lib/dispatcher/version.map
>> @@ -0,0 +1,20 @@
>> +EXPERIMENTAL {
>> +	global:
>> +
>> +	# added in 23.11
>> +	rte_dispatcher_create;
>> +	rte_dispatcher_free;
>> +	rte_dispatcher_service_id_get;
>> +	rte_dispatcher_bind_port_to_lcore;
>> +	rte_dispatcher_unbind_port_from_lcore;
>> +	rte_dispatcher_register;
>> +	rte_dispatcher_unregister;
>> +	rte_dispatcher_finalize_register;
>> +	rte_dispatcher_finalize_unregister;
>> +	rte_dispatcher_start;
>> +	rte_dispatcher_stop;
>> +	rte_dispatcher_stats_get;
>> +	rte_dispatcher_stats_reset;
>> +
>> +	local: *;
>> +};
>> diff --git a/lib/meson.build b/lib/meson.build
>> index 099b0ed18a..3093b338d2 100644
>> --- a/lib/meson.build
>> +++ b/lib/meson.build
>> @@ -35,6 +35,7 @@ libraries = [
>>           'distributor',
>>           'efd',
>>           'eventdev',
>> +        'dispatcher', # dispatcher depends on eventdev
>>           'gpudev',
>>           'gro',
>>           'gso',
>> @@ -81,6 +82,7 @@ optional_libs = [
>>           'cfgfile',
>>           'compressdev',
>>           'cryptodev',
>> +        'dispatcher',
>>           'distributor',
>>           'dmadev',
>>           'efd',
>> --
>> 2.34.1
> 

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-04 13:03                                 ` [PATCH v3 1/3] lib: introduce " Mattias Rönnblom
  2023-09-17 16:46                                   ` Naga Harish K, S V
@ 2023-09-19 10:58                                   ` Jerin Jacob
  2023-09-21 16:47                                     ` Mattias Rönnblom
  2023-09-21 18:36                                   ` Jerin Jacob
  2023-09-22  7:38                                   ` [PATCH v4 0/3] Add " Mattias Rönnblom
  3 siblings, 1 reply; 102+ messages in thread
From: Jerin Jacob @ 2023-09-19 10:58 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Morten Brørup

On Mon, Sep 4, 2023 at 6:39 PM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> The purpose of the dispatcher library is to help reduce coupling in an
> Eventdev-based DPDK application.
>
> In addition, the dispatcher also provides a convenient and flexible
> way for the application to use service cores for application-level
> processing.
>
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>

High level architecture comment
--------------------------------

1) I think we don't need to tie this library ONLY to eventdev
applications. It can be used with poll mode as well;
that way, a traditional pipeline application with ethdev as the source
could use this library to dispatch the packets.

We don't need to implement that in the first version, but the API can
make room for such abstractions.

Based on my understanding, in the fast path it has the means to:
a) Pull out the events using rte_event_dequeue_burst()
b) Compare with registered match functions and call process upon match.

If we abstract (a) as rte_dispatcher_source, we could pull from ethdev
via rte_eth_rx_burst() or
from a ring via its dequeue burst API, based on the rte_dispatcher_source
selected in the dispatch configuration,
and we can use different service function pointers to have a different
service core implementation per source, without affecting the performance
of the other sources, as sketched below.
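
A rough sketch (function names purely illustrative) of choosing the
service callback at configuration time, so each fast path stays free of
per-event source branching:

static int32_t evd_process_eventdev(void *userdata); /* rte_event_dequeue_burst() */
static int32_t evd_process_ethdev(void *userdata);   /* rte_eth_rx_burst() */

	service.callback = source == RTE_DISPATCHER_SOURCE_EVENTDEV ?
		evd_process_eventdev : evd_process_ethdev;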

High level cosmetic comment
----------------------------------------------------
1) Missing doxygen connection - see doc/api/doxy-api-index.md.
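
E.g., something like the following entry (exact placement in the index
may differ):

  [dispatcher](@ref rte_dispatcher.h),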

Process related comment
------------------------------------
1) Documentation does not need a separate patch. Recent libraries include
the documentation changes in the same patch.
You could have the doc and API header file as the first patch and the
implementation as subsequent patches.



> diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
> new file mode 100644
> index 0000000000..6712687a08
> --- /dev/null
> +++ b/lib/dispatcher/rte_dispatcher.h
> @@ -0,0 +1,480 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Ericsson AB
> + */
> +
> +#ifndef __RTE_DISPATCHER_H__
> +#define __RTE_DISPATCHER_H__
> +


All new APIs should be experimental. See
https://elixir.bootlin.com/dpdk/latest/source/lib/graph/rte_graph.h#L12
for an example.


> +/**
> + * @file
> + *
> + * RTE Dispatcher
> + *
> + * The purpose of the dispatcher is to help decouple different parts
> + * of an application (e.g., modules), sharing the same underlying
> + * event device.
> +
> +/**
> + * Function prototype for match callbacks.
> + *
> + * Match callbacks are used by an application to decide how the
> + * dispatcher distributes events to different parts of the
> + * application.
> + *
> + * The application is not expected to process the event at the point
> + * of the match call. Such matters should be deferred to the process
> + * callback invocation.
> + *
> + * The match callback may be used as an opportunity to prefetch data.
> + *
> + * @param event
> + *  Pointer to event
> + *
> + * @param cb_data
> + *  The pointer supplied by the application in
> + *  rte_dispatcher_register().
> + *
> + * @return
> + *   Returns true in case this event should be delivered (via
> + *   the process callback), and false otherwise.
> + */
> +typedef bool
> +(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);


a) Can we use void *event, so that it can be used with an mbuf or another
type by casting in the callback implementation?

b) I was thinking about how we can avoid this function pointer and
achieve better performance at the architecture level.

Both x86 and ARM have vector instructions[1] to form a vector from various
offsets in memory and compare N events
in one shot. That is, we could express match data like: offset = X has
value Y, and offset = Z has value A.
I know it may not suit existing applications using these APIs, but I
believe it will be more performance
effective. If it makes sense, you can adapt to this. (Something to think about.)


[1]
https://developer.arm.com/documentation/den0018/a/NEON-and-VFP-Instruction-Summary/NEON-general-data-processing-instructions/VTBL
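
To make the idea concrete, the match data could be expressed declaratively
instead of as a callback - a hypothetical sketch, not part of the proposed
API:

struct rte_dispatcher_match_spec {
	uint16_t offset; /* byte offset into the struct rte_event */
	uint64_t mask;   /* bits to compare */
	uint64_t value;  /* expected value after masking */
};

A table of such specs could then be evaluated for N events in one
vectorized pass, rather than calling one match function per event.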

> +
> +/**
> + * Function prototype for process callbacks.
> + *
> + * The process callbacks are used by the dispatcher to deliver
> + * events for processing.
> + *
> + * @param event_dev_id
> + *  The originating event device id.
> + *
> + * @param event_port_id
> + *  The originating event port.
> + *
> + * @param events
> + *  Pointer to an array of events.
> + *
> + * @param num
> + *  The number of events in the @p events array.
> + *
> + * @param cb_data
> + *  The pointer supplied by the application in
> + *  rte_dispatcher_register().
> + */
> +
> +typedef void
> +(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
> +                                 struct rte_event *events, uint16_t num,
> +                                 void *cb_data);

Same as the above comment - can event_port_id be changed to source_id?
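
E.g. (illustrative only):

typedef void
(*rte_dispatcher_process_t)(uint8_t source_id, struct rte_event *events,
			    uint16_t num, void *cb_data);

where source_id identifies the eventdev port, ethdev queue, or ring from
which the burst was pulled.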


> +/**
> + * Create a dispatcher with the specified id.
> + *
> + * @param id
> + *  An application-specified, unique (across all dispatcher
> + *  instances) identifier.
> + *
> + * @param event_dev_id
> + *  The identifier of the event device from which this dispatcher
> + *  will dequeue events.
> + *
> + * @return
> + *   - 0: Success
> + *   - <0: Error code on failure
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id);

The following could be used to abstract more dispatcher sources:

enum rte_dispatcher_source {
         RTE_DISPATCHER_SOURCE_EVENTDEV, /* use rte_event_dequeue_burst() to
pull the events */
         RTE_DISPATCHER_SOURCE_ETHDEV, /* use rte_eth_rx_burst() to
pull the packets */
};

struct rte_dispatcher_params {
            enum rte_dispatcher_source source;
            union {
                   /* Valid when source == RTE_DISPATCHER_SOURCE_EVENTDEV */
                    struct {
                             uint8_t event_dev_id;
                             uint8_t event_port_id;
                    } event;
                   /* Valid when source == RTE_DISPATCHER_SOURCE_ETHDEV */
                    struct {
                             uint16_t ethdev_dev_id;
                             uint16_t ethdev_rx_queue_id;
                    } ethdev;
             };
};

rte_dispatcher_create(uint8_t id, struct rte_dispatcher_params *params);
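
Creation could then look like (purely illustrative):

	struct rte_dispatcher_params params = {
		.source = RTE_DISPATCHER_SOURCE_EVENTDEV,
		.event = {
			.event_dev_id = 0,
			.event_port_id = 1
		}
	};

	rte_dispatcher_create(id, &params);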

I will stop reviewing at this point. Will review further based on the
direction agreed upon.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* RE: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-19  9:20                                     ` Mattias Rönnblom
@ 2023-09-20  9:11                                       ` Naga Harish K, S V
  0 siblings, 0 replies; 102+ messages in thread
From: Naga Harish K, S V @ 2023-09-20  9:11 UTC (permalink / raw)
  To: Mattias Rönnblom, mattias.ronnblom, dev
  Cc: Jerin Jacob, techboard, Van Haaren, Harry, Nilsson, Peter,
	Heng Wang, Pavan Nikhilesh, Gujjar, Abhinandan S, Carrillo,
	Erik G, Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Mccarthy, Peter, Yan, Zhirun



> -----Original Message-----
> From: Mattias Rönnblom <hofors@lysator.liu.se>
> Sent: Tuesday, September 19, 2023 2:51 PM
> To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; mattias.ronnblom
> <mattias.ronnblom@ericsson.com>; dev@dpdk.org
> Cc: Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van Haaren,
> Harry <harry.van.haaren@intel.com>; Nilsson, Peter
> <peter.j.nilsson@ericsson.com>; Heng Wang <heng.wang@ericsson.com>;
> Pavan Nikhilesh <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Carrillo, Erik G <erik.g.carrillo@intel.com>;
> Shijith Thotton <sthotton@marvell.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Sachin Saxena <sachin.saxena@oss.nxp.com>;
> Liang Ma <liangma@liangbit.com>; Mccarthy, Peter
> <peter.mccarthy@intel.com>; Yan, Zhirun <zhirun.yan@intel.com>
> Subject: Re: [PATCH v3 1/3] lib: introduce dispatcher library
> 
> On 2023-09-17 18:46, Naga Harish K, S V wrote:
> >
> >
> >> -----Original Message-----
> >> From: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> >> Sent: Monday, September 4, 2023 6:33 PM
> >> To: dev@dpdk.org
> >> Cc: Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van Haaren,
> >> Harry <harry.van.haaren@intel.com>; hofors@lysator.liu.se; Nilsson,
> >> Peter <peter.j.nilsson@ericsson.com>; Heng Wang
> >> <heng.wang@ericsson.com>; Naga Harish K, S V
> >> <s.v.naga.harish.k@intel.com>; Pavan Nikhilesh
> >> <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
> >> <abhinandan.gujjar@intel.com>; Carrillo, Erik G
> >> <Erik.G.Carrillo@intel.com>; Shijith Thotton <sthotton@marvell.com>;
> >> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> >> <sachin.saxena@oss.nxp.com>; Liang Ma <liangma@liangbit.com>;
> >> Mccarthy, Peter <Peter.Mccarthy@intel.com>; Yan, Zhirun
> >> <Zhirun.Yan@intel.com>; mattias.ronnblom
> >> <mattias.ronnblom@ericsson.com>
> >> Subject: [PATCH v3 1/3] lib: introduce dispatcher library
> >>
> >> The purpose of the dispatcher library is to help reduce coupling in
> >> an Eventdev-based DPDK application.
> >>
> >> In addition, the dispatcher also provides a convenient and flexible
> >> way for the application to use service cores for application-level processing.
> >>
> >> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> >> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> >> Reviewed-by: Heng Wang <heng.wang@ericsson.com>
> >>
> >> --
> >>
> >> PATCH v3:
> >>   o To underline its optional character and since it does not provide
> >>     hardware abstraction, the event dispatcher is now a separate
> >>     library.
> >>   o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
> >>     shorter and to avoid the rte_event_* namespace.
> >>
> >
> > Rte_dispatcher basically dispatches events, but it feels like the name
> > does not convey that.
> > Also, it is like any other adapter service that can reside within the
> > eventdev directory.
> >
> > I can see some discussion in previous threads related to the placement
> > of the dispatcher library.
> >
> > It is an optional eventdev application service, not enforcing this
> > programming model on the application.
> > The documentation may need to be updated to mention that this is
> > optional.
> >
> > If any hardware comes up with the dispatcher feature, then this library
> > may need to be moved inside the eventdev library later.
> >
> 
> It seems to me that the deciding factor for what functionality goes into a DPDK
> library or not is not so much whether it's implemented in hardware,
> in software, or some combination thereof. The important thing is that the
> library is able to present a coherent API to the application (or other
> libraries).
> 
> That said, as I've mentioned before, I have no strong opinion on this subject.
> 

What is the next step here?
The response is not conclusive, as it looks like both yes and no on
changing the directory structure.


> > So, it makes sense to keep this optional service in the eventdev folder
> > as an optional feature.
> >
> >> PATCH v2:
> >>   o Add dequeue batch count statistic.
> >>   o Add statistics reset function to API.
> >>   o Clarify MT safety guarantees (or lack thereof) in the API documentation.
> >>   o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
> >>     to be consistent with similar loops elsewhere in the dispatcher.
> >>   o Fix variable names in finalizer unregister function.
> >>
> >> PATCH:
> >>   o Change prefix from RED to EVD, to avoid confusion with random
> >>     early detection.
> >>
> >> RFC v4:
> >>   o Move handlers to per-lcore data structures.
> >>   o Introduce mechanism which rearranges handlers so that often-used
> >>     handlers tend to be tried first.
> >>   o Terminate dispatch loop in case all events are delivered.
> >>   o To avoid the dispatcher's service function hogging the CPU, process
> >>     only one batch per call.
> >>   o Have service function return -EAGAIN if no work is performed.
> >>   o Events delivered in the process function is no longer marked 'const',
> >>     since modifying them may be useful for the application and cause
> >>     no difficulties for the dispatcher.
> >>   o Various minor API documentation improvements.
> >>
> >> RFC v3:
> >>   o Add stats_get() function to the version.map file.
> >> ---
> >>   MAINTAINERS                     |   3 +
> >>   lib/dispatcher/meson.build      |  17 +
> >>   lib/dispatcher/rte_dispatcher.c | 791
> >> ++++++++++++++++++++++++++++++++  lib/dispatcher/rte_dispatcher.h
> |
> >> 480 +++++++++++++++++++
> >>   lib/dispatcher/version.map      |  20 +
> >>   lib/meson.build                 |   2 +
> >>   6 files changed, 1313 insertions(+)
> >>   create mode 100644 lib/dispatcher/meson.build  create mode 100644
> >> lib/dispatcher/rte_dispatcher.c  create mode 100644
> >> lib/dispatcher/rte_dispatcher.h  create mode 100644
> >> lib/dispatcher/version.map
> >>
> >> diff --git a/MAINTAINERS b/MAINTAINERS index a926155f26..6704cd5b2c
> >> 100644
> >> --- a/MAINTAINERS
> >> +++ b/MAINTAINERS
> >> @@ -1726,6 +1726,9 @@ M: Nithin Dabilpuram
> <ndabilpuram@marvell.com>
> >>   M: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >>   F: lib/node/
> >>
> >> +Dispatcher - EXPERIMENTAL
> >> +M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> >> +F: lib/dispatcher/
> >>
> >>   Test Applications
> >>   -----------------
> >> diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
> >> new file mode 100644 index 0000000000..c6054a3a5d
> >> --- /dev/null
> >> +++ b/lib/dispatcher/meson.build
> >> @@ -0,0 +1,17 @@
> >> +# SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2023 Ericsson
> >> +AB
> >> +
> >> +if is_windows
> >> +    build = false
> >> +    reason = 'not supported on Windows'
> >> +    subdir_done()
> >> +endif
> >> +
> >> +sources = files(
> >> +        'rte_dispatcher.c',
> >> +)
> >> +headers = files(
> >> +        'rte_dispatcher.h',
> >> +)
> >> +
> >> +deps += ['eventdev']
> >> diff --git a/lib/dispatcher/rte_dispatcher.c
> >> b/lib/dispatcher/rte_dispatcher.c new file mode 100644 index
> >> 0000000000..3319fe09f2
> >> --- /dev/null
> >> +++ b/lib/dispatcher/rte_dispatcher.c
> >> @@ -0,0 +1,791 @@
> >> +/* SPDX-License-Identifier: BSD-3-Clause
> >> + * Copyright(c) 2023 Ericsson AB
> >> + */
> >> +
> >> +#include <stdbool.h>
> >> +#include <stdint.h>
> >> +
> >> +#include <rte_branch_prediction.h>
> >> +#include <rte_common.h>
> >> +#include <rte_lcore.h>
> >> +#include <rte_random.h>
> >> +#include <rte_service_component.h>
> >> +
> >> +#include "eventdev_pmd.h"
> >> +
> >> +#include <rte_dispatcher.h>
> >> +
> >> +#define EVD_MAX_PORTS_PER_LCORE 4
> >> +#define EVD_MAX_HANDLERS 32
> >> +#define EVD_MAX_FINALIZERS 16
> >> +#define EVD_AVG_PRIO_INTERVAL 2000
> >> +
> >> +struct rte_dispatcher_lcore_port {
> >> +	uint8_t port_id;
> >> +	uint16_t batch_size;
> >> +	uint64_t timeout;
> >> +};
> >> +
> >> +struct rte_dispatcher_handler {
> >> +	int id;
> >> +	rte_dispatcher_match_t match_fun;
> >> +	void *match_data;
> >> +	rte_dispatcher_process_t process_fun;
> >> +	void *process_data;
> >> +};
> >> +
> >> +struct rte_dispatcher_finalizer {
> >> +	int id;
> >> +	rte_dispatcher_finalize_t finalize_fun;
> >> +	void *finalize_data;
> >> +};
> >> +
> >> +struct rte_dispatcher_lcore {
> >> +	uint8_t num_ports;
> >> +	uint16_t num_handlers;
> >> +	int32_t prio_count;
> >> +	struct rte_dispatcher_lcore_port
> >> ports[EVD_MAX_PORTS_PER_LCORE];
> >> +	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
> >> +	struct rte_dispatcher_stats stats;
> >> +} __rte_cache_aligned;
> >> +
> >> +struct rte_dispatcher {
> >> +	uint8_t id;
> >> +	uint8_t event_dev_id;
> >> +	int socket_id;
> >> +	uint32_t service_id;
> >> +	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
> >> +	uint16_t num_finalizers;
> >> +	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS]; };
> >> +
> >> +static struct rte_dispatcher *dispatchers[UINT8_MAX];
> >> +
> >> +static bool
> >> +evd_has_dispatcher(uint8_t id)
> >> +{
> >> +	return dispatchers[id] != NULL;
> >> +}
> >> +
> >> +static struct rte_dispatcher *
> >> +evd_get_dispatcher(uint8_t id)
> >> +{
> >> +	return dispatchers[id];
> >> +}
> >> +
> >> +static void
> >> +evd_set_dispatcher(uint8_t id, struct rte_dispatcher *dispatcher) {
> >> +	dispatchers[id] = dispatcher;
> >> +}
> >> +
> >> +#define EVD_VALID_ID_OR_RET_EINVAL(id)
> >> 	\
> >> +	do {								\
> >> +		if (unlikely(!evd_has_dispatcher(id))) {		\
> >> +			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id);
> >> \
> >> +			return -EINVAL;					\
> >> +		}							\
> >> +	} while (0)
> >> +
> >> +static int
> >> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
> >> +		       const struct rte_event *event) {
> >> +	uint16_t i;
> >> +
> >> +	for (i = 0; i < lcore->num_handlers; i++) {
> >> +		struct rte_dispatcher_handler *handler =
> >> +			&lcore->handlers[i];
> >> +
> >> +		if (handler->match_fun(event, handler->match_data))
> >> +			return i;
> >> +	}
> >> +
> >> +	return -1;
> >> +}
> >> +
> >> +static void
> >> +evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
> >> +		       int handler_idx)
> >> +{
> >> +	struct rte_dispatcher_handler tmp;
> >> +
> >> +	if (handler_idx == 0)
> >> +		return;
> >> +
> >> +	/* Let the lucky handler "bubble" up the list */
> >> +
> >> +	tmp = lcore->handlers[handler_idx - 1];
> >> +
> >> +	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
> >> +
> >> +	lcore->handlers[handler_idx] = tmp; }
> >> +
> >> +static inline void
> >> +evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
> >> +				int handler_idx, uint16_t handler_events) {
> >> +	lcore->prio_count -= handler_events;
> >> +
> >> +	if (unlikely(lcore->prio_count <= 0)) {
> >> +		evd_prioritize_handler(lcore, handler_idx);
> >> +
> >> +		/*
> >> +		 * Randomize the interval in the unlikely case
> >> +		 * the traffic follow some very strict pattern.
> >> +		 */
> >> +		lcore->prio_count =
> >> +			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
> >> +			EVD_AVG_PRIO_INTERVAL / 2;
> >> +	}
> >> +}
> >> +
> >> +static inline void
> >> +evd_dispatch_events(struct rte_dispatcher *dispatcher,
> >> +		    struct rte_dispatcher_lcore *lcore,
> >> +		    struct rte_dispatcher_lcore_port *port,
> >> +		    struct rte_event *events, uint16_t num_events) {
> >> +	int i;
> >> +	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
> >> +	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
> >> +	uint16_t drop_count = 0;
> >> +	uint16_t dispatch_count;
> >> +	uint16_t dispatched = 0;
> >> +
> >> +	for (i = 0; i < num_events; i++) {
> >> +		struct rte_event *event = &events[i];
> >> +		int handler_idx;
> >> +
> >> +		handler_idx = evd_lookup_handler_idx(lcore, event);
> >> +
> >> +		if (unlikely(handler_idx < 0)) {
> >> +			drop_count++;
> >> +			continue;
> >> +		}
> >> +
> >> +		bursts[handler_idx][burst_lens[handler_idx]] = *event;
> >> +		burst_lens[handler_idx]++;
> >> +	}
> >> +
> >> +	dispatch_count = num_events - drop_count;
> >> +
> >> +	for (i = 0; i < lcore->num_handlers &&
> >> +		 dispatched < dispatch_count; i++) {
> >> +		struct rte_dispatcher_handler *handler =
> >> +			&lcore->handlers[i];
> >> +		uint16_t len = burst_lens[i];
> >> +
> >> +		if (len == 0)
> >> +			continue;
> >> +
> >> +		handler->process_fun(dispatcher->event_dev_id, port->port_id,
> >> +				     bursts[i], len, handler->process_data);
> >> +
> >> +		dispatched += len;
> >> +
> >> +		/*
> >> +		 * Safe, since any reshuffling will only involve
> >> +		 * already-processed handlers.
> >> +		 */
> >> +		evd_consider_prioritize_handler(lcore, i, len);
> >> +	}
> >> +
> >> +	lcore->stats.ev_batch_count++;
> >> +	lcore->stats.ev_dispatch_count += dispatch_count;
> >> +	lcore->stats.ev_drop_count += drop_count;
> >> +
> >> +	for (i = 0; i < dispatcher->num_finalizers; i++) {
> >> +		struct rte_dispatcher_finalizer *finalizer =
> >> +			&dispatcher->finalizers[i];
> >> +
> >> +		finalizer->finalize_fun(dispatcher->event_dev_id,
> >> +					port->port_id,
> >> +					finalizer->finalize_data);
> >> +	}
> >> +}
> >> +
> >> +static __rte_always_inline uint16_t
> >> +evd_port_dequeue(struct rte_dispatcher *dispatcher,
> >> +		 struct rte_dispatcher_lcore *lcore,
> >> +		 struct rte_dispatcher_lcore_port *port) {
> >> +	uint16_t batch_size = port->batch_size;
> >> +	struct rte_event events[batch_size];
> >> +	uint16_t n;
> >> +
> >> +	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
> >> +				    events, batch_size, port->timeout);
> >> +
> >> +	if (likely(n > 0))
> >> +		evd_dispatch_events(dispatcher, lcore, port, events, n);
> >> +
> >> +	lcore->stats.poll_count++;
> >> +
> >> +	return n;
> >> +}
> >> +
> >> +static __rte_always_inline uint16_t
> >> +evd_lcore_process(struct rte_dispatcher *dispatcher,
> >> +		  struct rte_dispatcher_lcore *lcore) {
> >> +	uint16_t i;
> >> +	uint16_t event_count = 0;
> >> +
> >> +	for (i = 0; i < lcore->num_ports; i++) {
> >> +		struct rte_dispatcher_lcore_port *port =
> >> +			&lcore->ports[i];
> >> +
> >> +		event_count += evd_port_dequeue(dispatcher, lcore, port);
> >> +	}
> >> +
> >> +	return event_count;
> >> +}
> >> +
> >> +static int32_t
> >> +evd_process(void *userdata)
> >> +{
> >> +	struct rte_dispatcher *dispatcher = userdata;
> >> +	unsigned int lcore_id = rte_lcore_id();
> >> +	struct rte_dispatcher_lcore *lcore =
> >> +		&dispatcher->lcores[lcore_id];
> >> +	uint64_t event_count;
> >> +
> >> +	event_count = evd_lcore_process(dispatcher, lcore);
> >> +
> >> +	if (unlikely(event_count == 0))
> >> +		return -EAGAIN;
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +static int
> >> +evd_service_register(struct rte_dispatcher *dispatcher)
> >> +{
> >> +	struct rte_service_spec service = {
> >> +		.callback = evd_process,
> >> +		.callback_userdata = dispatcher,
> >> +		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
> >> +		.socket_id = dispatcher->socket_id
> >> +	};
> >> +	int rc;
> >> +
> >> +	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "evd_%d",
> >> +		 dispatcher->id);
> >> +
> >> +	rc = rte_service_component_register(&service,
> >> +&dispatcher->service_id);
> >> +
> >> +	if (rc)
> >> +		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
> >> +				 "%s failed with error code %d\n",
> >> +				 service.name, rc);
> >> +
> >> +	return rc;
> >> +}
> >> +
> >> +static int
> >> +evd_service_unregister(struct rte_dispatcher *dispatcher)
> >> +{
> >> +	int rc;
> >> +
> >> +	rc = rte_service_component_unregister(dispatcher->service_id);
> >> +
> >> +	if (rc)
> >> +		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
> >> +				 "failed with error code %d\n", rc);
> >> +
> >> +	return rc;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
> >> +{
> >> +	int socket_id;
> >> +	struct rte_dispatcher *dispatcher;
> >> +	int rc;
> >> +
> >> +	if (evd_has_dispatcher(id)) {
> >> +		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
> >> +				 id);
> >> +		return -EEXIST;
> >> +	}
> >> +
> >> +	socket_id = rte_event_dev_socket_id(event_dev_id);
> >> +
> >> +	dispatcher =
> >> +		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
> >> +				  RTE_CACHE_LINE_SIZE, socket_id);
> >> +
> >> +	if (dispatcher == NULL) {
> >> +		RTE_EDEV_LOG_ERR("Unable to allocate memory for
> >> dispatcher\n");
> >> +		return -ENOMEM;
> >> +	}
> >> +
> >> +	*dispatcher = (struct rte_dispatcher) {
> >> +		.id = id,
> >> +		.event_dev_id = event_dev_id,
> >> +		.socket_id = socket_id
> >> +	};
> >> +
> >> +	rc = evd_service_register(dispatcher);
> >> +
> >> +	if (rc < 0) {
> >> +		rte_free(dispatcher);
> >> +		return rc;
> >> +	}
> >> +
> >> +	evd_set_dispatcher(id, dispatcher);
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_free(uint8_t id)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	int rc;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	rc = evd_service_unregister(dispatcher);
> >> +
> >> +	if (rc)
> >> +		return rc;
> >> +
> >> +	evd_set_dispatcher(id, NULL);
> >> +
> >> +	rte_free(dispatcher);
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >
> > Service_id pointer needs to be validated for NULL before accessing
> >
> >
> 
> Noted.
> 
> Returning error codes on API violations is bad practice (such should be met
> with assertions), but it is DPDK standard practice and how Eventdev does
> things as well, so I'll change it.
> 
> It would also be consistent with allowing invalid 'id' parameter values.
> 
> >> +	*service_id = dispatcher->service_id;
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +static int
> >> +lcore_port_index(struct rte_dispatcher_lcore *lcore,
> >> +		 uint8_t event_port_id)
> >> +{
> >> +	uint16_t i;
> >> +
> >> +	for (i = 0; i < lcore->num_ports; i++) {
> >> +		struct rte_dispatcher_lcore_port *port =
> >> +			&lcore->ports[i];
> >> +
> >> +		if (port->port_id == event_port_id)
> >> +			return i;
> >> +	}
> >> +
> >> +	return -1;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
> >> +					uint16_t batch_size, uint64_t
> >> timeout,
> >> +					unsigned int lcore_id)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	struct rte_dispatcher_lcore *lcore;
> >> +	struct rte_dispatcher_lcore_port *port;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	lcore =	&dispatcher->lcores[lcore_id];
> >> +
> >> +	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
> >> +		return -ENOMEM;
> >> +
> >> +	if (lcore_port_index(lcore, event_port_id) >= 0)
> >> +		return -EEXIST;
> >> +
> >> +	port = &lcore->ports[lcore->num_ports];
> >> +
> >> +	*port = (struct rte_dispatcher_lcore_port) {
> >> +		.port_id = event_port_id,
> >> +		.batch_size = batch_size,
> >> +		.timeout = timeout
> >> +	};
> >> +
> >> +	lcore->num_ports++;
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t
> event_port_id,
> >> +					    unsigned int lcore_id)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	struct rte_dispatcher_lcore *lcore;
> >> +	int port_idx;
> >> +	struct rte_dispatcher_lcore_port *port;
> >> +	struct rte_dispatcher_lcore_port *last;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	lcore =	&dispatcher->lcores[lcore_id];
> >> +
> >> +	port_idx = lcore_port_index(lcore, event_port_id);
> >> +
> >> +	if (port_idx < 0)
> >> +		return -ENOENT;
> >> +
> >> +	port = &lcore->ports[port_idx];
> >> +	last = &lcore->ports[lcore->num_ports - 1];
> >> +
> >> +	if (port != last)
> >> +		*port = *last;
> >> +
> >> +	lcore->num_ports--;
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +static struct rte_dispatcher_handler*
> >> +evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore,
> >> +			    int handler_id)
> >> +{
> >> +	uint16_t i;
> >> +
> >> +	for (i = 0; i < lcore->num_handlers; i++) {
> >> +		struct rte_dispatcher_handler *handler =
> >> +			&lcore->handlers[i];
> >> +
> >> +		if (handler->id == handler_id)
> >> +			return handler;
> >> +	}
> >> +
> >> +	return NULL;
> >> +}
> >> +
> >> +static int
> >> +evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
> >> +{
> >> +	int handler_id = 0;
> >> +	struct rte_dispatcher_lcore *reference_lcore =
> >> +		&dispatcher->lcores[0];
> >> +
> >> +	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
> >> +		return -1;
> >> +
> >> +	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) !=
> >> NULL)
> >> +		handler_id++;
> >> +
> >> +	return handler_id;
> >> +}
> >> +
> >> +static void
> >> +evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
> >> +		    const struct rte_dispatcher_handler *handler) {
> >> +	int handler_idx = lcore->num_handlers;
> >> +
> >> +	lcore->handlers[handler_idx] = *handler;
> >> +	lcore->num_handlers++;
> >> +}
> >> +
> >> +static void
> >> +evd_install_handler(struct rte_dispatcher *dispatcher,
> >> +		    const struct rte_dispatcher_handler *handler) {
> >> +	int i;
> >> +
> >> +	for (i = 0; i < RTE_MAX_LCORE; i++) {
> >> +		struct rte_dispatcher_lcore *lcore =
> >> +			&dispatcher->lcores[i];
> >> +		evd_lcore_install_handler(lcore, handler);
> >> +	}
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_register(uint8_t id,
> >> +			      rte_dispatcher_match_t match_fun,
> >> +			      void *match_data,
> >> +			      rte_dispatcher_process_t process_fun,
> >> +			      void *process_data)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	struct rte_dispatcher_handler handler = {
> >> +		.match_fun = match_fun,
> >> +		.match_data = match_data,
> >
> > We can have a default function which uses queue_id as matching data.
> > This reduces the application's burden of providing two callbacks, one for
> > matching and one for processing the event.
> > The application can pass NULL for "match_fun"; in that case, a default
> > function pointer can be used here.
> >
> 
> But which queue id would this default function pointer match?
> 
> I think you need more API calls to allow for something like this. I've discussed
> this kind of API in some previous messages on this list, if I recall correctly.
> 

Agreed, it may require some more APIs to implement this functionality.
I am fine with continuing with the current logic.
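
For reference, such a default match function could be little more than
(hypothetical helper, not in the patch):

static bool
evd_match_queue_id(const struct rte_event *event, void *cb_data)
{
	uint8_t *queue_id = cb_data;

	return event->queue_id == *queue_id;
}

with the queue id passed as the match data.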


> >
> >> +		.process_fun = process_fun,
> >> +		.process_data = process_data
> >> +	};
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	handler.id = evd_alloc_handler_id(dispatcher);
> >> +
> >> +	if (handler.id < 0)
> >> +		return -ENOMEM;
> >> +
> >> +	evd_install_handler(dispatcher, &handler);
> >> +
> >> +	return handler.id;
> >> +}
> >> +
> >> +static int
> >> +evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
> >> +			    int handler_id)
> >> +{
> >> +	struct rte_dispatcher_handler *unreg_handler;
> >> +	int handler_idx;
> >> +	uint16_t last_idx;
> >> +
> >> +	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
> >> +
> >> +	if (unreg_handler == NULL)
> >> +		return -EINVAL;
> >> +
> >
> > Shouldn't the logic be " handler_idx = unreg_handler - &lcore->handlers[0];"
> > Because unreg_handler will be at a higher or equal address than the
> > handler base address (&lcore->handlers[0])
> >
> 
> True. Will fix.
> 
> >
> >> +	handler_idx = &lcore->handlers[0] - unreg_handler;
> >> +
> >> +	last_idx = lcore->num_handlers - 1;
> >> +
> >> +	if (handler_idx != last_idx) {
> >> +		/* move all handlers to maintain handler order */
> >> +		int n = last_idx - handler_idx;
> >> +		memmove(unreg_handler, unreg_handler + 1,
> >> +			sizeof(struct rte_dispatcher_handler) * n);
> >> +	}
> >> +
> >> +	lcore->num_handlers--;
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +static int
> >> +evd_uninstall_handler(struct rte_dispatcher *dispatcher,
> >> +		      int handler_id)
> >> +{
> >> +	unsigned int lcore_id;
> >> +
> >> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> >> +		struct rte_dispatcher_lcore *lcore =
> >> +			&dispatcher->lcores[lcore_id];
> >> +		int rc;
> >> +
> >> +		rc = evd_lcore_uninstall_handler(lcore, handler_id);
> >> +
> >> +		if (rc < 0)
> >> +			return rc;
> >> +	}
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_unregister(uint8_t id, int handler_id)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	int rc;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	rc = evd_uninstall_handler(dispatcher, handler_id);
> >> +
> >> +	return rc;
> >> +}
> >> +
> >> +static struct rte_dispatcher_finalizer*
> >> +evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
> >> +		       int handler_id)
> >> +{
> >> +	int i;
> >> +
> >> +	for (i = 0; i < dispatcher->num_finalizers; i++) {
> >> +		struct rte_dispatcher_finalizer *finalizer =
> >> +			&dispatcher->finalizers[i];
> >> +
> >> +		if (finalizer->id == handler_id)
> >> +			return finalizer;
> >> +	}
> >> +
> >> +	return NULL;
> >> +}
> >> +
> >> +static int
> >> +evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
> >> +{
> >> +	int finalizer_id = 0;
> >> +
> >> +	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
> >> +		finalizer_id++;
> >> +
> >> +	return finalizer_id;
> >> +}
> >> +
> >> +static struct rte_dispatcher_finalizer *
> >> +evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
> >> +{
> >> +	int finalizer_idx;
> >> +	struct rte_dispatcher_finalizer *finalizer;
> >> +
> >> +	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
> >> +		return NULL;
> >> +
> >> +	finalizer_idx = dispatcher->num_finalizers;
> >> +	finalizer = &dispatcher->finalizers[finalizer_idx];
> >> +
> >> +	finalizer->id = evd_alloc_finalizer_id(dispatcher);
> >> +
> >> +	dispatcher->num_finalizers++;
> >> +
> >> +	return finalizer;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_finalize_register(uint8_t id,
> >> +			      rte_dispatcher_finalize_t finalize_fun,
> >> +			      void *finalize_data)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	struct rte_dispatcher_finalizer *finalizer;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	finalizer = evd_alloc_finalizer(dispatcher);
> >> +
> >> +	if (finalizer == NULL)
> >> +		return -ENOMEM;
> >> +
> >> +	finalizer->finalize_fun = finalize_fun;
> >> +	finalizer->finalize_data = finalize_data;
> >> +
> >> +	return finalizer->id;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_finalize_unregister(uint8_t id, int handler_id)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	struct rte_dispatcher_finalizer *unreg_finalizer;
> >> +	int finalizer_idx;
> >> +	uint16_t last_idx;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, handler_id);
> >> +
> >> +	if (unreg_finalizer == NULL)
> >> +		return -EINVAL;
> >> +
> >
> > Same as above comment in rte_dispatcher_unregister, base address needs
> > to be subtracted from unreg_finalizer
> >
> 
> Yes.
> 
> >
> >> +	finalizer_idx = &dispatcher->finalizers[0] - unreg_finalizer;
> >> +
> >> +	last_idx = dispatcher->num_finalizers - 1;
> >> +
> >> +	if (finalizer_idx != last_idx) {
> >> +		/* move all finalizers to maintain order */
> >> +		int n = last_idx - finalizer_idx;
> >> +		memmove(unreg_finalizer, unreg_finalizer + 1,
> >> +			sizeof(struct rte_dispatcher_finalizer) * n);
> >> +	}
> >> +
> >> +	dispatcher->num_finalizers--;
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +static int
> >> +evd_set_service_runstate(uint8_t id, int state)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	int rc;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	rc = rte_service_component_runstate_set(dispatcher->service_id,
> >> +						state);
> >> +
> >> +	if (rc != 0) {
> >> +		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
> >> +				 "service component run state to %d\n", rc,
> >> +				 state);
> >> +		RTE_ASSERT(0);
> >> +	}
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_start(uint8_t id)
> >> +{
> >> +	return evd_set_service_runstate(id, 1);
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_stop(uint8_t id)
> >> +{
> >> +	return evd_set_service_runstate(id, 0);
> >> +}
> >> +
> >> +static void
> >> +evd_aggregate_stats(struct rte_dispatcher_stats *result,
> >> +		    const struct rte_dispatcher_stats *part)
> >> +{
> >> +	result->poll_count += part->poll_count;
> >> +	result->ev_batch_count += part->ev_batch_count;
> >> +	result->ev_dispatch_count += part->ev_dispatch_count;
> >> +	result->ev_drop_count += part->ev_drop_count;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_stats_get(uint8_t id,
> >> +			       struct rte_dispatcher_stats *stats)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	unsigned int lcore_id;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >
> > Stats pointer needs to be validated for NULL before accessing
> >
> >
> 
> Yes.
> 
> Thanks a lot for your review comments.
> 
> >> +	*stats = (struct rte_dispatcher_stats) {};
> >> +
> >> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> >> +		struct rte_dispatcher_lcore *lcore =
> >> +			&dispatcher->lcores[lcore_id];
> >> +
> >> +		evd_aggregate_stats(stats, &lcore->stats);
> >> +	}
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +int
> >> +rte_dispatcher_stats_reset(uint8_t id)
> >> +{
> >> +	struct rte_dispatcher *dispatcher;
> >> +	unsigned int lcore_id;
> >> +
> >> +	EVD_VALID_ID_OR_RET_EINVAL(id);
> >> +	dispatcher = evd_get_dispatcher(id);
> >> +
> >> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> >> +		struct rte_dispatcher_lcore *lcore =
> >> +			&dispatcher->lcores[lcore_id];
> >> +
> >> +		lcore->stats = (struct rte_dispatcher_stats) {};
> >> +	}
> >> +
> >> +	return 0;
> >> +}
> >> diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
> >> new file mode 100644
> >> index 0000000000..6712687a08
> >> --- /dev/null
> >> +++ b/lib/dispatcher/rte_dispatcher.h
> >> @@ -0,0 +1,480 @@
> >> +/* SPDX-License-Identifier: BSD-3-Clause
> >> + * Copyright(c) 2023 Ericsson AB
> >> + */
> >> +
> >> +#ifndef __RTE_DISPATCHER_H__
> >> +#define __RTE_DISPATCHER_H__
> >> +
> >> +/**
> >> + * @file
> >> + *
> >> + * RTE Dispatcher
> >> + *
> >> + * The purpose of the dispatcher is to help decouple different parts
> >> + * of an application (e.g., modules), sharing the same underlying
> >> + * event device.
> >> + */
> >> +
> >> +#ifdef __cplusplus
> >> +extern "C" {
> >> +#endif
> >> +
> >> +#include <rte_eventdev.h>
> >> +
> >> +/**
> >> + * Function prototype for match callbacks.
> >> + *
> >> + * Match callbacks are used by an application to decide how the
> >> + * dispatcher distributes events to different parts of the
> >> + * application.
> >> + *
> >> + * The application is not expected to process the event at the point
> >> + * of the match call. Such matters should be deferred to the process
> >> + * callback invocation.
> >> + *
> >> + * The match callback may be used as an opportunity to prefetch data.
> >> + *
> >> + * @param event
> >> + *  Pointer to event
> >> + *
> >> + * @param cb_data
> >> + *  The pointer supplied by the application in
> >> + *  rte_dispatcher_register().
> >> + *
> >> + * @return
> >> + *   Returns true in case this event should be delivered (via
> >> + *   the process callback), and false otherwise.
> >> + */
> >> +typedef bool
> >> +(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
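
As an illustration (hypothetical, not part of the patch): a handler
that receives all events destined for a particular queue could use a
match callback along these lines, with the queue id passed as the
callback data:

	static bool
	module_a_match(const struct rte_event *event, void *cb_data)
	{
		/* hypothetical: select events by destination queue id */
		uint8_t queue_id = *(uint8_t *)cb_data;

		return event->queue_id == queue_id;
	}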
> >> +
> >> +/**
> >> + * Function prototype for process callbacks.
> >> + *
> >> + * The process callbacks are used by the dispatcher to deliver
> >> + * events for processing.
> >> + *
> >> + * @param event_dev_id
> >> + *  The originating event device id.
> >> + *
> >> + * @param event_port_id
> >> + *  The originating event port.
> >> + *
> >> + * @param events
> >> + *  Pointer to an array of events.
> >> + *
> >> + * @param num
> >> + *  The number of events in the @p events array.
> >> + *
> >> + * @param cb_data
> >> + *  The pointer supplied by the application in
> >> + *  rte_dispatcher_register().
> >> + */
> >> +
> >> +typedef void
> >> +(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
> >> +				  struct rte_event *events, uint16_t num,
> >> +				  void *cb_data);
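
A matching process callback might look like the sketch below
(module_a_handle_event is an assumed application function, not a DPDK
API):

	static void
	module_a_process(uint8_t event_dev_id, uint8_t event_port_id,
			 struct rte_event *events, uint16_t num, void *cb_data)
	{
		uint16_t i;

		RTE_SET_USED(event_dev_id);
		RTE_SET_USED(event_port_id);

		/* all events matched module_a_match(), delivered as a batch */
		for (i = 0; i < num; i++)
			module_a_handle_event(cb_data, &events[i]);
	}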
> >> +
> >> +/**
> >> + * Function prototype for finalize callbacks.
> >> + *
> >> + * The finalize callbacks are used by the dispatcher to notify the
> >> + * application it has delivered all events from a particular batch
> >> + * dequeued from the event device.
> >> + *
> >> + * @param event_dev_id
> >> + *  The originating event device id.
> >> + *
> >> + * @param event_port_id
> >> + *  The originating event port.
> >> + *
> >> + * @param cb_data
> >> + *  The pointer supplied by the application in
> >> + *  rte_dispatcher_finalize_register().
> >> + */
> >> +
> >> +typedef void
> >> +(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
> >> +				   void *cb_data);
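
A typical use is flushing a shared event output buffer (as described
further down in this header); a sketch, where module_output_buffer and
module_flush are assumed application-defined:

	static void
	module_finalize(uint8_t event_dev_id, uint8_t event_port_id,
			void *cb_data)
	{
		struct module_output_buffer *buf = cb_data;

		/* flush events buffered during this dequeue batch */
		module_flush(event_dev_id, event_port_id, buf);
	}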
> >> +
> >> +/**
> >> + * Dispatcher statistics
> >> + */
> >> +struct rte_dispatcher_stats {
> >> +	uint64_t poll_count;
> >> +	/**< Number of event dequeue calls made toward the event device. */
> >> +	uint64_t ev_batch_count;
> >> +	/**< Number of non-empty event batches dequeued from event device. */
> >> +	uint64_t ev_dispatch_count;
> >> +	/**< Number of events dispatched to a handler.*/
> >> +	uint64_t ev_drop_count;
> >> +	/**< Number of events dropped because no handler was found. */
> >> +};
> >> +
> >> +/**
> >> + * Create a dispatcher with the specified id.
> >> + *
> >> + * @param id
> >> + *  An application-specified, unique (across all dispatcher
> >> + *  instances) identifier.
> >> + *
> >> + * @param event_dev_id
> >> + *  The identifier of the event device from which this dispatcher
> >> + *  will dequeue events.
> >> + *
> >> + * @return
> >> + *   - 0: Success
> >> + *   - <0: Error code on failure
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id);
> >> +
> >> +/**
> >> + * Free a dispatcher.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_free(uint8_t id);
> >> +
> >> +/**
> >> + * Retrieve the service identifier of a dispatcher.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @param [out] service_id
> >> + *  A pointer to a caller-supplied buffer where the dispatcher's
> >> + *  service id will be stored.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - <0: Error code on failure.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
> >> +
> >> +/**
> >> + * Binds an event device port to a specific lcore on the specified
> >> + * dispatcher.
> >> + *
> >> + * This function configures the event port id to be used by the event
> >> + * dispatcher service, if run on the specified lcore.
> >> + *
> >> + * Multiple event device ports may be bound to the same lcore. A
> >> + * particular port must not be bound to more than one lcore.
> >> + *
> >> + * If the dispatcher service is mapped (with rte_service_map_lcore_set())
> >> + * to a lcore to which no ports are bound, the service function will be a
> >> + * no-operation.
> >> + *
> >> + * This function may be called by any thread (including unregistered
> >> + * non-EAL threads), but not while the dispatcher is running on the
> >> + * lcore specified by @c lcore_id.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @param event_port_id
> >> + *  The event device port identifier.
> >> + *
> >> + * @param batch_size
> >> + *  The batch size to use in rte_event_dequeue_burst(), for the
> >> + *  configured event device port and lcore.
> >> + *
> >> + * @param timeout
> >> + *  The timeout parameter to use in rte_event_dequeue_burst(), for the
> >> + *  configured event device port and lcore.
> >> + *
> >> + * @param lcore_id
> >> + *  The lcore by which this event port will be used.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - -ENOMEM: Unable to allocate sufficient resources.
> >> + *  - -EEXISTS: Event port is already configured.
> >> + *  - -EINVAL: Invalid arguments.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
> >> +					uint16_t batch_size, uint64_t timeout,
> >> +					unsigned int lcore_id);
> >> +
> >> +/**
> >> + * Unbind an event device port from a specific lcore.
> >> + *
> >> + * This function may be called by any thread (including unregistered
> >> + * non-EAL threads), but not while the dispatcher is running on
> >> + * lcore specified by @c lcore_id.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @param event_port_id
> >> + *  The event device port identifier.
> >> + *
> >> + * @param lcore_id
> >> + *  The lcore which was using this event port.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - -EINVAL: Invalid @c id.
> >> + *  - -ENOENT: Event port id not bound to this @c lcore_id.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
> >> +					    unsigned int lcore_id);
> >> +
> >> +/**
> >> + * Register an event handler.
> >> + *
> >> + * The match callback function is used to select if a particular event
> >> + * should be delivered, using the corresponding process callback
> >> + * function.
> >> + *
> >> + * The reason for having two distinct steps is to allow the dispatcher
> >> + * to deliver all events as a batch. This in turn will cause
> >> + * processing of a particular kind of events to happen in a
> >> + * back-to-back manner, improving cache locality.
> >> + *
> >> + * The list of handler callback functions is shared among all lcores,
> >> + * but will only be executed on lcores which have an eventdev port
> >> + * bound to them, and which are running the dispatcher service.
> >> + *
> >> + * An event is delivered to at most one handler. Events where no
> >> + * handler is found are dropped.
> >> + *
> >> + * The application must not depend on the order in which the match
> >> + * functions are invoked.
> >> + *
> >> + * Ordering of events is not guaranteed to be maintained between
> >> + * different deliver callbacks. For example, suppose there are two
> >> + * callbacks registered, matching different subsets of events arriving
> >> + * on an atomic queue. A batch of events [ev0, ev1, ev2] are dequeued
> >> + * on a particular port, all pertaining to the same flow. The match
> >> + * callback for registration A returns true for ev0 and ev2, and the
> >> + * matching function for registration B for ev1. In that scenario, the
> >> + * dispatcher may choose to deliver first [ev0, ev2] using A's deliver
> >> + * function, and then [ev1] to B - or vice versa.
> >> + *
> >> + * rte_dispatcher_register() may be called by any thread
> >> + * (including unregistered non-EAL threads), but not while the event
> >> + * dispatcher is running on any service lcore.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @param match_fun
> >> + *  The match callback function.
> >> + *
> >> + * @param match_cb_data
> >> + *  A pointer to some application-specific opaque data (or NULL),
> >> + *  which is supplied back to the application when match_fun is
> >> + *  called.
> >> + *
> >> + * @param process_fun
> >> + *  The process callback function.
> >> + *
> >> + * @param process_cb_data
> >> + *  A pointer to some application-specific opaque data (or NULL),
> >> + *  which is supplied back to the application when process_fun is
> >> + *  called.
> >> + *
> >> + * @return
> >> + *  - >= 0: The identifier for this registration.
> >> + *  - -ENOMEM: Unable to allocate sufficient resources.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_register(uint8_t id,
> >> +			      rte_dispatcher_match_t match_fun,
> >> +			      void *match_cb_data,
> >> +			      rte_dispatcher_process_t process_fun,
> >> +			      void *process_cb_data);
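
Tying the match and process callback sketches together, registration
might look like this (DISPATCHER_ID, queue_id and module_a_data are
assumed application-defined; the sketch treats any error as fatal):

	int handler_id;

	handler_id = rte_dispatcher_register(DISPATCHER_ID, module_a_match,
					     &queue_id, module_a_process,
					     module_a_data);
	if (handler_id < 0)
		rte_panic("Unable to register dispatcher handler\n");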
> >> +
> >> +/**
> >> + * Unregister an event handler.
> >> + *
> >> + * This function may be called by any thread (including unregistered
> >> + * non-EAL threads), but not while the dispatcher is running on
> >> + * any service lcore.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @param handler_id
> >> + *  The handler registration id returned by the original
> >> + *  rte_dispatcher_register() call.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_unregister(uint8_t id, int handler_id);
> >> +
> >> +/**
> >> + * Register a finalize callback function.
> >> + *
> >> + * An application may optionally install one or more finalize
> >> + * callbacks.
> >> + *
> >> + * All finalize callbacks are invoked by the dispatcher when a
> >> + * complete batch of events (retrieve using
> >> +rte_event_dequeue_burst())
> >> + * have been delivered to the application (or have been dropped).
> >> + *
> >> + * The finalize callback is not tied to any particular handler.
> >> + *
> >> + * The finalize callback provides an opportunity for the application
> >> + * to do per-batch processing. One case where this may be useful is if
> >> + * an event output buffer is used, and is shared among several
> >> + * handlers. In such a case, proper output buffer flushing may be
> >> + * assured using a finalize callback.
> >> + *
> >> + * rte_dispatcher_finalize_register() may be called by any thread
> >> + * (including unregistered non-EAL threads), but not while the
> >> + * dispatcher is running on any service lcore.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @param finalize_fun
> >> + *  The function called after completing the processing of a
> >> + *  dequeue batch.
> >> + *
> >> + * @param finalize_data
> >> + *  A pointer to some application-specific opaque data (or NULL),
> >> + *  which is supplied back to the application when @c finalize_fun is
> >> + *  called.
> >> + *
> >> + * @return
> >> + *  - >= 0: The identifier for this registration.
> >> + *  - -ENOMEM: Unable to allocate sufficient resources.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_finalize_register(uint8_t id,
> >> +				 rte_dispatcher_finalize_t finalize_fun,
> >> +				 void *finalize_data);
> >> +
> >> +/**
> >> + * Unregister a finalize callback.
> >> + *
> >> + * This function may be called by any thread (including unregistered
> >> + * non-EAL threads), but not while the dispatcher is running on
> >> + * any service lcore.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @param reg_id
> >> + *  The finalize registration id returned by the original
> >> + *  rte_dispatcher_finalize_register() call.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_finalize_unregister(uint8_t id, int reg_id);
> >> +
> >> +/**
> >> + * Start a dispatcher instance.
> >> + *
> >> + * Enables the dispatcher service.
> >> + *
> >> + * The underlying event device must have been started prior to calling
> >> + * rte_dispatcher_start().
> >> + *
> >> + * For the dispatcher to actually perform work (i.e., dispatch
> >> + * events), its service must have been mapped to one or more service
> >> + * lcores, and its service run state set to '1'. A dispatcher's
> >> + * service is retrieved using rte_dispatcher_service_id_get().
> >> + *
> >> + * Each service lcore to which the dispatcher is mapped should
> >> + * have at least one event port configured. Such configuration is
> >> + * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
> >> + * starting the dispatcher.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - -EINVAL: Invalid @c id.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_start(uint8_t id);
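
Putting the above steps together, a minimal bring-up on one service
lcore might be (a sketch only; event_dev_id, event_port_id and lcore_id
are assumed to be configured already, lcore_id is assumed to be a
service lcore, the batch size of 32 and zero timeout are arbitrary
example values, and return codes are left unchecked for brevity):

	uint32_t service_id;

	rte_dispatcher_create(DISPATCHER_ID, event_dev_id);
	rte_dispatcher_bind_port_to_lcore(DISPATCHER_ID, event_port_id,
					  32, 0, lcore_id);

	rte_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
	rte_service_map_lcore_set(service_id, lcore_id, 1);
	rte_service_lcore_start(lcore_id);

	/* ... register handlers, start the event device ... */

	rte_dispatcher_start(DISPATCHER_ID);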
> >> +
> >> +/**
> >> + * Stop a running dispatcher instance.
> >> + *
> >> + * Disables the dispatcher service.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - -EINVAL: Invalid @c id.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_stop(uint8_t id);
> >> +
> >> +/**
> >> + * Retrieve statistics for a dispatcher instance.
> >> + *
> >> + * This function is MT safe and may be called by any thread
> >> + * (including unregistered non-EAL threads).
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + * @param[out] stats
> >> + *   A pointer to a structure to fill with statistics.
> >> + * @return
> >> + *  - 0: Success
> >> + *  - -EINVAL: The @c id parameter was invalid.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_stats_get(uint8_t id,
> >> +			       struct rte_dispatcher_stats *stats);
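
For example, a control thread could print the counters roughly as
follows (a sketch; assumes <stdio.h> and <inttypes.h>):

	struct rte_dispatcher_stats stats;

	if (rte_dispatcher_stats_get(DISPATCHER_ID, &stats) == 0)
		printf("%" PRIu64 " events dispatched over %" PRIu64 " polls, "
		       "%" PRIu64 " dropped\n", stats.ev_dispatch_count,
		       stats.poll_count, stats.ev_drop_count);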
> >> +
> >> +/**
> >> + * Reset statistics for a dispatcher instance.
> >> + *
> >> + * This function may be called by any thread (including unregistered
> >> + * non-EAL threads), but may not produce the correct result if the
> >> + * dispatcher is running on any service lcore.
> >> + *
> >> + * @param id
> >> + *  The dispatcher identifier.
> >> + *
> >> + * @return
> >> + *  - 0: Success
> >> + *  - -EINVAL: The @c id parameter was invalid.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dispatcher_stats_reset(uint8_t id);
> >> +
> >> +#ifdef __cplusplus
> >> +}
> >> +#endif
> >> +
> >> +#endif /* __RTE_DISPATCHER_H__ */
> >> diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
> >> new file mode 100644
> >> index 0000000000..8f9ad96522
> >> --- /dev/null
> >> +++ b/lib/dispatcher/version.map
> >> @@ -0,0 +1,20 @@
> >> +EXPERIMENTAL {
> >> +	global:
> >> +
> >> +	# added in 23.11
> >> +	rte_dispatcher_create;
> >> +	rte_dispatcher_free;
> >> +	rte_dispatcher_service_id_get;
> >> +	rte_dispatcher_bind_port_to_lcore;
> >> +	rte_dispatcher_unbind_port_from_lcore;
> >> +	rte_dispatcher_register;
> >> +	rte_dispatcher_unregister;
> >> +	rte_dispatcher_finalize_register;
> >> +	rte_dispatcher_finalize_unregister;
> >> +	rte_dispatcher_start;
> >> +	rte_dispatcher_stop;
> >> +	rte_dispatcher_stats_get;
> >> +	rte_dispatcher_stats_reset;
> >> +
> >> +	local: *;
> >> +};
> >> diff --git a/lib/meson.build b/lib/meson.build
> >> index 099b0ed18a..3093b338d2 100644
> >> --- a/lib/meson.build
> >> +++ b/lib/meson.build
> >> @@ -35,6 +35,7 @@ libraries = [
> >>           'distributor',
> >>           'efd',
> >>           'eventdev',
> >> +        'dispatcher', # dispatcher depends on eventdev
> >>           'gpudev',
> >>           'gro',
> >>           'gso',
> >> @@ -81,6 +82,7 @@ optional_libs = [
> >>           'cfgfile',
> >>           'compressdev',
> >>           'cryptodev',
> >> +        'dispatcher',
> >>           'distributor',
> >>           'dmadev',
> >>           'efd',
> >> --
> >> 2.34.1
> >

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-17 16:46                                   ` Naga Harish K, S V
  2023-09-19  9:20                                     ` Mattias Rönnblom
@ 2023-09-20  9:32                                     ` Jerin Jacob
  2023-09-21  5:59                                       ` Naga Harish K, S V
  1 sibling, 1 reply; 102+ messages in thread
From: Jerin Jacob @ 2023-09-20  9:32 UTC (permalink / raw)
  To: Naga Harish K, S V
  Cc: mattias.ronnblom, dev, Jerin Jacob, techboard, Van Haaren, Harry,
	hofors, Nilsson, Peter, Heng Wang, Pavan Nikhilesh, Gujjar,
	Abhinandan S, Carrillo, Erik G, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Liang Ma, Mccarthy, Peter, Yan, Zhirun

On Mon, Sep 18, 2023 at 5:26 AM Naga Harish K, S V
<s.v.naga.harish.k@intel.com> wrote:
>
>
>
> > -----Original Message-----
> > From: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> > Sent: Monday, September 4, 2023 6:33 PM
> > To: dev@dpdk.org
> > Cc: Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van Haaren,
> > Harry <harry.van.haaren@intel.com>; hofors@lysator.liu.se; Nilsson, Peter
> > <peter.j.nilsson@ericsson.com>; Heng Wang <heng.wang@ericsson.com>;
> > Naga Harish K, S V <s.v.naga.harish.k@intel.com>; Pavan Nikhilesh
> > <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
> > <abhinandan.gujjar@intel.com>; Carrillo, Erik G <Erik.G.Carrillo@intel.com>;
> > Shijith Thotton <sthotton@marvell.com>; Hemant Agrawal
> > <hemant.agrawal@nxp.com>; Sachin Saxena <sachin.saxena@oss.nxp.com>;
> > Liang Ma <liangma@liangbit.com>; Mccarthy, Peter
> > <Peter.Mccarthy@intel.com>; Yan, Zhirun <Zhirun.Yan@intel.com>;
> > mattias.ronnblom <mattias.ronnblom@ericsson.com>
> > Subject: [PATCH v3 1/3] lib: introduce dispatcher library
> >
> > The purpose of the dispatcher library is to help reduce coupling in an
> > Eventdev-based DPDK application.
> >
> > In addition, the dispatcher also provides a convenient and flexible way for the
> > application to use service cores for application-level processing.
> >
> > Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> > Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> > Reviewed-by: Heng Wang <heng.wang@ericsson.com>
> >
> > --
> >
> > PATCH v3:
> >  o To underline its optional character and since it does not provide
> >    hardware abstraction, the event dispatcher is now a separate
> >    library.
> >  o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
> >    shorter and to avoid the rte_event_* namespace.
> >
>
> Rte_dispatcher basically dispatches events, but it feels like the name does not convey that.
> Also, it is like any other adapter service that can reside within the eventdev directory.
>
> I can see some discussion in previous threads related to the placement of the dispatcher library.
>
> It is an optional eventdev application service, not enforcing this programming model on the application.
> The documentation may need to be updated to mention that this is optional.
>
> If any hardware comes up with the dispatcher feature, then this library may need to be moved inside the eventdev library later.

I would like to follow the YAGNI principle in the eventdev library.
Even if such HW comes (I assume it will not), the interface should not
look like that. No HW will compare a bunch of function pointers and
call the callback, so the interface for HW enablement will look
different. We need to model the API based on HW for device libraries,
and based on CPU dynamics for SW libraries.

Also, there is no need to tie this library/framework to eventdev only;
other than using rte_event_dequeue() to pull packets, it has no
eventdev significance.
The library's scope is just to pull packets from a source, compare them
with N registered matches, and call the respective process callback.
The dispatcher source could be rte_ethdev_rx_burst or a ring.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* RE: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-20  9:32                                     ` Jerin Jacob
@ 2023-09-21  5:59                                       ` Naga Harish K, S V
  2023-09-21  7:23                                         ` Jerin Jacob
  0 siblings, 1 reply; 102+ messages in thread
From: Naga Harish K, S V @ 2023-09-21  5:59 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: mattias.ronnblom, dev, Jerin Jacob, techboard, Van Haaren, Harry,
	hofors, Nilsson, Peter, Heng Wang, Pavan Nikhilesh, Gujjar,
	Abhinandan S, Carrillo, Erik G, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Liang Ma, Mccarthy, Peter, Yan, Zhirun



> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Wednesday, September 20, 2023 3:02 PM
> To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>
> Cc: mattias.ronnblom <mattias.ronnblom@ericsson.com>; dev@dpdk.org;
> Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van Haaren, Harry
> <harry.van.haaren@intel.com>; hofors@lysator.liu.se; Nilsson, Peter
> <peter.j.nilsson@ericsson.com>; Heng Wang <heng.wang@ericsson.com>;
> Pavan Nikhilesh <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Carrillo, Erik G <erik.g.carrillo@intel.com>;
> Shijith Thotton <sthotton@marvell.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Sachin Saxena <sachin.saxena@oss.nxp.com>;
> Liang Ma <liangma@liangbit.com>; Mccarthy, Peter
> <peter.mccarthy@intel.com>; Yan, Zhirun <zhirun.yan@intel.com>
> Subject: Re: [PATCH v3 1/3] lib: introduce dispatcher library
> 
> On Mon, Sep 18, 2023 at 5:26 AM Naga Harish K, S V
> <s.v.naga.harish.k@intel.com> wrote:
> >
> >
> >
> > > -----Original Message-----
> > > From: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> > > Sent: Monday, September 4, 2023 6:33 PM
> > > To: dev@dpdk.org
> > > Cc: Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van
> > > Haaren, Harry <harry.van.haaren@intel.com>; hofors@lysator.liu.se;
> > > Nilsson, Peter <peter.j.nilsson@ericsson.com>; Heng Wang
> > > <heng.wang@ericsson.com>; Naga Harish K, S V
> > > <s.v.naga.harish.k@intel.com>; Pavan Nikhilesh
> > > <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
> > > <abhinandan.gujjar@intel.com>; Carrillo, Erik G
> > > <Erik.G.Carrillo@intel.com>; Shijith Thotton <sthotton@marvell.com>;
> > > Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> > > <sachin.saxena@oss.nxp.com>; Liang Ma <liangma@liangbit.com>;
> > > Mccarthy, Peter <Peter.Mccarthy@intel.com>; Yan, Zhirun
> > > <Zhirun.Yan@intel.com>; mattias.ronnblom
> > > <mattias.ronnblom@ericsson.com>
> > > Subject: [PATCH v3 1/3] lib: introduce dispatcher library
> > >
> > > The purpose of the dispatcher library is to help reduce coupling in
> > > an Eventdev-based DPDK application.
> > >
> > > In addition, the dispatcher also provides a convenient and flexible
> > > way for the application to use service cores for application-level processing.
> > >
> > > Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> > > Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> > > Reviewed-by: Heng Wang <heng.wang@ericsson.com>
> > >
> > > --
> > >
> > > PATCH v3:
> > >  o To underline its optional character and since it does not provide
> > >    hardware abstraction, the event dispatcher is now a separate
> > >    library.
> > >  o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
> > >    shorter and to avoid the rte_event_* namespace.
> > >
> >
> > Rte_dispatcher basically dispatches events, but it feels like the name
> > does not convey that.
> > Also, it is like any other adapter service that can reside within the
> > eventdev directory.
> >
> > I can see some discussion in previous threads related to the placement
> > of the dispatcher library.
> >
> > It is an optional eventdev application service, not enforcing this
> > programming model on the application.
> > The documentation may need to be updated to mention that this is
> > optional.
> >
> > If any hardware comes up with the dispatcher feature, then this library
> > may need to be moved inside the eventdev library later.
> 
> I would like to follow the YAGNI principle in the eventdev library.

What is the YAGNI principle? (For understanding purposes.)

> Even if such HW comes (I assume it will not), the interface should not
> look like that. No HW will compare a bunch of function pointers and
> call the callback, so the interface for HW enablement will look
> different. We need to model the API based on HW for device libraries,
> and based on CPU dynamics for SW libraries.
> 
> Also, there is no need to tie this library/framework to eventdev only;
> other than using rte_event_dequeue() to pull packets, it has no
> eventdev significance.
> The library's scope is just to pull packets from a source, compare them
> with N registered matches, and call the respective process callback.
> The dispatcher source could be rte_ethdev_rx_burst or a ring.

The current implementation of rte_dispatcher is eventdev-centric.
All the data structures are defined around the eventdev.

The documentation also mentions it is for eventdev-based applications.
Maybe the documentation needs info on supporting different sources (ethdev, ring, etc.).



^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-21  5:59                                       ` Naga Harish K, S V
@ 2023-09-21  7:23                                         ` Jerin Jacob
  0 siblings, 0 replies; 102+ messages in thread
From: Jerin Jacob @ 2023-09-21  7:23 UTC (permalink / raw)
  To: Naga Harish K, S V
  Cc: mattias.ronnblom, dev, Jerin Jacob, techboard, Van Haaren, Harry,
	hofors, Nilsson, Peter, Heng Wang, Pavan Nikhilesh, Gujjar,
	Abhinandan S, Carrillo, Erik G, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Liang Ma, Mccarthy, Peter, Yan, Zhirun

On Thu, Sep 21, 2023 at 11:29 AM Naga Harish K, S V
<s.v.naga.harish.k@intel.com> wrote:
>
>
>
> > -----Original Message-----
> > From: Jerin Jacob <jerinjacobk@gmail.com>
> > Sent: Wednesday, September 20, 2023 3:02 PM
> > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>
> > Cc: mattias.ronnblom <mattias.ronnblom@ericsson.com>; dev@dpdk.org;
> > Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van Haaren, Harry
> > <harry.van.haaren@intel.com>; hofors@lysator.liu.se; Nilsson, Peter
> > <peter.j.nilsson@ericsson.com>; Heng Wang <heng.wang@ericsson.com>;
> > Pavan Nikhilesh <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
> > <abhinandan.gujjar@intel.com>; Carrillo, Erik G <erik.g.carrillo@intel.com>;
> > Shijith Thotton <sthotton@marvell.com>; Hemant Agrawal
> > <hemant.agrawal@nxp.com>; Sachin Saxena <sachin.saxena@oss.nxp.com>;
> > Liang Ma <liangma@liangbit.com>; Mccarthy, Peter
> > <peter.mccarthy@intel.com>; Yan, Zhirun <zhirun.yan@intel.com>
> > Subject: Re: [PATCH v3 1/3] lib: introduce dispatcher library
> >
> > On Mon, Sep 18, 2023 at 5:26 AM Naga Harish K, S V
> > <s.v.naga.harish.k@intel.com> wrote:
> > >
> > >
> > >
> > > > -----Original Message-----
> > > > From: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> > > > Sent: Monday, September 4, 2023 6:33 PM
> > > > To: dev@dpdk.org
> > > > Cc: Jerin Jacob <jerinj@marvell.com>; techboard@dpdk.org; Van
> > > > Haaren, Harry <harry.van.haaren@intel.com>; hofors@lysator.liu.se;
> > > > Nilsson, Peter <peter.j.nilsson@ericsson.com>; Heng Wang
> > > > <heng.wang@ericsson.com>; Naga Harish K, S V
> > > > <s.v.naga.harish.k@intel.com>; Pavan Nikhilesh
> > > > <pbhagavatula@marvell.com>; Gujjar, Abhinandan S
> > > > <abhinandan.gujjar@intel.com>; Carrillo, Erik G
> > > > <Erik.G.Carrillo@intel.com>; Shijith Thotton <sthotton@marvell.com>;
> > > > Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> > > > <sachin.saxena@oss.nxp.com>; Liang Ma <liangma@liangbit.com>;
> > > > Mccarthy, Peter <Peter.Mccarthy@intel.com>; Yan, Zhirun
> > > > <Zhirun.Yan@intel.com>; mattias.ronnblom
> > > > <mattias.ronnblom@ericsson.com>
> > > > Subject: [PATCH v3 1/3] lib: introduce dispatcher library
> > > >
> > > > The purpose of the dispatcher library is to help reduce coupling in
> > > > an Eventdev-based DPDK application.
> > > >
> > > > In addition, the dispatcher also provides a convenient and flexible
> > > > way for the application to use service cores for application-level processing.
> > > >
> > > > Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> > > > Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> > > > Reviewed-by: Heng Wang <heng.wang@ericsson.com>
> > > >
> > > > --
> > > >
> > > > PATCH v3:
> > > >  o To underline its optional character and since it does not provide
> > > >    hardware abstraction, the event dispatcher is now a separate
> > > >    library.
> > > >  o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
> > > >    shorter and to avoid the rte_event_* namespace.
> > > >
> > >
> > > Rte_dispatcher is basically dispatching events but it feels like the name does
> > not convey that.
> > > Also, it is like any other adapter service that can reside within the eventdev
> > directory.
> > >
> > > I can see some discussion in previous threads related to the placement of the
> > dispatcher library.
> > >
> > > It is an optional eventdev application service, not enforcing this
> > programming model to the application.
> > > The documentation may need to be updated and mention that this is
> > optional.
> > >
> > > If any hardware comes up with the dispatcher feature, then this library may
> > need to be moved inside eventdev library later.
> >
> > I would like to follow the YAGNI principle in the eventdev library.
>
> What is YAGNI principle? for understanding purposes.

https://www.techtarget.com/whatis/definition/You-arent-gonna-need-it

What I meant by that: if this is meant to abstract a HW device
feature (on the point above: if any hardware comes up with the
dispatcher feature, then this library may need to be moved inside the
eventdev library later), let's define the API based on a device
definition and its capability. If it is targeting an SW library, align
the API based on that.

For example, if this needed to be implemented in HW as a device feature,
I would implement it as follows:
- CAM as ACL or EM to store the rules
- A key extractor to extract the fields of interest (match actions),
similar to rte_flow flow patterns
- An action similar to the rte_flow mark action
(https://doc.dpdk.org/guides/prog_guide/rte_flow.html#action-mark).
- When a packet/event is delivered to the application, HW stores the
mark value in the metadata upon match.

For such HW, trying to abstract it with the current API proposal is not
efficient. The other way around - addressing the existing SW use case
with an API library modeled around the above HW primitives - is not
efficient either.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-19 10:58                                   ` Jerin Jacob
@ 2023-09-21 16:47                                     ` Mattias Rönnblom
  2023-09-21 17:47                                       ` Jerin Jacob
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-21 16:47 UTC (permalink / raw)
  To: Jerin Jacob, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Morten Brørup

On 2023-09-19 12:58, Jerin Jacob wrote:
> On Mon, Sep 4, 2023 at 6:39 PM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>>
>> The purpose of the dispatcher library is to help reduce coupling in an
>> Eventdev-based DPDK application.
>>
>> In addition, the dispatcher also provides a convenient and flexible
>> way for the application to use service cores for application-level
>> processing.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> 
> High level architecture comment
> --------------------------------
> 
> 1) I think we don't need to tie this library ONLY to eventdev
> applications. It can be used with poll mode as well;
> that way, a traditional pipeline application with ethdev as the source
> could use this library to dispatch the packets.
> 

They could potentially use a library *like this*, but I'm not sure it 
should be this library, or at least I think it should be a different API 
(better type checking, plus no obvious benefit of being more generic).

Another option for a traditional app calling rte_eth_rx_burst() directly 
is to start using eventdev. :)

> We don't need to implement that in the first version, but the API can
> make room for such abstractions.
> 
> Based on my understanding, in the fast path it has means to:
> a) Pull out the events using rte_event_dequeue()
> b) Compare with registered match functions and call process upon match.
> 
> If we abstract (a) as rte_dispatcher_source, we could pull from ethdev
> via rte_eth_rx_burst() or
> from a ring via its dequeue burst API, based on the rte_dispatcher_source
> selected in the dispatch configuration,
> and we can use different service function pointers to have a different
> service core implementation per source without affecting performance.
> 

It could be generalized, for sure. I don't think it should be, at this 
point at least.

Non-event dev events could - and at this point I'm leaning toward 
*should* - be consumed as a different DPDK service, but potentially on 
the same lcore.

If you would want to prepare for a future with several different event 
sources, one could consider reintroducing the word "event" somewhere in 
the dispatcher's name. So you would have
rte_event_dispatcher.h
rte_eth_dispatcher.h

or

rte_dispatcher_event.h
rte_dispatcher_eth.h

> High level cosmetic comment
> ----------------------------------------------------
> 1)Missing doxygen connection- See doc/api/doxy-api-index.md
> 

rte_dispatcher.h is listed under **classification**, but this change is 
in the programming guide patch. I'll move it to the patch containing the 
header file.


> Process related comment
> ------------------------------------
> 1) Documentation does not need a separate patch. All recent library
> changes include documentation in the same patch.
> You could have the doc and API header file as the first patch and
> the implementation as subsequent patches.
> 
> 

I'm not sure how this is an improvement. Can you elaborate? For me, it 
just seems like a change.

Are there some guidelines on how to split a larger change into a patch 
set? A section on this matter in the contribution guide would be great.

> 
>> diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
>> new file mode 100644
>> index 0000000000..6712687a08
>> --- /dev/null
>> +++ b/lib/dispatcher/rte_dispatcher.h
>> @@ -0,0 +1,480 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2023 Ericsson AB
>> + */
>> +
>> +#ifndef __RTE_DISPATCHER_H__
>> +#define __RTE_DISPATCHER_H__
>> +
> 
> 
> All new API should be experimental. See
> https://elixir.bootlin.com/dpdk/latest/source/lib/graph/rte_graph.h#L12
> example.
> 

Noted.

> 
>> +/**
>> + * @file
>> + *
>> + * RTE Dispatcher
>> + *
>> + * The purpose of the dispatcher is to help decouple different parts
>> + * of an application (e.g., modules), sharing the same underlying
>> + * event device.
>> +
>> +/**
>> + * Function prototype for match callbacks.
>> + *
>> + * Match callbacks are used by an application to decide how the
>> + * dispatcher distributes events to different parts of the
>> + * application.
>> + *
>> + * The application is not expected to process the event at the point
>> + * of the match call. Such matters should be deferred to the process
>> + * callback invocation.
>> + *
>> + * The match callback may be used as an opportunity to prefetch data.
>> + *
>> + * @param event
>> + *  Pointer to event
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_dispatcher_register().
>> + *
>> + * @return
>> + *   Returns true in case this event should be delivered (via
>> + *   the process callback), and false otherwise.
>> + */
>> +typedef bool
>> +(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
> 
> 
> a) Can we use void *event, so that it can be used with an mbuf or
> another type by casting in the callback implementation?
> 
> b) I was thinking about how we can avoid this function pointer and
> enable better performance at the architecture level.
> 
> Both x86 and ARM have vector instructions[1] to form a vector from
> various offsets in memory and compare N events
> in one shot. That is, we could express match data like "offset X has
> value Y" and "offset Z has value A".
> I know it may not be good for existing applications using these APIs,
> but I believe it will be more performance
> effective. If it makes sense, you can adapt to this. (Something to think about.)
> 

There may be a future development where you try to shave off a few of 
the circa 10 clock cycles per event of overhead that the current 
implementation incurs. 10 cc is not a lot though. Your eventdev will need
something like 10x that.

With link-time optimizations, the matching function call will go away. 
I'm not sure how much auto-vectorization will happen. (The number quoted 
above is without LTO.)

The event dispatcher as a separate library will result in an extra 
function call across a shared object boundary, which is not exactly for 
free. (The 10 cc are for static linking.)

> 
> [1]
> https://developer.arm.com/documentation/den0018/a/NEON-and-VFP-Instruction-Summary/NEON-general-data-processing-instructions/VTBL
> 
>> +
>> +/**
>> + * Function prototype for process callbacks.
>> + *
>> + * The process callbacks are used by the dispatcher to deliver
>> + * events for processing.
>> + *
>> + * @param event_dev_id
>> + *  The originating event device id.
>> + *
>> + * @param event_port_id
>> + *  The originating event port.
>> + *
>> + * @param events
>> + *  Pointer to an array of events.
>> + *
>> + * @param num
>> + *  The number of events in the @p events array.
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_dispatcher_register().
>> + */
>> +
>> +typedef void
>> +(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
>> +                                 struct rte_event *events, uint16_t num,
>> +                                 void *cb_data);
> 
> Same as the above comment: can event_port_id be changed to source_id?
> 
> 
>> +/**
>> + * Create a dispatcher with the specified id.
>> + *
>> + * @param id
>> + *  An application-specified, unique (across all dispatcher
>> + *  instances) identifier.
>> + *
>> + * @param event_dev_id
>> + *  The identifier of the event device from which this dispatcher
>> + *  will dequeue events.
>> + *
>> + * @return
>> + *   - 0: Success
>> + *   - <0: Error code on failure
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id);
> 
> Following could be used to abstract more dispatcher sources, like
> 
> enum rte_dispatcher_source {
>           RTE_DISPATCHER_SOURCE_EVENTDEV, // Use rte_event_dequeue() to
> pull the packet
>           RTE_DISPATCHER_SOURCE_ETHDEV, // Use rte_ethdev_rx_burst() to
> pull the packet
> };
> 
> struct rte_dispatcher_params {
>              enum rte_dispatcher_source source;
>              union {
>                     /* Valid when source == RTE_DISPATCHER_SOURCE_EVENTDEV */
>                      struct event_source {
>                               uint8_t event_dev_id;
>                               uint8_t event_port_id;
>                      };
>                     /* Valid when source == RTE_DISPATCHER_SOURCE_ETHDEV */
>                      struct ethdev_source {
>                               uint16_t ethdev_dev_id;
>                               uint16_t ethdev_rx_queue_id;
>                      };
>               }
> };
> 
> rte_dispatcher_create(uint8_t id,  struct rte_dispatcher_params *parms);
> 
> I will stop reviewing at this point. Will review further based on the direction agreed on.

Thanks!

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-21 16:47                                     ` Mattias Rönnblom
@ 2023-09-21 17:47                                       ` Jerin Jacob
  0 siblings, 0 replies; 102+ messages in thread
From: Jerin Jacob @ 2023-09-21 17:47 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan, Morten Brørup

On Thu, Sep 21, 2023 at 10:17 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>
> On 2023-09-19 12:58, Jerin Jacob wrote:
> > On Mon, Sep 4, 2023 at 6:39 PM Mattias Rönnblom
> > <mattias.ronnblom@ericsson.com> wrote:
> >>
> >> The purpose of the dispatcher library is to help reduce coupling in an
> >> Eventdev-based DPDK application.
> >>
> >> In addition, the dispatcher also provides a convenient and flexible
> >> way for the application to use service cores for application-level
> >> processing.
> >>
> >> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> >> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> >
> > High level architecture comment
> > --------------------------------
> >
> > 1) I think we don't need to tie this library ONLY to eventdev
> > applications. It can be used with poll mode as well;
> > that way, a traditional pipeline application with ethdev as the source
> > could use this library to dispatch the packets.
> >
>
> They could potentially use a library *like this*, but I'm not sure it
> should be this library, or at least I think it should be a different API
> (better type checking, plus no obvious benefit of being more generic).

The only reason why I thought of this: it is cheap to do, as all the logic
for comparing match actions, packet/event aggregation and calling the
action function is the _same_,
and better type checking can be added by a separate callback for each
source. It would also allow more users to use this library.

I don't have a strong opinion on this library's API semantics,
other than the placement.
Feel free to ignore.

> Another option for a traditional app calling rte_eth_rx_burst() directly
> is to start using eventdev. :)

Yes. Those who can afford extra SW cores to emulate eventdev, or who have eventdev HW.

>
> > We don't need to implement that in the first version, but the API can
> > make room for such abstractions.
> >
> > Based on my understanding, in the fast path it has means to:
> > a) Pull out the events using rte_event_dequeue()
> > b) Compare with registered match functions and call process upon match.
> >
> > If we abstract (a) as rte_dispatcher_source, we could pull from ethdev
> > via rte_eth_rx_burst() or
> > from a ring via its dequeue burst API, based on the rte_dispatcher_source
> > selected in the dispatch configuration,
> > and we can use different service function pointers to have a different
> > service core implementation per source without affecting performance.
> >
>
> It could be generalized, for sure. I don't think it should be, at this
> point at least.
>
> Non-event dev events could - and at this point I'm leaning toward
> *should* - be consumed as a different DPDK service, but potentially on
> the same lcore.
>
> If you would want to prepare for a future with several different event
> sources, one could consider reintroducing the word "event" somewhere in
> the dispatcher's name. So you would have
> rte_event_dispatcher.h
> rte_eth_dispatcher.h
>
> or
>
> rte_dispatcher_event.h
> rte_dispatcher_eth.h

Yes.

> > High level cosmetic comment
> > ----------------------------------------------------
> > 1)Missing doxygen connection- See doc/api/doxy-api-index.md
> >
>
> rte_dispatcher.h is listed under **classification**, but this change is
> in the programming guide patch. I'll move it to the patch containing the
> header file.
>
>
> > Process related comment
> > ------------------------------------
> > 1) Documentation does not need a separate patch. All recent library
> > changes include documentation in the same patch.
> > You could have the doc and API header file as the first patch and
> > the implementation as subsequent patches.
> >
> >
>
> I'm not sure how this is an improvement. Can you elaborate? For me, it
> just seems like a change.
>
> Are there some guidelines on how to split a larger change into a patch
> set? A section on this matter in the contribution guide would be great.

In general, more patches ease review and attract more reviewers.

The last library added to DPDK was lib/mldev. You can see:
git log lib/mldev/

There, operations like _create()/free() etc. were made as separate patches.

I leave it up to you and Thomas, as this library will be merged through the main tree.
No strong opinion.

>
> >
> >> diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
> >> new file mode 100644
> >> index 0000000000..6712687a08
> >> --- /dev/null
> >> +++ b/lib/dispatcher/rte_dispatcher.h
> >> @@ -0,0 +1,480 @@
> >> +/* SPDX-License-Identifier: BSD-3-Clause
> >> + * Copyright(c) 2023 Ericsson AB
> >> + */
> >> +
> >> +#ifndef __RTE_DISPATCHER_H__
> >> +#define __RTE_DISPATCHER_H__
> >> +
> >
> >
> > All new API should be experimental. See
> > https://elixir.bootlin.com/dpdk/latest/source/lib/graph/rte_graph.h#L12
> > example.
> >
>
> Noted.
>
> >
> >> +/**
> >> + * @file
> >> + *
> >> + * RTE Dispatcher
> >> + *
> >> + * The purpose of the dispatcher is to help decouple different parts
> >> + * of an application (e.g., modules), sharing the same underlying
> >> + * event device.
> >> +
> >> +/**
> >> + * Function prototype for match callbacks.
> >> + *
> >> + * Match callbacks are used by an application to decide how the
> >> + * dispatcher distributes events to different parts of the
> >> + * application.
> >> + *
> >> + * The application is not expected to process the event at the point
> >> + * of the match call. Such matters should be deferred to the process
> >> + * callback invocation.
> >> + *
> >> + * The match callback may be used as an opportunity to prefetch data.
> >> + *
> >> + * @param event
> >> + *  Pointer to event
> >> + *
> >> + * @param cb_data
> >> + *  The pointer supplied by the application in
> >> + *  rte_dispatcher_register().
> >> + *
> >> + * @return
> >> + *   Returns true in case this event should be delivered (via
> >> + *   the process callback), and false otherwise.
> >> + */
> >> +typedef bool
> >> +(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
> >
> >
> > a) Can we use void *event, so that it can be used with an mbuf or
> > another type by casting in the callback implementation?
> >
> > b) I was thinking about how we can avoid this function pointer and
> > enable better performance at the architecture level.
> >
> > Both x86 and ARM have vector instructions[1] to form a vector from
> > various offsets in memory and compare N events
> > in one shot. That is, we could express match data like "offset X has
> > value Y" and "offset Z has value A".
> > I know it may not be good for existing applications using these APIs,
> > but I believe it will be more performance
> > effective. If it makes sense, you can adapt to this. (Something to think about.)
> >
>
> There may be a future development where you try to shave off a few of
> the circa 10 clock cycles per event of overhead that the current

OK, as you wish.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-04 13:03                                 ` [PATCH v3 1/3] lib: introduce " Mattias Rönnblom
  2023-09-17 16:46                                   ` Naga Harish K, S V
  2023-09-19 10:58                                   ` Jerin Jacob
@ 2023-09-21 18:36                                   ` Jerin Jacob
  2023-09-22  6:32                                     ` Mattias Rönnblom
  2023-09-22  7:38                                   ` [PATCH v4 0/3] Add " Mattias Rönnblom
  3 siblings, 1 reply; 102+ messages in thread
From: Jerin Jacob @ 2023-09-21 18:36 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On Mon, Sep 4, 2023 at 6:39 PM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> The purpose of the dispatcher library is to help reduce coupling in an
> Eventdev-based DPDK application.
>
> In addition, the dispatcher also provides a convenient and flexible
> way for the application to use service cores for application-level
> processing.
>
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> Reviewed-by: Heng Wang <heng.wang@ericsson.com>
>

> +static inline void
> +evd_dispatch_events(struct rte_dispatcher *dispatcher,
> +                   struct rte_dispatcher_lcore *lcore,
> +                   struct rte_dispatcher_lcore_port *port,
> +                   struct rte_event *events, uint16_t num_events)
> +{
> +       int i;
> +       struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
> +       uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
> +       uint16_t drop_count = 0;
> +       uint16_t dispatch_count;
> +       uint16_t dispatched = 0;
> +
> +       for (i = 0; i < num_events; i++) {
> +               struct rte_event *event = &events[i];
> +               int handler_idx;
> +
> +               handler_idx = evd_lookup_handler_idx(lcore, event);
> +
> +               if (unlikely(handler_idx < 0)) {
> +                       drop_count++;
> +                       continue;
> +               }
> +
> +               bursts[handler_idx][burst_lens[handler_idx]] = *event;

Looks like it is caching the events to accumulate? What if the flow or
queue is configured as RTE_SCHED_TYPE_ORDERED?
Will it completely lose ordering, as the next rte_event_enqueue_burst()
will release the context?


Definition of RTE_SCHED_TYPE_ORDERED

#define RTE_SCHED_TYPE_ORDERED          0
/**< Ordered scheduling
 *
 * Events from an ordered flow of an event queue can be scheduled to multiple
 * ports for concurrent processing while maintaining the original event order.
 * This scheme enables the user to achieve high single flow throughput by
 * avoiding SW synchronization for ordering between ports which bound to cores.
 *
 * The source flow ordering from an event queue is maintained when events are
 * enqueued to their destination queue within the same ordered flow context.
 * An event port holds the context until application call
 * rte_event_dequeue_burst() from the same port, which implicitly releases
 * the context.
 * User may allow the scheduler to release the context earlier than that
 * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation.
 *
 * Events from the source queue appear in their original order when dequeued
 * from a destination queue.
 * Event ordering is based on the received event(s), but also other
 * (newly allocated or stored) events are ordered when enqueued within the same
 * ordered context. Events not enqueued (e.g. released or stored) within the
 * context are  considered missing from reordering and are skipped at this time
 * (but can be ordered again within another context).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v3 1/3] lib: introduce dispatcher library
  2023-09-21 18:36                                   ` Jerin Jacob
@ 2023-09-22  6:32                                     ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-22  6:32 UTC (permalink / raw)
  To: Jerin Jacob, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-09-21 20:36, Jerin Jacob wrote:
> On Mon, Sep 4, 2023 at 6:39 PM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>>
>> The purpose of the dispatcher library is to help reduce coupling in an
>> Eventdev-based DPDK application.
>>
>> In addition, the dispatcher also provides a convenient and flexible
>> way for the application to use service cores for application-level
>> processing.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
>> Reviewed-by: Heng Wang <heng.wang@ericsson.com>
>>
> 
>> +static inline void
>> +evd_dispatch_events(struct rte_dispatcher *dispatcher,
>> +                   struct rte_dispatcher_lcore *lcore,
>> +                   struct rte_dispatcher_lcore_port *port,
>> +                   struct rte_event *events, uint16_t num_events)
>> +{
>> +       int i;
>> +       struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
>> +       uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
>> +       uint16_t drop_count = 0;
>> +       uint16_t dispatch_count;
>> +       uint16_t dispatched = 0;
>> +
>> +       for (i = 0; i < num_events; i++) {
>> +               struct rte_event *event = &events[i];
>> +               int handler_idx;
>> +
>> +               handler_idx = evd_lookup_handler_idx(lcore, event);
>> +
>> +               if (unlikely(handler_idx < 0)) {
>> +                       drop_count++;
>> +                       continue;
>> +               }
>> +
>> +               bursts[handler_idx][burst_lens[handler_idx]] = *event;
> 
> Looks like it is caching the events to accumulate them? What if the flow
> or queue is configured as RTE_SCHED_TYPE_ORDERED?


The ordering guarantees (and lack thereof) are covered in detail in the 
programming guide.

"Delivery order" (the order the callbacks see the events) is maintained 
only for events destined for the same handler.

I have considered adding a flags field to the create function, which 
would allow an option (now, or in the future) to maintain strict 
ordering between handlers. In my mind, and in the applications where 
this pattern has been used in the past, the "clustering" of events going 
to the same handler is a feature, not a bug, since it greatly improves 
temporal cache locality and provides more opportunity for software 
prefetching/preloading. (Prefetching may already be done in the match 
function.)
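
To illustrate that last point, here is a minimal sketch (assuming the 
events carry mbufs, and using a hypothetical queue-id-based match) of a 
match callback that starts fetching packet data before the process 
callback runs:

#include <rte_mbuf.h>
#include <rte_prefetch.h>

static bool
match_and_prefetch(const struct rte_event *event, void *cb_data)
{
	uintptr_t queue_id = (uintptr_t)cb_data;

	if (event->queue_id != queue_id)
		return false;

	/* The event will be delivered to this handler's process
	 * callback shortly; warm up the cache in the meantime. */
	rte_prefetch0(rte_pktmbuf_mtod(event->mbuf, void *));

	return true;
}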

If your event device does clustering already, or if the application 
implements this pattern already, you will obviously see no gains. If 
neither of those is true, the application will likely suffer fewer 
cache misses, much outweighing the tiny bit of extra processing required 
in the event dispatcher.

This reshuffling ("clustering") of events is the only thing I think 
could be offloaded to hardware. The event device is already free to 
reshuffle events as long as it conforms to whatever ordering guarantees 
the eventdev scheduling types in question require, but the event 
dispatcher relaxes those further, and gives the platform further hints 
about which events are actually related.

> Will it completely lose ordering, as the next rte_event_enqueue_burst()
> will release the context?

It is the dequeue operation that will release the context (provided 
"implicit release" is not disabled). See the documentation you quoted below.

(Total) ordering is guaranteed between dequeue bursts.
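
For completeness: as the documentation quoted below mentions, an 
application that wants the scheduler to release the context earlier can 
do so explicitly with RTE_EVENT_OP_RELEASE, along the lines of this 
sketch (dev_id and port_id being whatever port the events were dequeued 
on):

struct rte_event release_ev = {
	.op = RTE_EVENT_OP_RELEASE
};

/* Give up the current scheduling context without enqueueing
 * any new or forwarded events. */
rte_event_enqueue_burst(dev_id, port_id, &release_ev, 1);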

> 
> Definition of RTE_SCHED_TYPE_ORDERED
> 
> #define RTE_SCHED_TYPE_ORDERED          0
> /**< Ordered scheduling
>   *
>   * Events from an ordered flow of an event queue can be scheduled to multiple
>   * ports for concurrent processing while maintaining the original event order.
>   * This scheme enables the user to achieve high single flow throughput by
>   * avoiding SW synchronization for ordering between ports which are bound to cores.
>   *
>   * The source flow ordering from an event queue is maintained when events are
>   * enqueued to their destination queue within the same ordered flow context.
>   * An event port holds the context until the application calls
>   * rte_event_dequeue_burst() from the same port, which implicitly releases
>   * the context.
>   * User may allow the scheduler to release the context earlier than that
>   * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation.
>   *
>   * Events from the source queue appear in their original order when dequeued
>   * from a destination queue.
>   * Event ordering is based on the received event(s), but also other
>   * (newly allocated or stored) events are ordered when enqueued within the same
>   * ordered context. Events not enqueued (e.g. released or stored) within the
>   * context are considered missing from reordering and are skipped at this time
>   * (but can be ordered again within another context).
>   *
>   * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
>   */

^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v4 0/3] Add dispatcher library
  2023-09-04 13:03                                 ` [PATCH v3 1/3] lib: introduce " Mattias Rönnblom
                                                     ` (2 preceding siblings ...)
  2023-09-21 18:36                                   ` Jerin Jacob
@ 2023-09-22  7:38                                   ` Mattias Rönnblom
  2023-09-22  7:38                                     ` [PATCH v4 1/3] lib: introduce " Mattias Rönnblom
                                                       ` (2 more replies)
  3 siblings, 3 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-22  7:38 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to decouple different parts
of an eventdev-based application (e.g., processing pipeline stages),
sharing the same underlying event device.

The dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event metadata, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the dispatcher relies on application-supplied matching callback
functions to decide where to deliver events.

A dispatcher is configured to dequeue events from a specific event
device, and ties into the service core framework, to do its (and the
application's) work.

The dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the dispatcher adds some overhead, experience from both
synthetic benchmarks and more real-world applications suggests that the
net effect may well be positive. This is primarily due to clustering
(see the programming guide) reducing cache misses.

Benchmarking indicates that the overhead is ~10 clock cycles per event
(on a large core), with a handful of often-used handlers.

The dispatcher does not support run-time reconfiguration.

The use of the dispatcher library is optional, and an eventdev-based
application may still opt to access the event device using direct
eventdev API calls, or by some other means.
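
For reference, a rough sketch of the kind of hand-written dispatch logic
this library is meant to replace (the module functions and queue ids are
hypothetical):

struct rte_event events[32];
uint16_t i, n;

n = rte_event_dequeue_burst(event_dev_id, event_port_id, events,
			    RTE_DIM(events), 0);

for (i = 0; i < n; i++) {
	switch (events[i].queue_id) {
	case MODULE_A_QUEUE_ID:
		module_a_process_event(&events[i]);
		break;
	case MODULE_B_QUEUE_ID:
		module_b_process_event(&events[i]);
		break;
	}
}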

Mattias Rönnblom (3):
  lib: introduce dispatcher library
  test: add dispatcher test suite
  doc: add dispatcher programming guide

 MAINTAINERS                              |    5 +
 app/test/meson.build                     |    1 +
 app/test/test_dispatcher.c               | 1054 ++++++++++++++++++++++
 doc/api/doxy-api-index.md                |    1 +
 doc/api/doxy-api.conf.in                 |    1 +
 doc/guides/prog_guide/dispatcher_lib.rst |  434 +++++++++
 doc/guides/prog_guide/index.rst          |    1 +
 lib/dispatcher/meson.build               |   17 +
 lib/dispatcher/rte_dispatcher.c          |  798 ++++++++++++++++
 lib/dispatcher/rte_dispatcher.h          |  484 ++++++++++
 lib/dispatcher/version.map               |   20 +
 lib/meson.build                          |    2 +
 12 files changed, 2818 insertions(+)
 create mode 100644 app/test/test_dispatcher.c
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v4 1/3] lib: introduce dispatcher library
  2023-09-22  7:38                                   ` [PATCH v4 0/3] Add " Mattias Rönnblom
@ 2023-09-22  7:38                                     ` Mattias Rönnblom
  2023-09-25  7:11                                       ` Mattias Rönnblom
  2023-09-28  7:30                                       ` [PATCH v5 0/3] Add " Mattias Rönnblom
  2023-09-22  7:38                                     ` [PATCH v4 2/3] test: add dispatcher test suite Mattias Rönnblom
  2023-09-22  7:38                                     ` [PATCH v4 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2 siblings, 2 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-22  7:38 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the dispatcher also provides a convenient and flexible
way for the application to use service cores for application-level
processing.
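
For orientation, a minimal setup sketch follows (error handling omitted;
DISPATCHER_ID, EVENT_DEV_ID, EVENT_PORT_ID and LCORE_ID are hypothetical
application-defined constants, and the match/process callbacks are
assumed to exist):

uint32_t service_id;

rte_dispatcher_create(DISPATCHER_ID, EVENT_DEV_ID);

/* The dispatcher service, when run on LCORE_ID, will dequeue in
 * bursts of up to 32 events from this port. */
rte_dispatcher_bind_port_to_lcore(DISPATCHER_ID, EVENT_PORT_ID,
				  32, 0, LCORE_ID);

rte_dispatcher_register(DISPATCHER_ID, match_fun, match_data,
			process_fun, process_data);

/* Map and enable the dispatcher's service on the service lcore. */
rte_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
rte_service_map_lcore_set(service_id, LCORE_ID, 1);
rte_service_runstate_set(service_id, 1);

rte_dispatcher_start(DISPATCHER_ID);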

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
Reviewed-by: Heng Wang <heng.wang@ericsson.com>

--

PATCH v4:
 o Fix bugs in handler and finalizer unregistration. (Naga Harish)
 o Return -EINVAL in cases where NULL pointers were provided in
   calls requiring non-NULL pointers. (Naga Harish)
 o Add experimental warning for the whole API. (Jerin Jacob)

PATCH v3:
 o To underline its optional character and since it does not provide
   hardware abstraction, the event dispatcher is now a separate
   library.
 o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
   shorter and to avoid the rte_event_* namespace.

PATCH v2:
 o Add dequeue batch count statistic.
 o Add statistics reset function to API.
 o Clarify MT safety guarantees (or lack thereof) in the API documentation.
 o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
   to be consistent with similar loops elsewhere in the dispatcher.
 o Fix variable names in finalizer unregister function.

PATCH:
 o Change prefix from RED to EVD, to avoid confusion with random
   early detection.

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and causes
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 MAINTAINERS                     |   3 +
 doc/api/doxy-api-index.md       |   1 +
 doc/api/doxy-api.conf.in        |   1 +
 lib/dispatcher/meson.build      |  17 +
 lib/dispatcher/rte_dispatcher.c | 798 ++++++++++++++++++++++++++++++++
 lib/dispatcher/rte_dispatcher.h | 484 +++++++++++++++++++
 lib/dispatcher/version.map      |  20 +
 lib/meson.build                 |   2 +
 8 files changed, 1326 insertions(+)
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index a926155f26..6704cd5b2c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1726,6 +1726,9 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
 M: Pavan Nikhilesh <pbhagavatula@marvell.com>
 F: lib/node/
 
+Dispatcher - EXPERIMENTAL
+M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
+F: lib/dispatcher/
 
 Test Applications
 -----------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index fdeda13932..7d0cad9fed 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -155,6 +155,7 @@ The public API headers are grouped by topics:
 
 - **classification**
   [reorder](@ref rte_reorder.h),
+  [dispatcher](@ref rte_dispatcher.h),
   [distributor](@ref rte_distributor.h),
   [EFD](@ref rte_efd.h),
   [ACL](@ref rte_acl.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a88accd907..59c679e621 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dispatcher \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
new file mode 100644
index 0000000000..c6054a3a5d
--- /dev/null
+++ b/lib/dispatcher/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023 Ericsson AB
+
+if is_windows
+    build = false
+    reason = 'not supported on Windows'
+    subdir_done()
+endif
+
+sources = files(
+        'rte_dispatcher.c',
+)
+headers = files(
+        'rte_dispatcher.h',
+)
+
+deps += ['eventdev']
diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
new file mode 100644
index 0000000000..3a5a40502f
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.c
@@ -0,0 +1,798 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_dispatcher.h>
+
+#define EVD_MAX_PORTS_PER_LCORE 4
+#define EVD_MAX_HANDLERS 32
+#define EVD_MAX_FINALIZERS 16
+#define EVD_AVG_PRIO_INTERVAL 2000
+
+struct rte_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_dispatcher_handler {
+	int id;
+	rte_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_dispatcher_finalizer {
+	int id;
+	rte_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
+	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
+	struct rte_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
+};
+
+static struct rte_dispatcher *dispatchers[UINT8_MAX];
+
+static bool
+evd_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_dispatcher *
+evd_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+evd_set_dispatcher(uint8_t id, struct rte_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define EVD_TRUE_OR_RET_EINVAL(expr, fmt, ...)				\
+	do {								\
+		if (unlikely(!(expr))) {				\
+			RTE_EDEV_LOG_ERR(fmt, __VA_ARGS__);		\
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+#define EVD_VALID_ID_OR_RET_EINVAL(id)					\
+	EVD_TRUE_OR_RET_EINVAL(evd_has_dispatcher(id),			\
+			       "Invalid dispatcher id %d\n", id)
+
+#define EVD_NON_NULL_OR_RET_EINVAL(ptr)					\
+	EVD_TRUE_OR_RET_EINVAL(ptr != NULL, "\"%s\" is NULL\n", #ptr)
+
+static int
+evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
+		       const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+		       int handler_idx)
+{
+	struct rte_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+				int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		evd_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follows some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
+			EVD_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
+static inline void
+evd_dispatch_events(struct rte_dispatcher *dispatcher,
+		    struct rte_dispatcher_lcore *lcore,
+		    struct rte_dispatcher_lcore_port *port,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = evd_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		evd_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_batch_count++;
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+evd_port_dequeue(struct rte_dispatcher *dispatcher,
+		 struct rte_dispatcher_lcore *lcore,
+		 struct rte_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		evd_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+evd_lcore_process(struct rte_dispatcher *dispatcher,
+		  struct rte_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += evd_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
+static int32_t
+evd_process(void *userdata)
+{
+	struct rte_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = evd_lcore_process(dispatcher, lcore);
+
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+evd_service_register(struct rte_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = evd_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, sizeof(service.name), "evd_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+evd_service_unregister(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	if (evd_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = evd_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	evd_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_dispatcher_free(uint8_t id)
+{
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = evd_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	evd_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_dispatcher *dispatcher;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+
+	dispatcher = evd_get_dispatcher(id);
+
+	EVD_NON_NULL_OR_RET_EINVAL(service_id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int
+lcore_port_index(struct rte_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_lcore *lcore;
+	struct rte_dispatcher_lcore_port *port;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_dispatcher_lcore_port *port;
+	struct rte_dispatcher_lcore_port *last;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_dispatcher_handler*
+evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+	struct rte_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
+	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
+		return -1;
+
+	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
+		    const struct rte_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+evd_install_handler(struct rte_dispatcher *dispatcher,
+		    const struct rte_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		evd_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_dispatcher_register(uint8_t id,
+			      rte_dispatcher_match_t match_fun,
+			      void *match_data,
+			      rte_dispatcher_process_t process_fun,
+			      void *process_data)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	handler.id = evd_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	evd_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	struct rte_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+
+	EVD_NON_NULL_OR_RET_EINVAL(unreg_handler);
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+evd_uninstall_handler(struct rte_dispatcher *dispatcher,
+		      int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = evd_lcore_uninstall_handler(lcore, handler_id);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_unregister(uint8_t id, int handler_id)
+{
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = evd_uninstall_handler(dispatcher, handler_id);
+
+	return rc;
+}
+
+static struct rte_dispatcher_finalizer*
+evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
+		       int finalizer_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == finalizer_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_dispatcher_finalizer *
+evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = evd_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_dispatcher_finalize_register(uint8_t id,
+			      rte_dispatcher_finalize_t finalize_fun,
+			      void *finalize_data)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_finalizer *finalizer;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	finalizer = evd_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_dispatcher_finalize_unregister(uint8_t id, int reg_id)
+{
+	struct rte_dispatcher *dispatcher;
+	struct rte_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, reg_id);
+
+	EVD_NON_NULL_OR_RET_EINVAL(unreg_finalizer);
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static int
+evd_set_service_runstate(uint8_t id, int state)
+{
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_start(uint8_t id)
+{
+	return evd_set_service_runstate(id, 1);
+}
+
+int
+rte_dispatcher_stop(uint8_t id)
+{
+	return evd_set_service_runstate(id, 0);
+}
+
+static void
+evd_aggregate_stats(struct rte_dispatcher_stats *result,
+		    const struct rte_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_batch_count += part->ev_batch_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+int
+rte_dispatcher_stats_get(uint8_t id, struct rte_dispatcher_stats *stats)
+{
+	struct rte_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	EVD_NON_NULL_OR_RET_EINVAL(stats);
+	*stats = (struct rte_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		evd_aggregate_stats(stats, &lcore->stats);
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_stats_reset(uint8_t id)
+{
+	struct rte_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	EVD_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = evd_get_dispatcher(id);
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		lcore->stats = (struct rte_dispatcher_stats) {};
+	}
+
+	return 0;
+}
diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
new file mode 100644
index 0000000000..76e099c3ca
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.h
@@ -0,0 +1,484 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_DISPATCHER_H__
+#define __RTE_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Dispatcher
+ *
+ * @warning
+ * @b EXPERIMENTAL:
+ * All functions in this file may be changed or removed without prior notice.
+ *
+ * The purpose of the dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ *
+ * @return
+ *   Returns true in case this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ */
+
+typedef void
+(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				  struct rte_event *events, uint16_t num,
+				  void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the dispatcher to notify the
+ * application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_finalize_register().
+ */
+
+typedef void
+(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				   void *cb_data);
+
+/**
+ * Dispatcher statistics
+ */
+struct rte_dispatcher_stats {
+	uint64_t poll_count;
+	/**< Number of event dequeue calls made toward the event device. */
+	uint64_t ev_batch_count;
+	/**< Number of non-empty event batches dequeued from event device.*/
+	uint64_t ev_dispatch_count;
+	/**< Number of events dispatched to a handler.*/
+	uint64_t ev_drop_count;
+	/**< Number of events dropped because no handler was found. */
+};
+
+/**
+ * Create a dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Free a dispatcher.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of a dispatcher.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the dispatcher service is mapped (with rte_service_map_lcore_set())
+ * to a lcore to which no ports are bound, the service function will be a
+ * no-operation.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the
+ * lcore specified by @c lcore_id.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the
+ * lcore specified by @c lcore_id.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of events to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events where no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order of which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different deliver callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] is dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the dispatcher may choose to deliver first [ev0, ev2]
+ * using A's process callback, and then [ev1] to B - or vice versa.
+ *
+ * rte_dispatcher_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the event
+ * dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_register(uint8_t id,
+			      rte_dispatcher_match_t match_fun,
+			      void *match_cb_data,
+			      rte_dispatcher_process_t process_fun,
+			      void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_unregister(uint8_t id, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_dispatcher_finalize_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the
+ * dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_register(uint8_t id,
+				 rte_dispatcher_finalize_t finalize_fun,
+				 void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_unregister(uint8_t id, int reg_id);
+
+/**
+ * Start a dispatcher instance.
+ *
+ * Enables the dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_dispatcher_start().
+ *
+ * For the dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. A dispatcher's
+ * service id is retrieved using rte_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
+ * starting the dispatcher.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_dispatcher_start(uint8_t id);
+
+/**
+ * Stop a running dispatcher instance.
+ *
+ * Disables the dispatcher service.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_dispatcher_stop(uint8_t id);
+
+/**
+ * Retrieve statistics for a dispatcher instance.
+ *
+ * This function is MT safe and may be called by any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param id
+ *  The dispatcher identifier.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_stats_get(uint8_t id,
+			       struct rte_dispatcher_stats *stats);
+
+/**
+ * Reset statistics for a dispatcher instance.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but may not produce the correct result if the
+ * dispatcher is running on any service lcore.
+ *
+ * @param id
+ *  The dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_stats_reset(uint8_t id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DISPATCHER_H__ */
diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
new file mode 100644
index 0000000000..8f9ad96522
--- /dev/null
+++ b/lib/dispatcher/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	# added in 23.11
+	rte_dispatcher_create;
+	rte_dispatcher_free;
+	rte_dispatcher_service_id_get;
+	rte_dispatcher_bind_port_to_lcore;
+	rte_dispatcher_unbind_port_from_lcore;
+	rte_dispatcher_register;
+	rte_dispatcher_unregister;
+	rte_dispatcher_finalize_register;
+	rte_dispatcher_finalize_unregister;
+	rte_dispatcher_start;
+	rte_dispatcher_stop;
+	rte_dispatcher_stats_get;
+	rte_dispatcher_stats_reset;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 099b0ed18a..3093b338d2 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -35,6 +35,7 @@ libraries = [
         'distributor',
         'efd',
         'eventdev',
+        'dispatcher', # dispatcher depends on eventdev
         'gpudev',
         'gro',
         'gso',
@@ -81,6 +82,7 @@ optional_libs = [
         'cfgfile',
         'compressdev',
         'cryptodev',
+        'dispatcher',
         'distributor',
         'dmadev',
         'efd',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v4 2/3] test: add dispatcher test suite
  2023-09-22  7:38                                   ` [PATCH v4 0/3] Add " Mattias Rönnblom
  2023-09-22  7:38                                     ` [PATCH v4 1/3] lib: introduce " Mattias Rönnblom
@ 2023-09-22  7:38                                     ` Mattias Rönnblom
  2023-09-22  7:38                                     ` [PATCH v4 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-22  7:38 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Add unit tests for the dispatcher.

--
PATCH v3:
 o Adapt the test suite to dispatcher API name changes.

PATCH v2:
 o Test finalize callback functionality.
 o Test handler and finalizer count upper limits.
 o Add statistics reset test.
 o Make sure the dispatcher supplies the proper event dev id and port id
   back to the application.

PATCH:
 o Extend test to cover often-used handler optimization feature.

RFC v4:
 o Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 MAINTAINERS                |    1 +
 app/test/meson.build       |    1 +
 app/test/test_dispatcher.c | 1054 ++++++++++++++++++++++++++++++++++++
 3 files changed, 1056 insertions(+)
 create mode 100644 app/test/test_dispatcher.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 6704cd5b2c..43890cad0e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1729,6 +1729,7 @@ F: lib/node/
 Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
+F: app/test/test_dispatcher.c
 
 Test Applications
 -----------------
diff --git a/app/test/meson.build b/app/test/meson.build
index 05bae9216d..3303c73817 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -55,6 +55,7 @@ source_file_deps = {
     'test_cycles.c': [],
     'test_debug.c': [],
     'test_devargs.c': ['kvargs'],
+    'test_dispatcher.c': ['dispatcher'],
     'test_distributor.c': ['distributor'],
     'test_distributor_perf.c': ['distributor'],
     'test_dmadev.c': ['dmadev', 'bus_vdev'],
diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c
new file mode 100644
index 0000000000..b64103c48e
--- /dev/null
+++ b/app/test/test_dispatcher.c
@@ -0,0 +1,1054 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include "test.h"
+
+#include <stdatomic.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+
+#define NUM_WORKERS 3
+
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct cb_count {
+	uint8_t expected_event_dev_id;
+	uint8_t expected_event_port_id[RTE_MAX_LCORE];
+	atomic_int count;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	uint8_t dispatcher_id;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	int never_match_reg_id;
+	uint64_t never_match_count;
+	struct cb_count never_process_count;
+
+	struct app_queue queues[NUM_QUEUES];
+
+	int finalize_reg_id;
+	struct cb_count finalize_count;
+
+	bool running;
+
+	atomic_int completed_events;
+	atomic_int errors;
+};
+
+#define RETURN_ON_ERROR(rc) \
+	do {					\
+		if ((rc) != TEST_SUCCESS)	\
+			return rc;		\
+	} while (0)
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc < 0)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher_id = rte_rand_max(256);
+
+	rc = rte_dispatcher_create(app->dispatcher_id, app->event_dev_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to create event dispatcher");
+
+	rc = rte_dispatcher_service_id_get(app->dispatcher_id,
+					   &app->dispatcher_service_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to get event dispatcher service ID");
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rte_dispatcher_free(app->dispatcher_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	app->never_process_count.expected_event_dev_id =
+		app->event_dev_id;
+	app->finalize_count.expected_event_dev_id =
+		app->event_dev_id;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		uint8_t port_id = WORKER_PORT_ID(i);
+
+		int rc = rte_dispatcher_bind_port_to_lcore(
+			app->dispatcher_id, port_id, DEQUEUE_BURST_SIZE, 0,
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", port_id, lcore_id);
+
+		app->never_process_count.expected_event_port_id[lcore_id] =
+			port_id;
+		app->finalize_count.expected_event_port_id[lcore_id] = port_id;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_dispatcher_unbind_port_from_lcore(
+			app->dispatcher_id,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+		       struct rte_event *in_events, uint16_t num,
+		       void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
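+	/*
+	 * Verify per-flow ordering via the sequence numbers carried in
+	 * the event payload, and prepare forwarded copies of all events
+	 * not yet at the final pipeline stage.
+	 */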
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		atomic_fetch_add_explicit(&app->completed_events, num,
+					  memory_order_relaxed);
+}
+
+static bool
+never_match(const struct rte_event *event __rte_unused, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count)++;
+
+	return false;
+}
+
+static void
+test_app_never_process(uint8_t event_dev_id,
+		       uint8_t event_port_id,
+		       struct rte_event *in_events __rte_unused,
+		       uint16_t num, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		atomic_fetch_add_explicit(&count->count, num,
+					  memory_order_relaxed);
+}
+
+static void
+finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		atomic_fetch_add_explicit(&count->count, 1,
+					  memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	app->never_match_reg_id =
+		rte_dispatcher_register(app->dispatcher_id, never_match,
+					&app->never_match_count,
+					test_app_never_process,
+					&app->never_process_count);
+
+	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
+		    "never-match handler");
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_dispatcher_register(app->dispatcher_id,
+						 match_queue, (void *)queue_id,
+						 test_app_process_queue,
+						 app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	app->finalize_reg_id =
+		rte_dispatcher_finalize_register(app->dispatcher_id,
+						       finalize,
+						       &app->finalize_count);
+	TEST_ASSERT(app->finalize_reg_id >= 0, "Error registering "
+		    "finalize callback");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+	int rc;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	rc = rte_dispatcher_unregister(app->dispatcher_id, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+	int rc;
+
+	if (app->never_match_reg_id >= 0) {
+		rc = rte_dispatcher_unregister(app->dispatcher_id,
+						     app->never_match_reg_id);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
+				    "handler");
+		app->never_match_reg_id = -1;
+	}
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		rc = test_app_unregister_callback(app, i);
+		RETURN_ON_ERROR(rc);
+	}
+
+	if (app->finalize_reg_id >= 0) {
+		rc = rte_dispatcher_finalize_unregister(
+			app->dispatcher_id, app->finalize_reg_id
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalize "
+				    "callback");
+		app->finalize_reg_id = -1;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_start(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_stop(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_reset_dispatcher_stats(struct test_app *app)
+{
+	int rc;
+	struct rte_dispatcher_stats stats;
+
+	rc = rte_dispatcher_stats_reset(app->dispatcher_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to reset event dispatcher statistics");
+
+	rc = rte_dispatcher_stats_get(app->dispatcher_id, &stats);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to retrieve event dispatcher "
+			    "statistics");
+
+	TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
+			  "not zero");
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
+			    "Too few lcores. Needs at least %d worker lcores",
+			    NUM_SERVICE_CORES);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_dispatcher(app);
+
+	app->running = true;
+
+	return rc;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_stop_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_create_dispatcher(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_setup_service_cores(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_register_callbacks(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_bind_ports(test_app);
+
+	return rc;
+}
+
+static void test_teardown(void)
+{
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return atomic_load_explicit(&app->completed_events,
+				    memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return atomic_load_explicit(&app->errors, memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
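+	/*
+	 * The events are processed on the dispatcher service lcores;
+	 * here, only the maintenance duty for the enqueue-only driver
+	 * port needs to be performed.
+	 */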
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_dispatcher_stats stats;
+	rc = rte_dispatcher_stats_get(test_app->dispatcher_id,
+					    &stats);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
+			  "Never-match handler's process function has "
+			  "been called");
+
+	int finalize_count =
+		atomic_load_explicit(&test_app->finalize_count.count,
+				     memory_order_relaxed);
+
+	TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
+	TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
+		    "Finalize count larger than event count");
+
+	TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
+			  "%"PRIu64" batches dequeued, but finalize called %d "
+			  "times", stats.ev_batch_count, finalize_count);
+
+	/*
+	 * The event dispatcher should call often-matching match functions
+	 * more often, and thus this never-matching match function should
+	 * be called relatively infrequently.
+	 */
+	TEST_ASSERT(test_app->never_match_count <
+		    (stats.ev_dispatch_count / 4),
+		    "Never-matching match function called suspiciously often");
+
+	rc = test_app_reset_dispatcher_stats(test_app);
+	RETURN_ON_ERROR(rc);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue;
+	struct rte_dispatcher_stats stats;
+
+	unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rc = rte_dispatcher_stats_get(test_app->dispatcher_id,
+						    &stats);
+		RETURN_ON_ERROR(rc);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+#define MORE_THAN_MAX_HANDLERS 1000
+#define MIN_HANDLERS 32
+
+static int
+test_many_handler_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_HANDLERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+
+	RETURN_ON_ERROR(rc);
+
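+	/*
+	 * Register never-matching handlers until the dispatcher runs out
+	 * of handler slots, to verify the limit is gracefully enforced.
+	 */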
+	for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
+		reg_id = rte_dispatcher_register(test_app->dispatcher_id,
+						       never_match, NULL,
+						       test_app_never_process,
+						       NULL);
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
+		    "after %d handler registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_unregister(test_app->dispatcher_id,
+						     reg_ids[i]);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+dummy_finalize(uint8_t event_dev_id __rte_unused,
+	       uint8_t event_port_id __rte_unused,
+	       void *cb_data __rte_unused)
+{
+}
+
+#define MORE_THAN_MAX_FINALIZERS 1000
+#define MIN_FINALIZERS 16
+
+static int
+test_many_finalize_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_FINALIZERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+
+	RETURN_ON_ERROR(rc);
+
+	for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
+		reg_id = rte_dispatcher_finalize_register(
+			test_app->dispatcher_id, dummy_finalize, NULL
+		);
+
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
+		    "already after %d registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_finalize_unregister(
+			test_app->dispatcher_id, reg_ids[i]
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_handler_registrations),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_finalize_registrations),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_TEST_COMMAND(dispatcher_autotest, test_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v4 3/3] doc: add dispatcher programming guide
  2023-09-22  7:38                                   ` [PATCH v4 0/3] Add " Mattias Rönnblom
  2023-09-22  7:38                                     ` [PATCH v4 1/3] lib: introduce " Mattias Rönnblom
  2023-09-22  7:38                                     ` [PATCH v4 2/3] test: add dispatcher test suite Mattias Rönnblom
@ 2023-09-22  7:38                                     ` Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-22  7:38 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Provide programming guide for the dispatcher library.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--
PATCH v3:
 o Adapt guide to the dispatcher API name changes.

PATCH:
 o Improve grammar and spelling.

RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 MAINTAINERS                              |   1 +
 doc/guides/prog_guide/dispatcher_lib.rst | 434 +++++++++++++++++++++++
 doc/guides/prog_guide/index.rst          |   1 +
 3 files changed, 436 insertions(+)
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst

diff --git a/MAINTAINERS b/MAINTAINERS
index 43890cad0e..ab35498204 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1730,6 +1730,7 @@ Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
 F: app/test/test_dispatcher.c
+F: doc/guides/prog_guide/dispatcher_lib.rst
 
 Test Applications
 -----------------
diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst
new file mode 100644
index 0000000000..d4f29ce7ba
--- /dev/null
+++ b/doc/guides/prog_guide/dispatcher_lib.rst
@@ -0,0 +1,434 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Dispatcher
+==========
+
+Overview
+--------
+
+The purpose of the dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The dispatcher replaces the conditional logic that follows an event
+device dequeue operation, where events are dispatched to different
+parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int i, n;
+    
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+                                        MAX_BURST, 0);
+    
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+    
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example, ``module_a_process()`` is
+broken into two processing stages (a module-internal affair), the
+shared conditional code must be updated to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive the event, using
+  a set of application-provided, per-handler event matching callback
+  functions.
+* Deliver the events matching a particular handler to that handler,
+  using its process callback.
+
+Had the above application made use of the dispatcher, the code
+relevant for its module A might have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+           return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+    
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            const struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+    
+    /* In the module's initialization code */
+    rte_dispatcher_register(DISPATCHER_ID, module_a_match, NULL,
+                            module_a_process_events, module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or,
+phrased differently, what is replacing the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is
+run as a DPDK :doc:`Service <service_cores>`.
+
+The dispatcher is a layer between the application and the event device
+in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
+
+Dispatcher Creation
+-------------------
+
+A dispatcher is created using ``rte_dispatcher_create()``.
+
+The dispatcher id is provided by the application, and must be unique.
+
+The event device must be configured before the dispatcher is created.
+
+Usually, only one dispatcher is needed per event device. A dispatcher
+handles exactly one event device.
+
+A dispatcher is freed using the ``rte_dispatcher_free()``
+function. The dispatcher's service functions must not be running on
+any lcore at the point of this call.
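+
+Below is a minimal sketch of dispatcher creation and teardown,
+assuming an already-configured event device identified by
+``event_dev_id``, and an application-chosen ``DISPATCHER_ID``:
+
+.. code-block:: c
+
+    /* In the application's initialization code */
+    rte_dispatcher_create(DISPATCHER_ID, event_dev_id);
+
+    /* In the application's termination code, with the dispatcher's
+     * service function no longer running on any lcore
+     */
+    rte_dispatcher_free(DISPATCHER_ID);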
+
+Event Port Binding
+------------------
+
+To be able to dequeue events, the dispatcher must know which event
+ports are to be used, on all the lcores it uses. The application
+provides this information using
+``rte_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
+``rte_dispatcher_unbind_port_from_lcore()`` is used to undo
+this operation.
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the dispatcher's
+service function is running on any lcore.
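+
+The following is a brief sketch, assuming each entry in some
+application-defined ``lcores`` array has been assigned a dedicated
+event port in a corresponding ``port_ids`` array, a zero dequeue
+timeout is used, and the dispatcher's service function is not yet
+running:
+
+.. code-block:: c
+
+    for (i = 0; i < num_lcores; i++)
+            rte_dispatcher_bind_port_to_lcore(DISPATCHER_ID, port_ids[i],
+                                              BATCH_SIZE, 0, lcores[i]);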
+
+Event Handlers
+--------------
+
+The dispatcher handler is an interface between the dispatcher and an
+application module, used to route events to the appropriate part of
+the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_dispatcher_match_t`` callback, whose job is to
+  decide if this event is to be the property of this handler.
+* The ``rte_dispatcher_process_t``, which is used by the
+  dispatcher to deliver matched events.
+
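+For reference, a sketch of what the two callback types may look like
+(as suggested by the examples in this guide; ``rte_dispatcher.h``
+holds the authoritative definitions):
+
+.. code-block:: c
+
+    typedef bool (*rte_dispatcher_match_t)(const struct rte_event *event,
+                                           void *cb_data);
+
+    typedef void (*rte_dispatcher_process_t)(uint8_t event_dev_id,
+                                             uint8_t event_port_id,
+                                             struct rte_event *events,
+                                             uint16_t num, void *cb_data);
+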
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the dispatcher may impose an
+upper limit on the number of handlers. In addition, installing a large
+number of handlers increases dispatcher overhead, although this does
+not necessarily translate to a system-level performance degradation. See
+the section on :ref:`Event Clustering` for more information.
+
+Handler registration and unregistration cannot safely be done while
+the dispatcher's service function is running on any lcore.
+
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides if an event should be
+delivered to this handler, or not.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
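+
+Matching need not be based on the queue id. As a hypothetical
+example, a handler could claim all events carrying a particular
+scheduling type:
+
+.. code-block:: c
+
+    static bool
+    match_atomic(const struct rte_event *event, void *cb_data)
+    {
+            return event->sched_type == RTE_SCHED_TYPE_ATOMIC;
+    }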
+
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the dispatcher's service
+function, upon the arrival of events on the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls being
+made. For example, if the dispatcher dequeues two events from the
+event device, it may choose to find out the destination for the first
+event, and deliver it, and then continue to find out the destination
+for the second, and then deliver that event as well. The dispatcher
+may also choose a strategy where no event is delivered until the
+destination handlers for both events have been determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The dispatcher maintains the order of events destined for the same
+handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the dispatcher delivers these events to the application.
+
+The dispatcher *does not* guarantee to maintain the order of events
+delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block::
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The dispatcher may deliver the events in the following manner:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The dispatcher may also choose to cluster (group) all events destined
+for ``module_b_stage_0_process()`` into one array:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the dispatcher may not do is to reorder event
+``e1`` so that it precedes ``e0`` in the array passed to module
+B's stage 0 process callback.
+
+Although clustering requires some extra work for the dispatcher, it
+leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The dispatcher may be configured to notify one or more parts of the
+application when the matching and processing of a batch of events has
+completed.
+
+The ``rte_dispatcher_finalize_register()`` call is used to
+register a finalize callback. The function
+``rte_dispatcher_finalize_unregister()`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+module, or in a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+    
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_finalize_register(DISPATCHER_ID, finalize_batch,
+                                     shared_event_buffer);
+
+The dispatcher does not track any relationship between a handler and a
+finalize callback, and all finalize callbacks will be called if (and
+only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the dispatcher's service function is running on any lcore.
+
+Service
+-------
+
+The dispatcher is a DPDK service, and is managed in a manner similar
+to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-configured dispatcher
+(identified by ``DISPATCHER_ID``) to that lcore.
+
+.. code-block:: c
+
+    static void
+    launch_dispatcher_core(unsigned lcore_id)
+    {
+            uint32_t service_id;
+    
+            rte_service_lcore_add(lcore_id);
+    
+            rte_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
+    
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+    
+            rte_service_lcore_start(lcore_id);
+    
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the dispatcher must be started.
+
+.. code-block:: c
+
+    rte_dispatcher_start(DISPATCHER_ID);
+
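+At shutdown, a sketch of the reverse procedure (assuming the
+dispatcher service is the only service mapped to the lcore) may look
+like:
+
+.. code-block:: c
+
+    rte_dispatcher_stop(DISPATCHER_ID);
+
+    rte_service_runstate_set(service_id, 0);
+
+    rte_service_lcore_stop(lcore_id);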
+
+Multi Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In light of this, it makes sense to have the dispatcher service be
+the only DPDK service on all lcores used for packet processing — at
+least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the dispatcher into the
+application, were performed on the lcores even when no events were
+received are prime targets for being converted into such auxiliary
+services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
+
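+Below is a minimal sketch of such an auxiliary service, using the
+service component API in the same way the dispatcher does. The
+``timer_service`` name is illustrative only, and error handling,
+service-to-lcore mapping and runstate configuration (done in the same
+manner as for the dispatcher service) are omitted:
+
+.. code-block:: c
+
+    static int32_t
+    timer_service(void *userdata)
+    {
+            rte_timer_manage();
+
+            return 0;
+    }
+
+    static void
+    register_timer_service(void)
+    {
+            struct rte_service_spec service = {
+                    .callback = timer_service
+            };
+            uint32_t service_id;
+
+            snprintf(service.name, sizeof(service.name), "timer");
+
+            rte_service_component_register(&service, &service_id);
+    }
+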
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or
+similar techniques) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to assure resource reclamation occurs even though some
+lcores currently do not process any events.
+
+If more services than the dispatcher service are mapped to a service
+lcore, it's important that the other services are well-behaved and
+don't interfere with event processing to the extent that the system's
+throughput and/or latency requirements are at risk of not being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 52a6d9e7aa..ab05bd6074 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -60,6 +60,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    dispatcher_lib
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v4 1/3] lib: introduce dispatcher library
  2023-09-22  7:38                                     ` [PATCH v4 1/3] lib: introduce " Mattias Rönnblom
@ 2023-09-25  7:11                                       ` Mattias Rönnblom
  2023-09-25  7:59                                         ` Bruce Richardson
  2023-09-26 18:28                                         ` Jerin Jacob
  2023-09-28  7:30                                       ` [PATCH v5 0/3] Add " Mattias Rönnblom
  1 sibling, 2 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-25  7:11 UTC (permalink / raw)
  To: Mattias Rönnblom, dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-09-22 09:38, Mattias Rönnblom wrote:

<snip>

> +int
> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
> +{


There are two changes I'm considering:

1) Removing the "id" to identify the dispatcher, replacing it with an 
forward-declared rte_dispatcher struct pointer.

struct rte_dispatcher;

struct rte_dispatcher *
rte_dispatcher_create(uint8_t event_dev_id);


The original reason for using an integer id to identify a dispatcher is 
to make it look like everything else in Eventdev. I find this pattern a 
little awkward to use - in particular the fact the id is 
application-allocated (and thus requires coordination between different
parts of the application in case multiple instances are used).

2) Adding a flags field to the create function "for future use". But 
since the API is experimental, there may not be that much need to 
attempt to be future-proof?

Any thoughts are appreciated.

<snip>

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v4 1/3] lib: introduce dispatcher library
  2023-09-25  7:11                                       ` Mattias Rönnblom
@ 2023-09-25  7:59                                         ` Bruce Richardson
  2023-09-26 18:28                                         ` Jerin Jacob
  1 sibling, 0 replies; 102+ messages in thread
From: Bruce Richardson @ 2023-09-25  7:59 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On Mon, Sep 25, 2023 at 09:11:38AM +0200, Mattias Rönnblom wrote:
> On 2023-09-22 09:38, Mattias Rönnblom wrote:
> 
> <snip>
> 
> > +int
> > +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
> > +{
> 
> 
> There are two changes I'm considering:
> 
> 1) Removing the "id" to identify the dispatcher, replacing it with a
> forward-declared rte_dispatcher struct pointer.
> 
> struct rte_dispatcher;
> 
> struct rte_dispatcher *
> rte_dispatcher_create(uint8_t event_dev_id);
> 
> 
> The original reason for using an integer id to identify a dispatcher is to
> make it look like everything else in Eventdev. I find this pattern a little
> awkward to use - in particular the fact the id is application-allocated (and
> thus require coordination between different part of the application in case
> multiple instances are used).
> 
> 2) Adding a flags field to the create function "for future use". But since
> the API is experimental, there may not be that much need to attempt to be
> future-proof?
> 
I'd add the flags anyway, if you can foresee it potentially being needed.
For something like create it could be useful, and it's not a major overhead
on the user to add an extra "0" to each function call. Just ensure it's
checked for zero inside the initial implementation.

/Bruce

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v4 1/3] lib: introduce dispatcher library
  2023-09-25  7:11                                       ` Mattias Rönnblom
  2023-09-25  7:59                                         ` Bruce Richardson
@ 2023-09-26 18:28                                         ` Jerin Jacob
  2023-09-27  8:13                                           ` Bruce Richardson
  1 sibling, 1 reply; 102+ messages in thread
From: Jerin Jacob @ 2023-09-26 18:28 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On Mon, Sep 25, 2023 at 12:41 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>
> On 2023-09-22 09:38, Mattias Rönnblom wrote:
>
> <snip>
>
> > +int
> > +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
> > +{
>
>
> There are two changes I'm considering:
>
> 1) Removing the "id" to identify the dispatcher, replacing it with a
> forward-declared rte_dispatcher struct pointer.
>
> struct rte_dispatcher;
>
> struct rte_dispatcher *
> rte_dispatcher_create(uint8_t event_dev_id);
>
>
> The original reason for using an integer id to identify a dispatcher is
> to make it look like everything else in Eventdev. I find this pattern a
> little awkward to use - in particular the fact the id is
> application-allocated (and thus requires coordination between different
> parts of the application in case multiple instances are used).
>
> 2) Adding a flags field to the create function "for future use". But
> since the API is experimental, there may not be that much need to
> attempt to be future-proof?
>
> Any thoughts are appreciated.

IMO, better to have rte_dispatcher_create(struct
rte_dispatch_create_params *params)
for better future proofing with specific
rte_dispatch_create_params_init() API (No need to add reserved fields
in rte_dispatch_create_params now, may be needed only before removing
experimental status)

Just 2c.

>
> <snip>

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v4 1/3] lib: introduce dispatcher library
  2023-09-26 18:28                                         ` Jerin Jacob
@ 2023-09-27  8:13                                           ` Bruce Richardson
  2023-09-28  7:44                                             ` Mattias Rönnblom
  2023-10-03 17:31                                             ` Jerin Jacob
  0 siblings, 2 replies; 102+ messages in thread
From: Bruce Richardson @ 2023-09-27  8:13 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Mattias Rönnblom, Mattias Rönnblom, dev, Jerin Jacob,
	techboard, harry.van.haaren, Peter Nilsson, Heng Wang,
	Naga Harish K S V, Pavan Nikhilesh, Gujjar Abhinandan S,
	Erik Gabriel Carrillo, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Liang Ma, Peter Mccarthy, Zhirun Yan

On Tue, Sep 26, 2023 at 11:58:37PM +0530, Jerin Jacob wrote:
> On Mon, Sep 25, 2023 at 12:41 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> >
> > On 2023-09-22 09:38, Mattias Rönnblom wrote:
> >
> > <snip>
> >
> > > +int
> > > +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
> > > +{
> >
> >
> > There are two changes I'm considering:
> >
> > 1) Removing the "id" to identify the dispatcher, replacing it with a
> > forward-declared rte_dispatcher struct pointer.
> >
> > struct rte_dispatcher;
> >
> > struct rte_dispatcher *
> > rte_dispatcher_create(uint8_t event_dev_id);
> >
> >
> > The original reason for using an integer id to identify a dispatcher is
> > to make it look like everything else in Eventdev. I find this pattern a
> > little awkward to use - in particular the fact the id is
> > application-allocated (and thus requires coordination between different
> > parts of the application in case multiple instances are used).
> >
> > 2) Adding a flags field to the create function "for future use". But
> > since the API is experimental, there may not be that much need to
> > attempt to be future-proof?
> >
> > Any thoughts are appreciated.
> 
> IMO, better to have rte_dispatcher_create(struct
> rte_dispatch_create_params *params)
> for better future proofing with specific
> rte_dispatch_create_params_init() API (No need to add reserved fields
> in rte_dispatch_create_params now, may be needed only before removing
> experimental status)
> 
> Just 2c.
> 

I don't like using structs in those cases, I'd much rather have a flags
parameter, as flags can be checked for explicit zeros for future proofing,
while a struct cannot be checked for extra space on the end for future
fields added.

Furthermore, if we need to add new parameters to the create function, I
actually believe it is better to add them as explicit parameters rather
than new fields to the struct. Struct fields can be missed by a user just
recompiling, while new function parameters will be flagged by the compiler
to make the user aware of the change. [There would be no change for ABI
compatibility as function versioning would be usable in both cases]

/Bruce

^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v5 0/3] Add dispatcher library
  2023-09-22  7:38                                     ` [PATCH v4 1/3] lib: introduce " Mattias Rönnblom
  2023-09-25  7:11                                       ` Mattias Rönnblom
@ 2023-09-28  7:30                                       ` Mattias Rönnblom
  2023-09-28  7:30                                         ` [PATCH v5 1/3] lib: introduce " Mattias Rönnblom
                                                           ` (2 more replies)
  1 sibling, 3 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-28  7:30 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to decouple different parts
of an eventdev-based application (e.g., processing pipeline stages),
sharing the same underlying event device.

The dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the dispatcher relies on application-supplied matching callback
functions to decide where to deliver events.

A dispatcher is configured to dequeue events from a specific event
device, and ties into the service core framework, to do its (and the
application's) work.

The dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the dispatcher adds some overhead, experience suggests that
the net effect on the application (both synthetic benchmarks and more
real-world applications) may well be positive. This is primarily due
to clustering (see programming guide) reducing cache misses.

Benchmarking indicates that the overhead is ~10 cc/event (on a
large core), with a handful of often-used handlers.

The dispatcher does not support run-time reconfiguration.

The use of the dispatcher library is optional, and an eventdev-based
application may still opt to access the event device using direct
eventdev API calls, or by some other means.

Mattias Rönnblom (3):
  lib: introduce dispatcher library
  test: add dispatcher test suite
  doc: add dispatcher programming guide

 MAINTAINERS                              |    5 +
 app/test/meson.build                     |    1 +
 app/test/test_dispatcher.c               | 1046 ++++++++++++++++++++++
 doc/api/doxy-api-index.md                |    1 +
 doc/api/doxy-api.conf.in                 |    1 +
 doc/guides/prog_guide/dispatcher_lib.rst |  433 +++++++++
 doc/guides/prog_guide/index.rst          |    1 +
 lib/dispatcher/meson.build               |   17 +
 lib/dispatcher/rte_dispatcher.c          |  708 +++++++++++++++
 lib/dispatcher/rte_dispatcher.h          |  468 ++++++++++
 lib/dispatcher/version.map               |   20 +
 lib/meson.build                          |    2 +
 12 files changed, 2703 insertions(+)
 create mode 100644 app/test/test_dispatcher.c
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v5 1/3] lib: introduce dispatcher library
  2023-09-28  7:30                                       ` [PATCH v5 0/3] Add " Mattias Rönnblom
@ 2023-09-28  7:30                                         ` Mattias Rönnblom
  2023-10-05  8:36                                           ` David Marchand
  2023-10-09 18:17                                           ` [PATCH v6 0/3] Add " Mattias Rönnblom
  2023-09-28  7:30                                         ` [PATCH v5 2/3] test: add dispatcher test suite Mattias Rönnblom
  2023-09-28  7:30                                         ` [PATCH v5 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2 siblings, 2 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-28  7:30 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the dispatcher also provides a convenient and flexible
way for the application to use service cores for application-level
processing.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
Reviewed-by: Heng Wang <heng.wang@ericsson.com>

--

PATCH v5:
 o Move from using an integer id to a pointer to reference a dispatcher
   instance, to simplify the API.
 o Fix bug where dispatcher stats retrieval function erroneously depended
   on the user-supplied stats buffer being all-zero.

PATCH v4:
 o Fix bugs in handler and finalizer unregistration. (Naga Harish)
 o Return -EINVAL in cases where NULL pointers were provided in
   calls requiring non-NULL pointers. (Naga Harish)
 o Add experimental warning for the whole API. (Jerin Jacob)

PATCH v3:
 o To underline its optional character and since it does not provide
   hardware abstraction, the event dispatcher is now a separate
   library.
 o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
   shorter and to avoid the rte_event_* namespace.

PATCH v2:
 o Add dequeue batch count statistic.
 o Add statistics reset function to API.
 o Clarify MT safety guarantees (or lack thereof) in the API documentation.
 o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
   to be consistent with similar loops elsewhere in the dispatcher.
 o Fix variable names in finalizer unregister function.

PATCH:
 o Change prefix from RED to EVD, to avoid confusion with random
   early detection.

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function is no longer marked 'const',
   since modifying them may be useful for the application and cause
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 MAINTAINERS                     |   3 +
 doc/api/doxy-api-index.md       |   1 +
 doc/api/doxy-api.conf.in        |   1 +
 lib/dispatcher/meson.build      |  17 +
 lib/dispatcher/rte_dispatcher.c | 708 ++++++++++++++++++++++++++++++++
 lib/dispatcher/rte_dispatcher.h | 468 +++++++++++++++++++++
 lib/dispatcher/version.map      |  20 +
 lib/meson.build                 |   2 +
 8 files changed, 1220 insertions(+)
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index a926155f26..6704cd5b2c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1726,6 +1726,9 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
 M: Pavan Nikhilesh <pbhagavatula@marvell.com>
 F: lib/node/
 
+Dispatcher - EXPERIMENTAL
+M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
+F: lib/dispatcher/
 
 Test Applications
 -----------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index fdeda13932..7d0cad9fed 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -155,6 +155,7 @@ The public API headers are grouped by topics:
 
 - **classification**
   [reorder](@ref rte_reorder.h),
+  [dispatcher](@ref rte_dispatcher.h),
   [distributor](@ref rte_distributor.h),
   [EFD](@ref rte_efd.h),
   [ACL](@ref rte_acl.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a88accd907..59c679e621 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dispatcher \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
new file mode 100644
index 0000000000..c6054a3a5d
--- /dev/null
+++ b/lib/dispatcher/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023 Ericsson AB
+
+if is_windows
+    build = false
+    reason = 'not supported on Windows'
+    subdir_done()
+endif
+
+sources = files(
+        'rte_dispatcher.c',
+)
+headers = files(
+        'rte_dispatcher.h',
+)
+
+deps += ['eventdev']
diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
new file mode 100644
index 0000000000..0e69db2b9b
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.c
@@ -0,0 +1,708 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_dispatcher.h>
+
+#define EVD_MAX_PORTS_PER_LCORE 4
+#define EVD_MAX_HANDLERS 32
+#define EVD_MAX_FINALIZERS 16
+#define EVD_AVG_PRIO_INTERVAL 2000
+#define EVD_SERVICE_NAME "dispatcher"
+
+struct rte_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_dispatcher_handler {
+	int id;
+	rte_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_dispatcher_finalizer {
+	int id;
+	rte_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
+	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
+	struct rte_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_dispatcher {
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
+};
+
+static int
+evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
+		       const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+		       int handler_idx)
+{
+	struct rte_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+				int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		evd_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follows some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
+			EVD_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
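+/*
+ * Sort the dequeued events into per-handler bursts by running each
+ * event through the match callbacks, in handler list order. Each
+ * non-empty burst is then delivered in a single process callback
+ * invocation, improving cache locality compared to delivering
+ * events one at a time. Finally, all finalizers are invoked.
+ */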
+static inline void
+evd_dispatch_events(struct rte_dispatcher *dispatcher,
+		    struct rte_dispatcher_lcore *lcore,
+		    struct rte_dispatcher_lcore_port *port,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = evd_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		evd_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_batch_count++;
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+evd_port_dequeue(struct rte_dispatcher *dispatcher,
+		 struct rte_dispatcher_lcore *lcore,
+		 struct rte_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		evd_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+evd_lcore_process(struct rte_dispatcher *dispatcher,
+		  struct rte_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += evd_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
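+/*
+ * The dispatcher service function, run by the service cores. Polls
+ * all event ports bound to the current lcore. Returning -EAGAIN
+ * indicates to the service framework that no events were dequeued
+ * (i.e., no useful work was performed).
+ */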
+static int32_t
+evd_process(void *userdata)
+{
+	struct rte_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = evd_lcore_process(dispatcher, lcore);
+
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+evd_service_register(struct rte_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = evd_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, sizeof(service.name), EVD_SERVICE_NAME);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+evd_service_unregister(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+struct rte_dispatcher *
+rte_dispatcher_create(uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	*dispatcher = (struct rte_dispatcher) {
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = evd_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		rte_errno = -rc;
+		return NULL;
+	}
+
+	return dispatcher;
+}
+
+int
+rte_dispatcher_free(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	if (dispatcher == NULL)
+		return 0;
+
+	rc = evd_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+uint32_t
+rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher)
+{
+	return dispatcher->service_id;
+}
+
+static int
+lcore_port_index(struct rte_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
+				  uint8_t event_port_id, uint16_t batch_size,
+				  uint64_t timeout, unsigned int lcore_id)
+{
+	struct rte_dispatcher_lcore *lcore;
+	struct rte_dispatcher_lcore_port *port;
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
+				      uint8_t event_port_id,
+				      unsigned int lcore_id)
+{
+	struct rte_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_dispatcher_lcore_port *port;
+	struct rte_dispatcher_lcore_port *last;
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_dispatcher_handler*
+evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
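+/*
+ * Allocate the lowest unused handler id. The handler list is
+ * replicated identically across all lcores, so the lcore 0 instance
+ * may serve as the reference.
+ */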
+static int
+evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+	struct rte_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
+	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
+		return -1;
+
+	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
+		    const struct rte_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+evd_install_handler(struct rte_dispatcher *dispatcher,
+		    const struct rte_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		evd_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_dispatcher_register(struct rte_dispatcher *dispatcher,
+			rte_dispatcher_match_t match_fun, void *match_data,
+			rte_dispatcher_process_t process_fun,
+			void *process_data)
+{
+	struct rte_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	handler.id = evd_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	evd_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	struct rte_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid handler id %d\n", handler_id);
+		return -EINVAL;
+	}
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+evd_uninstall_handler(struct rte_dispatcher *dispatcher,
+		      int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = evd_lcore_uninstall_handler(lcore, handler_id);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id)
+{
+	int rc;
+
+	rc = evd_uninstall_handler(dispatcher, handler_id);
+
+	return rc;
+}
+
+static struct rte_dispatcher_finalizer*
+evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
+		       int handler_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == handler_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_dispatcher_finalizer *
+evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = evd_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
+				 rte_dispatcher_finalize_t finalize_fun,
+				 void *finalize_data)
+{
+	struct rte_dispatcher_finalizer *finalizer;
+
+	finalizer = evd_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
+				   int finalizer_id)
+{
+	struct rte_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, finalizer_id);
+
+	if (unreg_finalizer == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid finalizer id %d\n", finalizer_id);
+		return -EINVAL;
+	}
+
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static int
+evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
+{
+	int rc;
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_start(struct rte_dispatcher *dispatcher)
+{
+	return evd_set_service_runstate(dispatcher, 1);
+}
+
+int
+rte_dispatcher_stop(struct rte_dispatcher *dispatcher)
+{
+	return evd_set_service_runstate(dispatcher, 0);
+}
+
+static void
+evd_aggregate_stats(struct rte_dispatcher_stats *result,
+		    const struct rte_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_batch_count += part->ev_batch_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+void
+rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
+			 struct rte_dispatcher_stats *stats)
+{
+	unsigned int lcore_id;
+
+	*stats = (struct rte_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		const struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		evd_aggregate_stats(stats, &lcore->stats);
+	}
+}
+
+void
+rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		lcore->stats = (struct rte_dispatcher_stats) {};
+	}
+}
diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
new file mode 100644
index 0000000000..0387316d7b
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_DISPATCHER_H__
+#define __RTE_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Dispatcher
+ *
+ * @warning
+ * @b EXPERIMENTAL:
+ * All functions in this file may be changed or removed without prior notice.
+ *
+ * The purpose of the dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ *
+ * @return
+ *   Returns true if this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ */
+typedef void
+(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				  struct rte_event *events, uint16_t num,
+				  void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the dispatcher to notify the
+ * application that it has delivered all events from a particular
+ * batch dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_finalize_register().
+ */
+typedef void
+(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				   void *cb_data);
+
+/**
+ * Dispatcher statistics
+ */
+struct rte_dispatcher_stats {
+	uint64_t poll_count;
+	/**< Number of event dequeue calls made toward the event device. */
+	uint64_t ev_batch_count;
+	/**< Number of non-empty event batches dequeued from event device. */
+	uint64_t ev_dispatch_count;
+	/**< Number of events dispatched to a handler. */
+	uint64_t ev_drop_count;
+	/**< Number of events dropped because no handler was found. */
+};
+
+/**
+ * Create a dispatcher.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   A pointer to a new dispatcher instance, or NULL on failure, in which
+ *   case rte_errno is set.
+ */
+__rte_experimental
+struct rte_dispatcher *
+rte_dispatcher_create(uint8_t event_dev_id);
+
+/**
+ * Free a dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_free(struct rte_dispatcher *dispatcher);
+
+/**
+ * Retrieve the service identifier of a dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  The dispatcher service's id.
+ */
+__rte_experimental
+uint32_t
+rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the dispatcher service is mapped (with rte_service_map_lcore_set())
+ * to a lcore to which no ports are bound, the service function will be a
+ * no-operation.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the
+ * lcore specified by @c lcore_id.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
+				  uint8_t event_port_id, uint16_t batch_size,
+				  uint64_t timeout, unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * lcore specified by @c lcore_id.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
+				      uint8_t event_port_id,
+				      unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of event to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events where no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order in which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different process callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] are dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the dispatcher may choose to deliver first [ev0, ev2]
+ * using A's process callback, and then [ev1] to B - or vice versa.
+ *
+ * rte_dispatcher_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the event
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_register(struct rte_dispatcher *dispatcher,
+			rte_dispatcher_match_t match_fun, void *match_cb_data,
+			rte_dispatcher_process_t process_fun,
+			void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_dispatcher_finalize_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
+				 rte_dispatcher_finalize_t finalize_fun,
+				 void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
+				   int reg_id);
+
+/**
+ * Start a dispatcher instance.
+ *
+ * Enables the dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_dispatcher_start().
+ *
+ * For the dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. A dispatcher's
+ * service is retrieved using rte_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
+ * starting the dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_start(struct rte_dispatcher *dispatcher);
+
+/**
+ * Stop a running dispatcher instance.
+ *
+ * Disables the dispatcher service.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_stop(struct rte_dispatcher *dispatcher);
+
+/**
+ * Retrieve statistics for a dispatcher instance.
+ *
+ * This function is MT safe and may be called by any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ */
+__rte_experimental
+void
+rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
+			 struct rte_dispatcher_stats *stats);
+
+/**
+ * Reset statistics for a dispatcher instance.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but may not produce the correct result if the
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ */
+__rte_experimental
+void
+rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DISPATCHER_H__ */
diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
new file mode 100644
index 0000000000..8f9ad96522
--- /dev/null
+++ b/lib/dispatcher/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	# added in 23.11
+	rte_dispatcher_create;
+	rte_dispatcher_free;
+	rte_dispatcher_service_id_get;
+	rte_dispatcher_bind_port_to_lcore;
+	rte_dispatcher_unbind_port_from_lcore;
+	rte_dispatcher_register;
+	rte_dispatcher_unregister;
+	rte_dispatcher_finalize_register;
+	rte_dispatcher_finalize_unregister;
+	rte_dispatcher_start;
+	rte_dispatcher_stop;
+	rte_dispatcher_stats_get;
+	rte_dispatcher_stats_reset;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 099b0ed18a..3093b338d2 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -35,6 +35,7 @@ libraries = [
         'distributor',
         'efd',
         'eventdev',
+        'dispatcher', # dispatcher depends on eventdev
         'gpudev',
         'gro',
         'gso',
@@ -81,6 +82,7 @@ optional_libs = [
         'cfgfile',
         'compressdev',
         'cryptodev',
+        'dispatcher',
         'distributor',
         'dmadev',
         'efd',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v5 2/3] test: add dispatcher test suite
  2023-09-28  7:30                                       ` [PATCH v5 0/3] Add " Mattias Rönnblom
  2023-09-28  7:30                                         ` [PATCH v5 1/3] lib: introduce " Mattias Rönnblom
@ 2023-09-28  7:30                                         ` Mattias Rönnblom
  2023-10-05  8:36                                           ` David Marchand
  2023-09-28  7:30                                         ` [PATCH v5 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-28  7:30 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Add unit tests for the dispatcher.

--
PATCH v5:
 o Update test suite to use pointer and not integer id when calling
   dispatcher functions.

PATCH v3:
 o Adapt the test suite to dispatcher API name changes.

PATCH v2:
 o Test finalize callback functionality.
 o Test handler and finalizer count upper limits.
 o Add statistics reset test.
 o Make sure the dispatcher supplies the proper event dev id and port id
   back to the application.

PATCH:
 o Extend test to cover often-used handler optimization feature.

RFC v4:
 o Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 MAINTAINERS                |    1 +
 app/test/meson.build       |    1 +
 app/test/test_dispatcher.c | 1046 ++++++++++++++++++++++++++++++++++++
 3 files changed, 1048 insertions(+)
 create mode 100644 app/test/test_dispatcher.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 6704cd5b2c..43890cad0e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1729,6 +1729,7 @@ F: lib/node/
 Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
+F: app/test/test_dispatcher.c
 
 Test Applications
 -----------------
diff --git a/app/test/meson.build b/app/test/meson.build
index 05bae9216d..3303c73817 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -55,6 +55,7 @@ source_file_deps = {
     'test_cycles.c': [],
     'test_debug.c': [],
     'test_devargs.c': ['kvargs'],
+    'test_dispatcher.c': ['dispatcher'],
     'test_distributor.c': ['distributor'],
     'test_distributor_perf.c': ['distributor'],
     'test_dmadev.c': ['dmadev', 'bus_vdev'],
diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c
new file mode 100644
index 0000000000..2bce65fdd9
--- /dev/null
+++ b/app/test/test_dispatcher.c
@@ -0,0 +1,1046 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include "test.h"
+
+#include <stdatomic.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+
+#define NUM_WORKERS 3
+
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct cb_count {
+	uint8_t expected_event_dev_id;
+	uint8_t expected_event_port_id[RTE_MAX_LCORE];
+	atomic_int count;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	struct rte_dispatcher *dispatcher;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	int never_match_reg_id;
+	uint64_t never_match_count;
+	struct cb_count never_process_count;
+
+	struct app_queue queues[NUM_QUEUES];
+
+	int finalize_reg_id;
+	struct cb_count finalize_count;
+
+	bool running;
+
+	atomic_int completed_events;
+	atomic_int errors;
+};
+
+#define RETURN_ON_ERROR(rc) \
+	do {					\
+		if (rc != TEST_SUCCESS)		\
+			return rc;		\
+	} while (0)
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+	TEST_ASSERT(rc >= 0, "Unable to get %s device id", DSW_VDEV);
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher = rte_dispatcher_create(app->event_dev_id);
+
+	TEST_ASSERT(app->dispatcher != NULL, "Unable to create event "
+		    "dispatcher");
+
+	app->dispatcher_service_id =
+		rte_dispatcher_service_id_get(app->dispatcher);
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to set dispatcher service runstate");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rc = rte_dispatcher_free(app->dispatcher);
+	TEST_ASSERT_SUCCESS(rc, "Error freeing dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	app->never_process_count.expected_event_dev_id =
+		app->event_dev_id;
+	app->finalize_count.expected_event_dev_id =
+		app->event_dev_id;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		uint8_t port_id = WORKER_PORT_ID(i);
+
+		int rc = rte_dispatcher_bind_port_to_lcore(
+			app->dispatcher, port_id, DEQUEUE_BURST_SIZE, 0,
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", port_id, lcore_id);
+
+		app->never_process_count.expected_event_port_id[lcore_id] =
+			port_id;
+		app->finalize_count.expected_event_port_id[lcore_id] = port_id;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_dispatcher_unbind_port_from_lcore(
+			app->dispatcher,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+		       struct rte_event *in_events, uint16_t num,
+		       void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		atomic_fetch_add_explicit(&app->completed_events, num,
+					  memory_order_relaxed);
+}
+
+static bool
+never_match(const struct rte_event *event __rte_unused, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count)++;
+
+	return false;
+}
+
+static void
+test_app_never_process(uint8_t event_dev_id,
+		       uint8_t event_port_id,
+		       struct rte_event *in_events __rte_unused,
+		       uint16_t num, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		atomic_fetch_add_explicit(&count->count, num,
+					  memory_order_relaxed);
+}
+
+static void
+finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		atomic_fetch_add_explicit(&count->count, 1,
+					  memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	app->never_match_reg_id =
+		rte_dispatcher_register(app->dispatcher, never_match,
+					&app->never_match_count,
+					test_app_never_process,
+					&app->never_process_count);
+
+	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
+		    "never-match handler");
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_dispatcher_register(app->dispatcher,
+						 match_queue, (void *)queue_id,
+						 test_app_process_queue,
+						 app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	app->finalize_reg_id =
+		rte_dispatcher_finalize_register(app->dispatcher,
+						       finalize,
+						       &app->finalize_count);
+	TEST_ASSERT(app->finalize_reg_id >= 0, "Error registering "
+		    "finalize callback");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+	int rc;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	rc = rte_dispatcher_unregister(app->dispatcher, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+	int rc;
+
+	if (app->never_match_reg_id >= 0) {
+		rc = rte_dispatcher_unregister(app->dispatcher,
+						     app->never_match_reg_id);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
+				    "handler");
+		app->never_match_reg_id = -1;
+	}
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		rc = test_app_unregister_callback(app, i);
+		RETURN_ON_ERROR(rc);
+	}
+
+	if (app->finalize_reg_id >= 0) {
+		rc = rte_dispatcher_finalize_unregister(
+			app->dispatcher, app->finalize_reg_id
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalize "
+				    "callback");
+		app->finalize_reg_id = -1;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_start(app->dispatcher);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_stop(app->dispatcher);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_reset_dispatcher_stats(struct test_app *app)
+{
+	struct rte_dispatcher_stats stats;
+
+	rte_dispatcher_stats_reset(app->dispatcher);
+
+	memset(&stats, 0xff, sizeof(stats));
+
+	rte_dispatcher_stats_get(app->dispatcher, &stats);
+
+	TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
+			  "not zero");
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
+			    "Too few lcores. Needs at least %d worker lcores",
+			    NUM_SERVICE_CORES);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+
+		RETURN_ON_ERROR(rc);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_start_dispatcher(app);
+
+	app->running = true;
+
+	return rc;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_stop_service_cores(app);
+	RETURN_ON_ERROR(rc);
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_create_dispatcher(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_setup_service_cores(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_register_callbacks(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_bind_ports(test_app);
+
+	return rc;
+}
+
+static void test_teardown(void)
+{
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return atomic_load_explicit(&app->completed_events,
+				    memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return atomic_load_explicit(&app->errors, memory_order_relaxed);
+}
+
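+/*
+ * Enqueue NUM_EVENTS new events on queue 0, and let the handlers
+ * forward each event through all NUM_QUEUES queues, verifying
+ * per-flow sequence numbers at every stage. Upon completion, check
+ * that the dispatcher statistics and the never-match and finalize
+ * callback counts are consistent with the generated traffic.
+ */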
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_dispatcher_stats stats;
+	rte_dispatcher_stats_get(test_app->dispatcher, &stats);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
+			  "Never-match handler's process function has "
+			  "been called");
+
+	int finalize_count =
+		atomic_load_explicit(&test_app->finalize_count.count,
+				     memory_order_relaxed);
+
+	TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
+	TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
+		    "Finalize count larger than event count");
+
+	TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
+			  "%"PRIu64" batches dequeued, but finalize called %d "
+			  "times", stats.ev_batch_count, finalize_count);
+
+	/*
+	 * The event dispatcher should call often-matching match functions
+	 * more often, and thus this never-matching match function should
+	 * be called relatively infrequently.
+	 */
+	TEST_ASSERT(test_app->never_match_count <
+		    (stats.ev_dispatch_count / 4),
+		    "Never-matching match function called suspiciously often");
+
+	rc = test_app_reset_dispatcher_stats(test_app);
+	RETURN_ON_ERROR(rc);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue;
+	struct rte_dispatcher_stats stats;
+
+	unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
+
+	rc = test_app_start(test_app);
+	RETURN_ON_ERROR(rc);
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	RETURN_ON_ERROR(rc);
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rte_dispatcher_stats_get(test_app->dispatcher, &stats);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	RETURN_ON_ERROR(rc);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+#define MORE_THAN_MAX_HANDLERS 1000
+#define MIN_HANDLERS 32
+
+static int
+test_many_handler_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_HANDLERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+
+	RETURN_ON_ERROR(rc);
+
+	for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
+		reg_id = rte_dispatcher_register(test_app->dispatcher,
+						 never_match, NULL,
+						 test_app_never_process, NULL);
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
+		    "after %d handler registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_unregister(test_app->dispatcher,
+					       reg_ids[i]);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+dummy_finalize(uint8_t event_dev_id __rte_unused,
+	       uint8_t event_port_id __rte_unused,
+	       void *cb_data __rte_unused)
+{
+}
+
+#define MORE_THAN_MAX_FINALIZERS 1000
+#define MIN_FINALIZERS 16
+
+static int
+test_many_finalize_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_FINALIZERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+
+	RETURN_ON_ERROR(rc);
+
+	for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
+		reg_id = rte_dispatcher_finalize_register(
+			test_app->dispatcher, dummy_finalize, NULL
+		);
+
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
+		    "already after %d registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_finalize_unregister(
+			test_app->dispatcher, reg_ids[i]
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_handler_registrations),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_finalize_registrations),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_TEST_COMMAND(dispatcher_autotest, test_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v5 3/3] doc: add dispatcher programming guide
  2023-09-28  7:30                                       ` [PATCH v5 0/3] Add " Mattias Rönnblom
  2023-09-28  7:30                                         ` [PATCH v5 1/3] lib: introduce " Mattias Rönnblom
  2023-09-28  7:30                                         ` [PATCH v5 2/3] test: add dispatcher test suite Mattias Rönnblom
@ 2023-09-28  7:30                                         ` Mattias Rönnblom
  2023-10-05  8:36                                           ` David Marchand
  2 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-28  7:30 UTC (permalink / raw)
  To: dev
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Provide a programming guide for the dispatcher library.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--
PATCH v5:
 o Update guide to match API changes related to dispatcher ids.

PATCH v3:
 o Adapt guide to the dispatcher API name changes.

PATCH:
 o Improve grammar and spelling.

RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 MAINTAINERS                              |   1 +
 doc/guides/prog_guide/dispatcher_lib.rst | 433 +++++++++++++++++++++++
 doc/guides/prog_guide/index.rst          |   1 +
 3 files changed, 435 insertions(+)
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst

diff --git a/MAINTAINERS b/MAINTAINERS
index 43890cad0e..ab35498204 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1730,6 +1730,7 @@ Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
 F: app/test/test_dispatcher.c
+F: doc/guides/prog_guide/dispatcher_lib.rst
 
 Test Applications
 -----------------
diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst
new file mode 100644
index 0000000000..951db06081
--- /dev/null
+++ b/doc/guides/prog_guide/dispatcher_lib.rst
@@ -0,0 +1,433 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Dispatcher
+==========
+
+Overview
+--------
+
+The purpose of the dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The dispatcher replaces the conditional logic that follows an event
+device dequeue operation, where events are dispatched to different
+parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int n;
+            unsigned int i;
+
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+                                        MAX_BURST, 0);
+
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+    
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example, ``module_a_process()`` is
+broken into two processing stages — a module-internal affair — the
+shared conditional code must be updated to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive the event, using
+  a set of application-provided, per-handler event matching callback
+  functions.
+* Deliver the events matching a particular handler to that handler,
+  using its process callback.
+
+Had the above application made use of the dispatcher, the code
+relevant for its module A might have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+           return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+    
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+    
+    /* In the module's initialization code */
+    rte_dispatcher_register(dispatcher, module_a_match, NULL,
+                            module_a_process_events, module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or,
+phrased differently, what replaces the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is
+run as a DPDK :doc:`Service <service_cores>`.
+
+The dispatcher is a layer between the application and the event device
+in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
+
+Dispatcher Creation
+-------------------
+
+A dispatcher is created using ``rte_dispatcher_create()``.
+
+The event device must be configured before the dispatcher is created.
+
+Usually, only one dispatcher is needed per event device. A dispatcher
+handles exactly one event device.
+
+A dispatcher is freed using the ``rte_dispatcher_free()``
+function. The dispatcher's service functions must not be running on
+any lcore at the point of this call.
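+
+As a minimal sketch (error handling omitted, per the convention of
+this chapter, and assuming ``event_dev_id`` identifies an
+already-configured event device):
+
+.. code-block:: c
+
+    struct rte_dispatcher *dispatcher;
+
+    dispatcher = rte_dispatcher_create(event_dev_id);
+
+    /* ... bind ports, register handlers, start the dispatcher ... */
+
+    /* The dispatcher's service function must no longer be running. */
+    rte_dispatcher_free(dispatcher);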
+
+Event Port Binding
+------------------
+
+To be able to dequeue events, the dispatcher must know which event
+ports to use on each of the lcores it uses. The application
+provides this information using
+``rte_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
+The ``rte_dispatcher_unbind_port_from_lcore()`` function is used to
+undo this operation.
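+
+Below is a sketch of a binding call; the ``event_port_id`` and
+``lcore_id`` variables are assumed to come from the application's
+deployment logic, and the batch size (32) and dequeue timeout (0)
+values are illustrative only:
+
+.. code-block:: c
+
+    rte_dispatcher_bind_port_to_lcore(dispatcher, event_port_id,
+                                      32, 0, lcore_id);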
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the dispatcher's
+service function is running on any lcore.
+
+Event Handlers
+--------------
+
+The dispatcher handler is an interface between the dispatcher and an
+application module, used to route events to the appropriate part of
+the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_dispatcher_match_t`` callback, whose job is to decide if
+  an event is to be the property of this handler.
+* The ``rte_dispatcher_process_t`` callback, which is used by the
+  dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the dispatcher may impose an
+upper limit on the number of handlers. In addition, installing a large
+number of handlers increases dispatcher overhead, although this does
+not necessarily translate to a system-level performance degradation. See
+the section on :ref:`Event Clustering` for more information.
+
+Handler registration and unregistration cannot safely be done while
+the dispatcher's service function is running on any lcore.
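+
+As a sketch, a module may store the non-negative handler id returned
+by ``rte_dispatcher_register()`` and later use it to remove the
+handler (e.g., at module teardown); the ``module_a_handler_id``
+variable is hypothetical:
+
+.. code-block:: c
+
+    static int module_a_handler_id;
+
+    /* In the module's initialization code */
+    module_a_handler_id =
+            rte_dispatcher_register(dispatcher, module_a_match, NULL,
+                                    module_a_process_events,
+                                    module_a_data);
+
+    /* In the module's teardown code (dispatcher service stopped) */
+    rte_dispatcher_unregister(dispatcher, module_a_handler_id);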
+
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides whether an event should
+be delivered to this handler.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
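+
+A match function may consider any combination of ``rte_event``
+fields. Below is a sketch of a match function keyed on both queue id
+and sub event type; the ``MODULE_C_*`` constants are hypothetical:
+
+.. code-block:: c
+
+    static bool
+    module_c_match(const struct rte_event *event, void *cb_data)
+    {
+            return event->queue_id == MODULE_C_QUEUE_ID &&
+                    event->sub_event_type == MODULE_C_SUB_EVENT_TYPE;
+    }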
+
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the dispatcher's service
+function, upon the arrival of events to the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls being
+made. For example, if the dispatcher dequeues two events from
+the event device, it may choose to find out the destination for the
+first event, and deliver it, and then continue to find out the
+destination for the second, and then deliver that event as well. The
+dispatcher may also choose a strategy where no event is delivered
+until the destination handlers for both events have been determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The dispatcher maintains the order of events destined for the same
+handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the dispatcher delivers these events to the application.
+
+The dispatcher *does not* guarantee to maintain the order of events
+delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block::
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The dispatcher may deliver the events in the following manner:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The dispatcher may also choose to cluster (group) all events destined
+for ``module_b_stage_0_process()`` into one array:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the dispatcher may not do is to reorder event
+``e1`` so that it precedes ``e0`` in the array passed to module B's
+stage 0 process callback.
+
+Although clustering requires some extra work for the dispatcher, it
+leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The dispatcher may be configured to notify one or more parts of the
+application when the matching and processing of a batch of events has
+completed.
+
+The ``rte_dispatcher_finalize_register()`` call is used to register a
+finalize callback. The ``rte_dispatcher_finalize_unregister()``
+function is used to remove a callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+module, or in a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned int lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+    
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_finalize_register(dispatcher, finalize_batch,
+                                     shared_event_buffer);
+
+The dispatcher does not track any relationship between a handler and a
+finalize callback, and all finalize callbacks will be called if (and
+only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the dispatcher's service function is running on any lcore.
+
+Service
+-------
+
+The dispatcher is a DPDK service, and is managed in a manner similar
+to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-created dispatcher to that
+lcore.
+
+.. code-block:: c
+
+    static void
+    launch_dispatcher_core(struct rte_dispatcher *dispatcher,
+                           unsigned int lcore_id)
+    {
+            uint32_t service_id;
+    
+            rte_service_lcore_add(lcore_id);
+    
+            rte_dispatcher_service_id_get(dispatcher, &service_id);
+    
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+    
+            rte_service_lcore_start(lcore_id);
+    
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the dispatcher must be started.
+
+.. code-block:: c
+
+    rte_dispatcher_start(dispatcher);
+
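+Conversely, a running dispatcher may be stopped using
+``rte_dispatcher_stop()``. This must be done (and the service
+function no longer be running on any lcore) before the dispatcher is
+freed:
+
+.. code-block:: c
+
+    rte_dispatcher_stop(dispatcher);
+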
+
+Multi Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where the CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In light of this, it makes sense to have the dispatcher service be
+the only DPDK service on all lcores used for packet processing — at
+least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the dispatcher into the
+application, were performed on the lcore even when no events were
+received, are prime targets for being converted into such auxiliary
+services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
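+
+As a sketch (not a definitive recipe), such a task could be wrapped
+in a small service of its own and mapped to the dispatcher lcores
+using the same service APIs as in the previous section. The
+``timer_service_run()`` and ``setup_timer_service()`` names are
+hypothetical, and error handling is omitted:
+
+.. code-block:: c
+
+    static int32_t
+    timer_service_run(void *userdata __rte_unused)
+    {
+            /* Manage the calling lcore's timer wheel. */
+            rte_timer_manage();
+
+            return 0;
+    }
+
+    static void
+    setup_timer_service(unsigned int lcore_id)
+    {
+            struct rte_service_spec service = {
+                    .name = "timer",
+                    .callback = timer_service_run,
+                    .capabilities = RTE_SERVICE_CAP_MT_SAFE
+            };
+            uint32_t service_id;
+
+            rte_service_component_register(&service, &service_id);
+            rte_service_component_runstate_set(service_id, 1);
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+            rte_service_runstate_set(service_id, 1);
+    }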
+
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to assure resource reclamation occurs even though some
+lcores currently do not process any events.
+
+If more services than the dispatcher service are mapped to a service
+lcore, it's important that the other services are well-behaved and
+don't interfere with event processing to the extent that the system's
+throughput and/or latency requirements are at risk of not being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 52a6d9e7aa..ab05bd6074 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -60,6 +60,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    dispatcher_lib
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v4 1/3] lib: introduce dispatcher library
  2023-09-27  8:13                                           ` Bruce Richardson
@ 2023-09-28  7:44                                             ` Mattias Rönnblom
  2023-10-03 17:31                                             ` Jerin Jacob
  1 sibling, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-09-28  7:44 UTC (permalink / raw)
  To: Bruce Richardson, Jerin Jacob
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On 2023-09-27 10:13, Bruce Richardson wrote:
> On Tue, Sep 26, 2023 at 11:58:37PM +0530, Jerin Jacob wrote:
>> On Mon, Sep 25, 2023 at 12:41 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>>>
>>> On 2023-09-22 09:38, Mattias Rönnblom wrote:
>>>
>>> <snip>
>>>
>>>> +int
>>>> +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
>>>> +{
>>>
>>>
>>> There are two changes I'm considering:
>>>
>>> 1) Removing the "id" to identify the dispatcher, replacing it with an
>>> forward-declared rte_dispatcher struct pointer.
>>>
>>> struct rte_dispatcher;
>>>
>>> struct rte_dispatcher *
>>> rte_dispatcher_create(uint8_t event_dev_id);
>>>
>>>
>>> The original reason for using an integer id to identify a dispatcher is
>>> to make it look like everything else in Eventdev. I find this pattern a
>>> little awkward to use - in particular the fact the id is
>>> application-allocated (and thus require coordination between different
>>> part of the application in case multiple instances are used).
>>>
>>> 2) Adding a flags field to the create function "for future use". But
>>> since the API is experimental, there may not be that much need to
>>> attempt to be future-proof?
>>>
>>> Any thoughts are appreciated.
>>
>> IMO, better to have rte_dispatcher_create(struct
>> rte_dispatch_create_params *params)
>> for better future proofing with specific
>> rte_dispatch_crearte_params_init() API(No need to add reserved fields
>> in rte_dispatch_create_params  now, may need only for before removing
>> experimental status)
>>
>> Just 2c.
>>
> 
> I don't like using structs in those cases, I'd much rather have a flags
> parameter, as flags can be checked for explicit zeros for future proofing,
> while a struct cannot be checked for extra space on the end for future
> fields added.
> 
> Furthermore, if we need to add new parameters to the create function, I
> actually believe it is better to add them as explicit parameters rather
> than new fields to the struct. Struct fields can be missed by a user just
> recompiling, while new function parameters will be flagged by the compiler
> to make the user aware of the change. [There would be no change for ABI
> compatibility as function versioning would be usable in both cases]
> 

I will just have the create() function take the eventdev id only, and 
thus make no attempt at "future-proofing". Then we will see what the 
future holds; flags, function parameters, or function parameters packed 
into structs.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v4 1/3] lib: introduce dispatcher library
  2023-09-27  8:13                                           ` Bruce Richardson
  2023-09-28  7:44                                             ` Mattias Rönnblom
@ 2023-10-03 17:31                                             ` Jerin Jacob
  1 sibling, 0 replies; 102+ messages in thread
From: Jerin Jacob @ 2023-10-03 17:31 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Mattias Rönnblom, Mattias Rönnblom, dev, Jerin Jacob,
	techboard, harry.van.haaren, Peter Nilsson, Heng Wang,
	Naga Harish K S V, Pavan Nikhilesh, Gujjar Abhinandan S,
	Erik Gabriel Carrillo, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Liang Ma, Peter Mccarthy, Zhirun Yan

On Wed, Sep 27, 2023 at 1:43 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Tue, Sep 26, 2023 at 11:58:37PM +0530, Jerin Jacob wrote:
> > On Mon, Sep 25, 2023 at 12:41 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> > >
> > > On 2023-09-22 09:38, Mattias Rönnblom wrote:
> > >
> > > <snip>
> > >
> > > > +int
> > > > +rte_dispatcher_create(uint8_t id, uint8_t event_dev_id)
> > > > +{
> > >
> > >
> > > There are two changes I'm considering:
> > >
> > > 1) Removing the "id" to identify the dispatcher, replacing it with an
> > > forward-declared rte_dispatcher struct pointer.
> > >
> > > struct rte_dispatcher;
> > >
> > > struct rte_dispatcher *
> > > rte_dispatcher_create(uint8_t event_dev_id);
> > >
> > >
> > > The original reason for using an integer id to identify a dispatcher is
> > > to make it look like everything else in Eventdev. I find this pattern a
> > > little awkward to use - in particular the fact the id is
> > > application-allocated (and thus require coordination between different
> > > part of the application in case multiple instances are used).
> > >
> > > 2) Adding a flags field to the create function "for future use". But
> > > since the API is experimental, there may not be that much need to
> > > attempt to be future-proof?
> > >
> > > Any thoughts are appreciated.
> >
> > IMO, better to have rte_dispatcher_create(struct
> > rte_dispatch_create_params *params)
> > for better future proofing with specific
> > rte_dispatch_crearte_params_init() API(No need to add reserved fields
> > in rte_dispatch_create_params  now, may need only for before removing
> > experimental status)
> >
> > Just 2c.
> >
>
> I don't like using structs in those cases, I'd much rather have a flags
> parameter, as flags can be checked for explicit zeros for future proofing,
> while a struct cannot be checked for extra space on the end for future
> fields added.

For the lib/dispatcher library, I don't have a specific preference, so
anything is fine for me.
However, I thought of understanding your rationale for arguments vs
structure (looks like more of a vi vs emacs discussion), for _my
understanding_.

In my view,
# Use flags, when setting up, to express specific behavior, not as a
way of inputting a lot of parameters.
# Do we need to check for extra space if the struct has reserved fields
and an init() function for filling in defaults?

>
> Furthermore, if we need to add new parameters to the create function, I
> actually believe it is better to add them as explicit parameters rather
> than new fields to the struct. Struct fields can be missed by a user just
> recompiling, while new function parameters will be flagged by the compiler

I would see this as being on the positive side, when

- The same code base needs to support multiple DPDK versions.
- A lot of times, the API consumer may need only _default_ values, like
the local_cache value in the mempool_create API. So a struct with an
_init() gets the required values in an easy way.

My views are based mostly on the existing rte_mempool_create() API. For
some reason, I don't like this scheme:
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
                   unsigned cache_size, unsigned private_data_size,
                   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
                   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
                   int socket_id, unsigned flags);


> to make the user aware of the change. [There would be no change for ABI
> compatibility as function versioning would be usable in both cases]

Yes. But that needs too much template code via VERSION_SYMBOL, which
the structure scheme does not.




>
> /Bruce

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 1/3] lib: introduce dispatcher library
  2023-09-28  7:30                                         ` [PATCH v5 1/3] lib: introduce " Mattias Rönnblom
@ 2023-10-05  8:36                                           ` David Marchand
  2023-10-05 10:08                                             ` Mattias Rönnblom
  2023-10-09 18:17                                           ` [PATCH v6 0/3] Add " Mattias Rönnblom
  1 sibling, 1 reply; 102+ messages in thread
From: David Marchand @ 2023-10-05  8:36 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

Hello Mattias,

On Thu, Sep 28, 2023 at 9:36 AM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> The purpose of the dispatcher library is to help reduce coupling in an
> Eventdev-based DPDK application.
>
> In addition, the dispatcher also provides a convenient and flexible
> way for the application to use service cores for application-level
> processing.
>
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
> Reviewed-by: Heng Wang <heng.wang@ericsson.com>
>

I have some simple comments.


> --
>
> PATCH v5:
>  o Move from using an integer id to a pointer to reference a dispatcher
>    instance, to simplify the API.
>  o Fix bug where dispatcher stats retrieval function erroneously depended
>    on the user-supplied stats buffer being all-zero.
>
> PATCH v4:
>  o Fix bugs in handler and finalizer unregistration. (Naga Harish)
>  o Return -EINVAL in cases where NULL pointers were provided in
>    calls requiring non-NULL pointers. (Naga Harish)
>  o Add experimental warning for the whole API. (Jerin Jacob)
>
> PATCH v3:
>  o To underline its optional character and since it does not provide
>    hardware abstraction, the event dispatcher is now a separate
>    library.
>  o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
>    shorter and to avoid the rte_event_* namespace.
>
> PATCH v2:
>  o Add dequeue batch count statistic.
>  o Add statistics reset function to API.
>  o Clarify MT safety guarantees (or lack thereof) in the API documentation.
>  o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
>    to be consistent with similar loops elsewhere in the dispatcher.
>  o Fix variable names in finalizer unregister function.
>
> PATCH:
>  o Change prefix from RED to EVD, to avoid confusion with random
>    early detection.
>
> RFC v4:
>  o Move handlers to per-lcore data structures.
>  o Introduce mechanism which rearranges handlers so that often-used
>    handlers tend to be tried first.
>  o Terminate dispatch loop in case all events are delivered.
>  o To avoid the dispatcher's service function hogging the CPU, process
>    only one batch per call.
>  o Have service function return -EAGAIN if no work is performed.
>  o Events delivered in the process function is no longer marked 'const',
>    since modifying them may be useful for the application and cause
>    no difficulties for the dispatcher.
>  o Various minor API documentation improvements.
>
> RFC v3:
>  o Add stats_get() function to the version.map file.
> ---
>  MAINTAINERS                     |   3 +
>  doc/api/doxy-api-index.md       |   1 +
>  doc/api/doxy-api.conf.in        |   1 +
>  lib/dispatcher/meson.build      |  17 +
>  lib/dispatcher/rte_dispatcher.c | 708 ++++++++++++++++++++++++++++++++
>  lib/dispatcher/rte_dispatcher.h | 468 +++++++++++++++++++++
>  lib/dispatcher/version.map      |  20 +
>  lib/meson.build                 |   2 +
>  8 files changed, 1220 insertions(+)
>  create mode 100644 lib/dispatcher/meson.build
>  create mode 100644 lib/dispatcher/rte_dispatcher.c
>  create mode 100644 lib/dispatcher/rte_dispatcher.h
>  create mode 100644 lib/dispatcher/version.map
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index a926155f26..6704cd5b2c 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -1726,6 +1726,9 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
>  M: Pavan Nikhilesh <pbhagavatula@marvell.com>
>  F: lib/node/
>
> +Dispatcher - EXPERIMENTAL
> +M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> +F: lib/dispatcher/

Double empty line between sections in MAINTAINERS file, please.

>
>  Test Applications
>  -----------------
> diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> index fdeda13932..7d0cad9fed 100644
> --- a/doc/api/doxy-api-index.md
> +++ b/doc/api/doxy-api-index.md
> @@ -155,6 +155,7 @@ The public API headers are grouped by topics:
>
>  - **classification**
>    [reorder](@ref rte_reorder.h),
> +  [dispatcher](@ref rte_dispatcher.h),
>    [distributor](@ref rte_distributor.h),
>    [EFD](@ref rte_efd.h),
>    [ACL](@ref rte_acl.h),
> diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
> index a88accd907..59c679e621 100644
> --- a/doc/api/doxy-api.conf.in
> +++ b/doc/api/doxy-api.conf.in
> @@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
>                            @TOPDIR@/lib/cmdline \
>                            @TOPDIR@/lib/compressdev \
>                            @TOPDIR@/lib/cryptodev \
> +                          @TOPDIR@/lib/dispatcher \
>                            @TOPDIR@/lib/distributor \
>                            @TOPDIR@/lib/dmadev \
>                            @TOPDIR@/lib/efd \


I see no release note updates; please add one entry (in the "New
features" section) to announce this new library.


> diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
> new file mode 100644
> index 0000000000..c6054a3a5d
> --- /dev/null
> +++ b/lib/dispatcher/meson.build
> @@ -0,0 +1,17 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2023 Ericsson AB
> +
> +if is_windows
> +    build = false
> +    reason = 'not supported on Windows'
> +    subdir_done()
> +endif
> +
> +sources = files(
> +        'rte_dispatcher.c',
> +)
> +headers = files(
> +        'rte_dispatcher.h',
> +)

For a single $file, you can go with a single line: files('$file')


> +
> +deps += ['eventdev']
> diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
> new file mode 100644
> index 0000000000..0e69db2b9b
> --- /dev/null
> +++ b/lib/dispatcher/rte_dispatcher.c
> @@ -0,0 +1,708 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Ericsson AB
> + */
> +
> +#include <stdbool.h>
> +#include <stdint.h>
> +
> +#include <rte_branch_prediction.h>
> +#include <rte_common.h>
> +#include <rte_lcore.h>
> +#include <rte_random.h>
> +#include <rte_service_component.h>
> +
> +#include "eventdev_pmd.h"
> +
> +#include <rte_dispatcher.h>
> +
> +#define EVD_MAX_PORTS_PER_LCORE 4
> +#define EVD_MAX_HANDLERS 32
> +#define EVD_MAX_FINALIZERS 16
> +#define EVD_AVG_PRIO_INTERVAL 2000
> +#define EVD_SERVICE_NAME "dispatcher"
> +
> +struct rte_dispatcher_lcore_port {
> +       uint8_t port_id;
> +       uint16_t batch_size;
> +       uint64_t timeout;
> +};
> +
> +struct rte_dispatcher_handler {
> +       int id;
> +       rte_dispatcher_match_t match_fun;
> +       void *match_data;
> +       rte_dispatcher_process_t process_fun;
> +       void *process_data;
> +};
> +
> +struct rte_dispatcher_finalizer {
> +       int id;
> +       rte_dispatcher_finalize_t finalize_fun;
> +       void *finalize_data;
> +};
> +
> +struct rte_dispatcher_lcore {
> +       uint8_t num_ports;
> +       uint16_t num_handlers;
> +       int32_t prio_count;
> +       struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
> +       struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
> +       struct rte_dispatcher_stats stats;
> +} __rte_cache_aligned;
> +
> +struct rte_dispatcher {
> +       uint8_t event_dev_id;
> +       int socket_id;
> +       uint32_t service_id;
> +       struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
> +       uint16_t num_finalizers;
> +       struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
> +};
> +
> +static int
> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
> +                      const struct rte_event *event)

Wrt DPDK coding style, indent is a single tab.
Adding an extra tab is recommended when continuing control statements
like if()/for()/..

On the other hand, max accepted length for a line is 100 columns.

Wdyt of a single line for this specific case?
And please check the indentation in the rest of the file.


> +{
> +       uint16_t i;
> +
> +       for (i = 0; i < lcore->num_handlers; i++) {
> +               struct rte_dispatcher_handler *handler =
> +                       &lcore->handlers[i];
> +
> +               if (handler->match_fun(event, handler->match_data))
> +                       return i;
> +       }
> +
> +       return -1;
> +}
> +
> +static void
> +evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
> +                      int handler_idx)
> +{
> +       struct rte_dispatcher_handler tmp;
> +
> +       if (handler_idx == 0)
> +               return;
> +
> +       /* Let the lucky handler "bubble" up the list */
> +
> +       tmp = lcore->handlers[handler_idx - 1];
> +
> +       lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
> +
> +       lcore->handlers[handler_idx] = tmp;

We don't need so many blank lines.


> +}
> +
> +static inline void
> +evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
> +                               int handler_idx, uint16_t handler_events)
> +{
> +       lcore->prio_count -= handler_events;
> +
> +       if (unlikely(lcore->prio_count <= 0)) {
> +               evd_prioritize_handler(lcore, handler_idx);
> +
> +               /*
> +                * Randomize the interval in the unlikely case
> +                * the traffic follow some very strict pattern.
> +                */
> +               lcore->prio_count =
> +                       rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
> +                       EVD_AVG_PRIO_INTERVAL / 2;
> +       }
> +}
> +
> +static inline void
> +evd_dispatch_events(struct rte_dispatcher *dispatcher,
> +                   struct rte_dispatcher_lcore *lcore,
> +                   struct rte_dispatcher_lcore_port *port,
> +                   struct rte_event *events, uint16_t num_events)
> +{
> +       int i;
> +       struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
> +       uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
> +       uint16_t drop_count = 0;
> +       uint16_t dispatch_count;
> +       uint16_t dispatched = 0;
> +
> +       for (i = 0; i < num_events; i++) {
> +               struct rte_event *event = &events[i];
> +               int handler_idx;
> +
> +               handler_idx = evd_lookup_handler_idx(lcore, event);
> +
> +               if (unlikely(handler_idx < 0)) {
> +                       drop_count++;
> +                       continue;
> +               }
> +
> +               bursts[handler_idx][burst_lens[handler_idx]] = *event;
> +               burst_lens[handler_idx]++;
> +       }
> +
> +       dispatch_count = num_events - drop_count;
> +
> +       for (i = 0; i < lcore->num_handlers &&
> +                dispatched < dispatch_count; i++) {
> +               struct rte_dispatcher_handler *handler =
> +                       &lcore->handlers[i];
> +               uint16_t len = burst_lens[i];
> +
> +               if (len == 0)
> +                       continue;
> +
> +               handler->process_fun(dispatcher->event_dev_id, port->port_id,
> +                                    bursts[i], len, handler->process_data);
> +
> +               dispatched += len;
> +
> +               /*
> +                * Safe, since any reshuffling will only involve
> +                * already-processed handlers.
> +                */
> +               evd_consider_prioritize_handler(lcore, i, len);
> +       }
> +
> +       lcore->stats.ev_batch_count++;
> +       lcore->stats.ev_dispatch_count += dispatch_count;
> +       lcore->stats.ev_drop_count += drop_count;
> +
> +       for (i = 0; i < dispatcher->num_finalizers; i++) {
> +               struct rte_dispatcher_finalizer *finalizer =
> +                       &dispatcher->finalizers[i];
> +
> +               finalizer->finalize_fun(dispatcher->event_dev_id,
> +                                       port->port_id,
> +                                       finalizer->finalize_data);
> +       }
> +}
> +
> +static __rte_always_inline uint16_t
> +evd_port_dequeue(struct rte_dispatcher *dispatcher,
> +                struct rte_dispatcher_lcore *lcore,
> +                struct rte_dispatcher_lcore_port *port)
> +{
> +       uint16_t batch_size = port->batch_size;
> +       struct rte_event events[batch_size];
> +       uint16_t n;
> +
> +       n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
> +                                   events, batch_size, port->timeout);
> +
> +       if (likely(n > 0))
> +               evd_dispatch_events(dispatcher, lcore, port, events, n);
> +
> +       lcore->stats.poll_count++;
> +
> +       return n;
> +}
> +
> +static __rte_always_inline uint16_t
> +evd_lcore_process(struct rte_dispatcher *dispatcher,
> +                 struct rte_dispatcher_lcore *lcore)
> +{
> +       uint16_t i;
> +       uint16_t event_count = 0;
> +
> +       for (i = 0; i < lcore->num_ports; i++) {
> +               struct rte_dispatcher_lcore_port *port =
> +                       &lcore->ports[i];
> +
> +               event_count += evd_port_dequeue(dispatcher, lcore, port);
> +       }
> +
> +       return event_count;
> +}
> +
> +static int32_t
> +evd_process(void *userdata)
> +{
> +       struct rte_dispatcher *dispatcher = userdata;
> +       unsigned int lcore_id = rte_lcore_id();
> +       struct rte_dispatcher_lcore *lcore =
> +               &dispatcher->lcores[lcore_id];
> +       uint64_t event_count;
> +
> +       event_count = evd_lcore_process(dispatcher, lcore);
> +
> +       if (unlikely(event_count == 0))
> +               return -EAGAIN;
> +
> +       return 0;
> +}
> +
> +static int
> +evd_service_register(struct rte_dispatcher *dispatcher)
> +{
> +       struct rte_service_spec service = {
> +               .callback = evd_process,
> +               .callback_userdata = dispatcher,
> +               .capabilities = RTE_SERVICE_CAP_MT_SAFE,
> +               .socket_id = dispatcher->socket_id
> +       };
> +       int rc;
> +
> +       snprintf(service.name, sizeof(service.name), EVD_SERVICE_NAME);
> +
> +       rc = rte_service_component_register(&service, &dispatcher->service_id);
> +

No need for blank line.

> +       if (rc)

if (rc != 0)

> +               RTE_EDEV_LOG_ERR("Registration of dispatcher service "
> +                                "%s failed with error code %d\n",
> +                                service.name, rc);
> +
> +       return rc;
> +}
> +
> +static int
> +evd_service_unregister(struct rte_dispatcher *dispatcher)
> +{
> +       int rc;
> +
> +       rc = rte_service_component_unregister(dispatcher->service_id);
> +
> +       if (rc)
> +               RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
> +                                "failed with error code %d\n", rc);
> +
> +       return rc;
> +}
> +
> +struct rte_dispatcher *
> +rte_dispatcher_create(uint8_t event_dev_id)
> +{
> +       int socket_id;
> +       struct rte_dispatcher *dispatcher;
> +       int rc;
> +
> +       socket_id = rte_event_dev_socket_id(event_dev_id);
> +
> +       dispatcher =
> +               rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
> +                                 RTE_CACHE_LINE_SIZE, socket_id);
> +
> +       if (dispatcher == NULL) {
> +               RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
> +               rte_errno = ENOMEM;
> +               return NULL;
> +       }
> +
> +       *dispatcher = (struct rte_dispatcher) {
> +               .event_dev_id = event_dev_id,
> +               .socket_id = socket_id
> +       };
> +
> +       rc = evd_service_register(dispatcher);
> +
> +       if (rc < 0) {
> +               rte_free(dispatcher);
> +               rte_errno = -rc;
> +               return NULL;
> +       }
> +
> +       return dispatcher;
> +}
> +
> +int
> +rte_dispatcher_free(struct rte_dispatcher *dispatcher)
> +{
> +       int rc;
> +
> +       if (dispatcher == NULL)
> +               return 0;
> +
> +       rc = evd_service_unregister(dispatcher);
> +
> +       if (rc)
> +               return rc;
> +
> +       rte_free(dispatcher);
> +
> +       return 0;
> +}
> +
> +uint32_t
> +rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher)
> +{
> +       return dispatcher->service_id;
> +}
> +
> +static int
> +lcore_port_index(struct rte_dispatcher_lcore *lcore,
> +                uint8_t event_port_id)
> +{
> +       uint16_t i;
> +
> +       for (i = 0; i < lcore->num_ports; i++) {
> +               struct rte_dispatcher_lcore_port *port =
> +                       &lcore->ports[i];
> +
> +               if (port->port_id == event_port_id)
> +                       return i;
> +       }
> +
> +       return -1;
> +}
> +
> +int
> +rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
> +                                 uint8_t event_port_id, uint16_t batch_size,
> +                                 uint64_t timeout, unsigned int lcore_id)
> +{
> +       struct rte_dispatcher_lcore *lcore;
> +       struct rte_dispatcher_lcore_port *port;
> +
> +       lcore = &dispatcher->lcores[lcore_id];
> +
> +       if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
> +               return -ENOMEM;
> +
> +       if (lcore_port_index(lcore, event_port_id) >= 0)
> +               return -EEXIST;
> +
> +       port = &lcore->ports[lcore->num_ports];
> +
> +       *port = (struct rte_dispatcher_lcore_port) {
> +               .port_id = event_port_id,
> +               .batch_size = batch_size,
> +               .timeout = timeout
> +       };
> +
> +       lcore->num_ports++;
> +
> +       return 0;
> +}
> +
> +int
> +rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
> +                                     uint8_t event_port_id,
> +                                     unsigned int lcore_id)
> +{
> +       struct rte_dispatcher_lcore *lcore;
> +       int port_idx;
> +       struct rte_dispatcher_lcore_port *port;
> +       struct rte_dispatcher_lcore_port *last;
> +
> +       lcore = &dispatcher->lcores[lcore_id];
> +
> +       port_idx = lcore_port_index(lcore, event_port_id);
> +
> +       if (port_idx < 0)
> +               return -ENOENT;
> +
> +       port = &lcore->ports[port_idx];
> +       last = &lcore->ports[lcore->num_ports - 1];
> +
> +       if (port != last)
> +               *port = *last;
> +
> +       lcore->num_ports--;
> +
> +       return 0;
> +}
> +
> +static struct rte_dispatcher_handler*

Missing a space before *.

> +evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore,
> +                           int handler_id)
> +{
> +       uint16_t i;
> +
> +       for (i = 0; i < lcore->num_handlers; i++) {
> +               struct rte_dispatcher_handler *handler =
> +                       &lcore->handlers[i];
> +
> +               if (handler->id == handler_id)
> +                       return handler;
> +       }
> +
> +       return NULL;
> +}
> +
> +static int
> +evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
> +{
> +       int handler_id = 0;
> +       struct rte_dispatcher_lcore *reference_lcore =
> +               &dispatcher->lcores[0];
> +
> +       if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
> +               return -1;
> +
> +       while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
> +               handler_id++;
> +
> +       return handler_id;
> +}
> +
> +static void
> +evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
> +                   const struct rte_dispatcher_handler *handler)
> +{
> +       int handler_idx = lcore->num_handlers;
> +
> +       lcore->handlers[handler_idx] = *handler;
> +       lcore->num_handlers++;
> +}
> +
> +static void
> +evd_install_handler(struct rte_dispatcher *dispatcher,
> +                   const struct rte_dispatcher_handler *handler)
> +{
> +       int i;
> +
> +       for (i = 0; i < RTE_MAX_LCORE; i++) {
> +               struct rte_dispatcher_lcore *lcore =
> +                       &dispatcher->lcores[i];
> +               evd_lcore_install_handler(lcore, handler);
> +       }
> +}
> +
> +int
> +rte_dispatcher_register(struct rte_dispatcher *dispatcher,
> +                       rte_dispatcher_match_t match_fun, void *match_data,
> +                       rte_dispatcher_process_t process_fun,
> +                       void *process_data)
> +{
> +       struct rte_dispatcher_handler handler = {
> +               .match_fun = match_fun,
> +               .match_data = match_data,
> +               .process_fun = process_fun,
> +               .process_data = process_data
> +       };
> +
> +       handler.id = evd_alloc_handler_id(dispatcher);
> +
> +       if (handler.id < 0)
> +               return -ENOMEM;
> +
> +       evd_install_handler(dispatcher, &handler);
> +
> +       return handler.id;
> +}
> +
> +static int
> +evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
> +                           int handler_id)
> +{
> +       struct rte_dispatcher_handler *unreg_handler;
> +       int handler_idx;
> +       uint16_t last_idx;
> +
> +       unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
> +
> +       if (unreg_handler == NULL) {
> +               RTE_EDEV_LOG_ERR("Invalid handler id %d\n", handler_id);
> +               return -EINVAL;
> +       }
> +
> +       handler_idx = unreg_handler - &lcore->handlers[0];
> +
> +       last_idx = lcore->num_handlers - 1;
> +
> +       if (handler_idx != last_idx) {
> +               /* move all handlers to maintain handler order */
> +               int n = last_idx - handler_idx;
> +               memmove(unreg_handler, unreg_handler + 1,
> +                       sizeof(struct rte_dispatcher_handler) * n);
> +       }
> +
> +       lcore->num_handlers--;
> +
> +       return 0;
> +}
> +
> +static int
> +evd_uninstall_handler(struct rte_dispatcher *dispatcher,
> +                     int handler_id)
> +{
> +       unsigned int lcore_id;
> +
> +       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +               struct rte_dispatcher_lcore *lcore =
> +                       &dispatcher->lcores[lcore_id];
> +               int rc;
> +
> +               rc = evd_lcore_uninstall_handler(lcore, handler_id);
> +
> +               if (rc < 0)
> +                       return rc;
> +       }
> +
> +       return 0;
> +}
> +
> +int
> +rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id)
> +{
> +       int rc;

No need for rc.

> +
> +       rc = evd_uninstall_handler(dispatcher, handler_id);
> +
> +       return rc;
> +}
> +
> +static struct rte_dispatcher_finalizer*

Missing a space before *.


> +evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
> +                      int handler_id)
> +{
> +       int i;
> +
> +       for (i = 0; i < dispatcher->num_finalizers; i++) {
> +               struct rte_dispatcher_finalizer *finalizer =
> +                       &dispatcher->finalizers[i];
> +
> +               if (finalizer->id == handler_id)
> +                       return finalizer;
> +       }
> +
> +       return NULL;
> +}
> +
> +static int
> +evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
> +{
> +       int finalizer_id = 0;
> +
> +       while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
> +               finalizer_id++;
> +
> +       return finalizer_id;
> +}
> +
> +static struct rte_dispatcher_finalizer *
> +evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
> +{
> +       int finalizer_idx;
> +       struct rte_dispatcher_finalizer *finalizer;
> +
> +       if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
> +               return NULL;
> +
> +       finalizer_idx = dispatcher->num_finalizers;
> +       finalizer = &dispatcher->finalizers[finalizer_idx];
> +
> +       finalizer->id = evd_alloc_finalizer_id(dispatcher);
> +
> +       dispatcher->num_finalizers++;
> +
> +       return finalizer;
> +}
> +
> +int
> +rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
> +                                rte_dispatcher_finalize_t finalize_fun,
> +                                void *finalize_data)
> +{
> +       struct rte_dispatcher_finalizer *finalizer;
> +
> +       finalizer = evd_alloc_finalizer(dispatcher);
> +
> +       if (finalizer == NULL)
> +               return -ENOMEM;
> +
> +       finalizer->finalize_fun = finalize_fun;
> +       finalizer->finalize_data = finalize_data;
> +
> +       return finalizer->id;
> +}
> +
> +int
> +rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
> +                                  int finalizer_id)
> +{
> +       struct rte_dispatcher_finalizer *unreg_finalizer;
> +       int finalizer_idx;
> +       uint16_t last_idx;
> +
> +       unreg_finalizer = evd_get_finalizer_by_id(dispatcher, finalizer_id);
> +
> +       if (unreg_finalizer == NULL) {
> +               RTE_EDEV_LOG_ERR("Invalid finalizer id %d\n", finalizer_id);
> +               return -EINVAL;
> +       }
> +
> +       finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
> +
> +       last_idx = dispatcher->num_finalizers - 1;
> +
> +       if (finalizer_idx != last_idx) {
> +               /* move all finalizers to maintain order */
> +               int n = last_idx - finalizer_idx;
> +               memmove(unreg_finalizer, unreg_finalizer + 1,
> +                       sizeof(struct rte_dispatcher_finalizer) * n);
> +       }
> +
> +       dispatcher->num_finalizers--;
> +
> +       return 0;
> +}
> +
> +static int
> +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
> +{
> +       int rc;
> +
> +       rc = rte_service_component_runstate_set(dispatcher->service_id,
> +                                               state);
> +
> +       if (rc != 0) {
> +               RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
> +                                "service component run state to %d\n", rc,
> +                                state);
> +               RTE_ASSERT(0);

Why not propagating the error to callers?


> +       }
> +
> +       return 0;
> +}
> +
> +int
> +rte_dispatcher_start(struct rte_dispatcher *dispatcher)
> +{
> +       return evd_set_service_runstate(dispatcher, 1);
> +}
> +
> +int
> +rte_dispatcher_stop(struct rte_dispatcher *dispatcher)
> +{
> +       return evd_set_service_runstate(dispatcher, 0);
> +}
> +
> +static void
> +evd_aggregate_stats(struct rte_dispatcher_stats *result,
> +                   const struct rte_dispatcher_stats *part)
> +{
> +       result->poll_count += part->poll_count;
> +       result->ev_batch_count += part->ev_batch_count;
> +       result->ev_dispatch_count += part->ev_dispatch_count;
> +       result->ev_drop_count += part->ev_drop_count;
> +}
> +
> +void
> +rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
> +                        struct rte_dispatcher_stats *stats)
> +{
> +       unsigned int lcore_id;
> +
> +       *stats = (struct rte_dispatcher_stats) {};
> +
> +       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +               const struct rte_dispatcher_lcore *lcore =
> +                       &dispatcher->lcores[lcore_id];
> +
> +               evd_aggregate_stats(stats, &lcore->stats);
> +       }
> +}
> +
> +void
> +rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher)
> +{
> +       unsigned int lcore_id;
> +
> +       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +               struct rte_dispatcher_lcore *lcore =
> +                       &dispatcher->lcores[lcore_id];
> +
> +               lcore->stats = (struct rte_dispatcher_stats) {};
> +       }
> +}
> diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
> new file mode 100644
> index 0000000000..0387316d7b
> --- /dev/null
> +++ b/lib/dispatcher/rte_dispatcher.h
> @@ -0,0 +1,468 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Ericsson AB
> + */
> +
> +#ifndef __RTE_DISPATCHER_H__
> +#define __RTE_DISPATCHER_H__
> +
> +/**
> + * @file
> + *
> + * RTE Dispatcher
> + *
> + * @warning
> + * @b EXPERIMENTAL:
> + * All functions in this file may be changed or removed without prior notice.
> + *
> + * The purpose of the dispatcher is to help decouple different parts
> + * of an application (e.g., modules), sharing the same underlying
> + * event device.
> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <rte_eventdev.h>

The headers check does not complain, yet this header has a dependency
on stdbool (for bool type), stdint.h (for uintX_t types) and
rte_compat.h (for __rte_experimental).
I prefer we have explicit includes here rather than rely on implicit
rte_eventdev.h dependencies.

> +
> +/**
> + * Function prototype for match callbacks.
> + *
> + * Match callbacks are used by an application to decide how the
> + * dispatcher distributes events to different parts of the
> + * application.
> + *
> + * The application is not expected to process the event at the point
> + * of the match call. Such matters should be deferred to the process
> + * callback invocation.
> + *
> + * The match callback may be used as an opportunity to prefetch data.
> + *
> + * @param event
> + *  Pointer to event
> + *
> + * @param cb_data
> + *  The pointer supplied by the application in
> + *  rte_dispatcher_register().
> + *
> + * @return
> + *   Returns true in case this events should be delivered (via

event*

> + *   the process callback), and false otherwise.
> + */
> +typedef bool
> +(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
> +
> +/**
> + * Function prototype for process callbacks.
> + *
> + * The process callbacks are used by the dispatcher to deliver
> + * events for processing.
> + *
> + * @param event_dev_id
> + *  The originating event device id.
> + *
> + * @param event_port_id
> + *  The originating event port.
> + *
> + * @param events
> + *  Pointer to an array of events.
> + *
> + * @param num
> + *  The number of events in the @p events array.
> + *
> + * @param cb_data
> + *  The pointer supplied by the application in
> + *  rte_dispatcher_register().
> + */
> +
> +typedef void
> +(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
> +                                 struct rte_event *events, uint16_t num,
> +                                 void *cb_data);
> +
> +/**
> + * Function prototype for finalize callbacks.
> + *
> + * The finalize callbacks are used by the dispatcher to notify the
> + * application it has delivered all events from a particular batch
> + * dequeued from the event device.
> + *
> + * @param event_dev_id
> + *  The originating event device id.
> + *
> + * @param event_port_id
> + *  The originating event port.
> + *
> + * @param cb_data
> + *  The pointer supplied by the application in
> + *  rte_dispatcher_finalize_register().
> + */
> +
> +typedef void
> +(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
> +                                  void *cb_data);
> +
> +/**
> + * Dispatcher statistics
> + */
> +struct rte_dispatcher_stats {
> +       uint64_t poll_count;
> +       /**< Number of event dequeue calls made toward the event device. */

We had a number of issues with doxygen post annotations.
Prefer the prefixed ones.

+       /** Number of event dequeue calls made toward the event device. */
+       uint64_t poll_count;
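
and similarly for the remaining counters, e.g.:

+       /** Number of non-empty event batches dequeued from event device. */
+       uint64_t ev_batch_count;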


> +       uint64_t ev_batch_count;
> +       /**< Number of non-empty event batches dequeued from event device.*/
> +       uint64_t ev_dispatch_count;
> +       /**< Number of events dispatched to a handler.*/
> +       uint64_t ev_drop_count;
> +       /**< Number of events dropped because no handler was found. */
> +};
> +
> +/**
> + * Create a dispatcher for the specified event device.
> + *
> + * @param event_dev_id
> + *  The identifier of the event device from which this dispatcher
> + *  will dequeue events.
> + *
> + * @return
> + *   A pointer to a new dispatcher instance, or NULL on failure, in which
> + *   case rte_errno is set.
> + */
> +__rte_experimental
> +struct rte_dispatcher *
> +rte_dispatcher_create(uint8_t event_dev_id);
> +
> +/**
> + * Free a dispatcher.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @return
> + *  - 0: Success
> + *  - <0: Error code on failure
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_free(struct rte_dispatcher *dispatcher);
> +
> +/**
> + * Retrieve the service identifier of a dispatcher.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @return
> + *  The dispatcher service's id.
> + */
> +__rte_experimental
> +uint32_t
> +rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher);
> +
> +/**
> + * Binds an event device port to a specific lcore on the specified
> + * dispatcher.
> + *
> + * This function configures the event port id to be used by the event
> + * dispatcher service, if run on the specified lcore.
> + *
> + * Multiple event device ports may be bound to the same lcore. A
> + * particular port must not be bound to more than one lcore.
> + *
> + * If the dispatcher service is mapped (with rte_service_map_lcore_set())
> + * to a lcore to which no ports are bound, the service function will be a
> + * no-operation.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but not while the dispatcher is running on lcore
> + * specified by @c lcore_id.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @param event_port_id
> + *  The event device port identifier.
> + *
> + * @param batch_size
> + *  The batch size to use in rte_event_dequeue_burst(), for the
> + *  configured event device port and lcore.
> + *
> + * @param timeout
> + *  The timeout parameter to use in rte_event_dequeue_burst(), for the
> + *  configured event device port and lcore.
> + *
> + * @param lcore_id
> + *  The lcore by which this event port will be used.
> + *
> + * @return
> + *  - 0: Success
> + *  - -ENOMEM: Unable to allocate sufficient resources.
> + *  - -EEXIST: Event port is already configured.
> + *  - -EINVAL: Invalid arguments.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
> +                                 uint8_t event_port_id, uint16_t batch_size,
> +                                 uint64_t timeout, unsigned int lcore_id);
> +
> +/**
> + * Unbind an event device port from a specific lcore.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but not while the dispatcher is running on
> + * lcore specified by @c lcore_id.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @param event_port_id
> + *  The event device port identifier.
> + *
> + * @param lcore_id
> + *  The lcore which was using this event port.
> + *
> + * @return
> + *  - 0: Success
> + *  - -ENOENT: Event port id not bound to this @c lcore_id.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
> +                                     uint8_t event_port_id,
> +                                     unsigned int lcore_id);
> +
> +/**
> + * Register an event handler.
> + *
> + * The match callback function is used to select if a particular event
> + * should be delivered, using the corresponding process callback
> + * function.
> + *
> + * The reason for having two distinct steps is to allow the dispatcher
> + * to deliver all events as a batch. This in turn will cause
> + * processing of a particular kind of events to happen in a
> + * back-to-back manner, improving cache locality.
> + *
> + * The list of handler callback functions is shared among all lcores,
> + * but will only be executed on lcores which have an eventdev port
> + * bound to them, and which are running the dispatcher service.
> + *
> + * An event is delivered to at most one handler. Events where no
> + * handler is found are dropped.
> + *
> + * The application must not depend on the order in which the match
> + * functions are invoked.
> + *
> + * Ordering of events is not guaranteed to be maintained between
> + * different deliver callbacks. For example, suppose there are two
> + * callbacks registered, matching different subsets of events arriving
> + * on an atomic queue. A batch of events [ev0, ev1, ev2] are dequeued
> + * on a particular port, all pertaining to the same flow. The match
> + * callback for registration A returns true for ev0 and ev2, and the
> + * matching function for registration B for ev1. In that scenario, the
> + * dispatcher may choose to deliver first [ev0, ev2] using A's deliver
> + * function, and then [ev1] to B - or vice versa.
> + *
> + * rte_dispatcher_register() may be called by any thread
> + * (including unregistered non-EAL threads), but not while the event
> + * dispatcher is running on any service lcore.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @param match_fun
> + *  The match callback function.
> + *
> + * @param match_cb_data
> + *  A pointer to some application-specific opaque data (or NULL),
> + *  which is supplied back to the application when match_fun is
> + *  called.
> + *
> + * @param process_fun
> + *  The process callback function.
> + *
> + * @param process_cb_data
> + *  A pointer to some application-specific opaque data (or NULL),
> + *  which is supplied back to the application when process_fun is
> + *  called.
> + *
> + * @return
> + *  - >= 0: The identifier for this registration.
> + *  - -ENOMEM: Unable to allocate sufficient resources.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_register(struct rte_dispatcher *dispatcher,
> +                       rte_dispatcher_match_t match_fun, void *match_cb_data,
> +                       rte_dispatcher_process_t process_fun,
> +                       void *process_cb_data);
> +
> +/**
> + * Unregister an event handler.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but not while the dispatcher is running on
> + * any service lcore.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @param handler_id
> + *  The handler registration id returned by the original
> + *  rte_dispatcher_register() call.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: The @c handler_id parameter was invalid.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id);
> +
> +/**
> + * Register a finalize callback function.
> + *
> + * An application may optionally install one or more finalize
> + * callbacks.
> + *
> + * All finalize callbacks are invoked by the dispatcher when a
> + * complete batch of events (retrieved using rte_event_dequeue_burst())
> + * has been delivered to the application (or has been dropped).
> + *
> + * The finalize callback is not tied to any particular handler.
> + *
> + * The finalize callback provides an opportunity for the application
> + * to do per-batch processing. One case where this may be useful is if
> + * an event output buffer is used, and is shared among several
> + * handlers. In such a case, proper output buffer flushing may be
> + * assured using a finalize callback.
> + *
> + * rte_dispatcher_finalize_register() may be called by any thread
> + * (including unregistered non-EAL threads), but not while the
> + * dispatcher is running on any service lcore.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @param finalize_fun
> + *  The function called after completing the processing of a
> + *  dequeue batch.
> + *
> + * @param finalize_data
> + *  A pointer to some application-specific opaque data (or NULL),
> + *  which is supplied back to the application when @c finalize_fun is
> + *  called.
> + *
> + * @return
> + *  - >= 0: The identifier for this registration.
> + *  - -ENOMEM: Unable to allocate sufficient resources.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
> +                                rte_dispatcher_finalize_t finalize_fun,
> +                                void *finalize_data);
> +
> +/**
> + * Unregister a finalize callback.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but not while the dispatcher is running on
> + * any service lcore.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @param reg_id
> + *  The finalize registration id returned by the original
> + *  rte_dispatcher_finalize_register() call.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: The @c reg_id parameter was invalid.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
> +                                  int reg_id);
> +
> +/**
> + * Start a dispatcher instance.
> + *
> + * Enables the dispatcher service.
> + *
> + * The underlying event device must have been started prior to calling
> + * rte_dispatcher_start().
> + *
> + * For the dispatcher to actually perform work (i.e., dispatch
> + * events), its service must have been mapped to one or more service
> + * lcores, and its service run state set to '1'. A dispatcher's
> + * service is retrieved using rte_dispatcher_service_id_get().
> + *
> + * Each service lcore to which the dispatcher is mapped should
> + * have at least one event port configured. Such configuration is
> + * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
> + * starting the dispatcher.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @return
> + *  - 0: Success
> + *  - <0: Error code on failure
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_start(struct rte_dispatcher *dispatcher);
> +
> +/**
> + * Stop a running dispatcher instance.
> + *
> + * Disables the dispatcher service.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + *
> + * @return
> + *  - 0: Success
> + *  - -EINVAL: Invalid @c dispatcher.
> + */
> +__rte_experimental
> +int
> +rte_dispatcher_stop(struct rte_dispatcher *dispatcher);
> +
> +/**
> + * Retrieve statistics for a dispatcher instance.
> + *
> + * This function is MT safe and may be called by any thread
> + * (including unregistered non-EAL threads).
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + * @param[out] stats
> + *   A pointer to a structure to fill with statistics.
> + */
> +__rte_experimental
> +void
> +rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
> +                        struct rte_dispatcher_stats *stats);
> +
> +/**
> + * Reset statistics for a dispatcher instance.
> + *
> + * This function may be called by any thread (including unregistered
> + * non-EAL threads), but may not produce the correct result if the
> + * dispatcher is running on any service lcore.
> + *
> + * @param dispatcher
> + *  The dispatcher instance.
> + */
> +__rte_experimental
> +void
> +rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* __RTE_DISPATCHER_H__ */
> diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
> new file mode 100644
> index 0000000000..8f9ad96522
> --- /dev/null
> +++ b/lib/dispatcher/version.map
> @@ -0,0 +1,20 @@
> +EXPERIMENTAL {
> +       global:
> +
> +       # added in 23.11
> +       rte_dispatcher_create;
> +       rte_dispatcher_free;
> +       rte_dispatcher_service_id_get;
> +       rte_dispatcher_bind_port_to_lcore;
> +       rte_dispatcher_unbind_port_from_lcore;
> +       rte_dispatcher_register;
> +       rte_dispatcher_unregister;
> +       rte_dispatcher_finalize_register;
> +       rte_dispatcher_finalize_unregister;
> +       rte_dispatcher_start;
> +       rte_dispatcher_stop;
> +       rte_dispatcher_stats_get;
> +       rte_dispatcher_stats_reset;

Sort alphabetically please.
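
i.e.:

	rte_dispatcher_bind_port_to_lcore;
	rte_dispatcher_create;
	rte_dispatcher_finalize_register;
	rte_dispatcher_finalize_unregister;
	rte_dispatcher_free;
	rte_dispatcher_register;
	rte_dispatcher_service_id_get;
	rte_dispatcher_start;
	rte_dispatcher_stats_get;
	rte_dispatcher_stats_reset;
	rte_dispatcher_stop;
	rte_dispatcher_unbind_port_from_lcore;
	rte_dispatcher_unregister;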



> +
> +       local: *;
> +};
> diff --git a/lib/meson.build b/lib/meson.build
> index 099b0ed18a..3093b338d2 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -35,6 +35,7 @@ libraries = [
>          'distributor',
>          'efd',
>          'eventdev',
> +        'dispatcher', # dispatcher depends on eventdev
>          'gpudev',
>          'gro',
>          'gso',
> @@ -81,6 +82,7 @@ optional_libs = [
>          'cfgfile',
>          'compressdev',
>          'cryptodev',
> +        'dispatcher',
>          'distributor',
>          'dmadev',
>          'efd',
> --
> 2.34.1
>


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 2/3] test: add dispatcher test suite
  2023-09-28  7:30                                         ` [PATCH v5 2/3] test: add dispatcher test suite Mattias Rönnblom
@ 2023-10-05  8:36                                           ` David Marchand
  2023-10-05 11:25                                             ` Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: David Marchand @ 2023-10-05  8:36 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On Thu, Sep 28, 2023 at 9:36 AM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> Add unit tests for the dispatcher.

Missing your SoB.

Same comment as for patch 1 about the indentation.

>
> --
> PATCH v5:
>  o Update test suite to use pointer and not integer id when calling
>    dispatcher functions.
>
> PATCH v3:
>  o Adapt the test suite to dispatcher API name changes.
>
> PATCH v2:
>  o Test finalize callback functionality.
>  o Test handler and finalizer count upper limits.
>  o Add statistics reset test.
>  o Make sure dispatcher supply the proper event dev id and port id back
>    to the application.
>
> PATCH:
>  o Extend test to cover often-used handler optimization feature.
>
> RFC v4:
>  o Adapt to non-const events in process function prototype.
>
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> ---
>  MAINTAINERS                |    1 +
>  app/test/meson.build       |    1 +
>  app/test/test_dispatcher.c | 1046 ++++++++++++++++++++++++++++++++++++
>  3 files changed, 1048 insertions(+)
>  create mode 100644 app/test/test_dispatcher.c
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 6704cd5b2c..43890cad0e 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -1729,6 +1729,7 @@ F: lib/node/
>  Dispatcher - EXPERIMENTAL
>  M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>  F: lib/dispatcher/
> +F: app/test/test_dispatcher.c
>
>  Test Applications
>  -----------------
> diff --git a/app/test/meson.build b/app/test/meson.build
> index 05bae9216d..3303c73817 100644
> --- a/app/test/meson.build
> +++ b/app/test/meson.build
> @@ -55,6 +55,7 @@ source_file_deps = {
>      'test_cycles.c': [],
>      'test_debug.c': [],
>      'test_devargs.c': ['kvargs'],
> +    'test_dispatcher.c': ['dispatcher'],
>      'test_distributor.c': ['distributor'],
>      'test_distributor_perf.c': ['distributor'],
>      'test_dmadev.c': ['dmadev', 'bus_vdev'],
> diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c
> new file mode 100644
> index 0000000000..2bce65fdd9
> --- /dev/null
> +++ b/app/test/test_dispatcher.c
> @@ -0,0 +1,1046 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Ericsson AB
> + */
> +
> +#include "test.h"

test.h is a "local" include and should go after the "standard" includes
block and the "DPDK" includes block.
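
I.e., something like:

#include <stdatomic.h>

#include <rte_bus_vdev.h>
#include <rte_dispatcher.h>
#include <rte_eventdev.h>
#include <rte_random.h>
#include <rte_service.h>

#include "test.h"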


> +
> +#include <stdatomic.h>

Can you use the new wrappers for atomics?
rte_stdatomic.h
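
E.g., a sketch of the conversion (assuming the rte_stdatomic.h
wrappers introduced in 23.11):

#include <rte_stdatomic.h>

struct cb_count {
	uint8_t expected_event_dev_id;
	uint8_t expected_event_port_id[RTE_MAX_LCORE];
	RTE_ATOMIC(int) count;
};

/* ... */
rte_atomic_fetch_add_explicit(&count->count, num,
			      rte_memory_order_relaxed);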

> +
> +#include <rte_bus_vdev.h>
> +#include <rte_dispatcher.h>
> +#include <rte_eventdev.h>
> +#include <rte_random.h>
> +#include <rte_service.h>
> +
> +#define NUM_WORKERS 3
> +
> +#define NUM_PORTS (NUM_WORKERS + 1)
> +#define WORKER_PORT_ID(worker_idx) (worker_idx)
> +#define DRIVER_PORT_ID (NUM_PORTS - 1)
> +
> +#define NUM_SERVICE_CORES NUM_WORKERS
> +
> +/* Eventdev */
> +#define NUM_QUEUES 8
> +#define LAST_QUEUE_ID (NUM_QUEUES - 1)
> +#define MAX_EVENTS 4096
> +#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
> +#define DEQUEUE_BURST_SIZE 32
> +#define ENQUEUE_BURST_SIZE 32
> +
> +#define NUM_EVENTS 10000000
> +#define NUM_FLOWS 16
> +
> +#define DSW_VDEV "event_dsw0"
> +
> +struct app_queue {
> +       uint8_t queue_id;
> +       uint64_t sn[NUM_FLOWS];
> +       int dispatcher_reg_id;
> +};
> +
> +struct cb_count {
> +       uint8_t expected_event_dev_id;
> +       uint8_t expected_event_port_id[RTE_MAX_LCORE];
> +       atomic_int count;
> +};
> +
> +struct test_app {
> +       uint8_t event_dev_id;
> +       struct rte_dispatcher *dispatcher;
> +       uint32_t dispatcher_service_id;
> +
> +       unsigned int service_lcores[NUM_SERVICE_CORES];
> +
> +       int never_match_reg_id;
> +       uint64_t never_match_count;
> +       struct cb_count never_process_count;
> +
> +       struct app_queue queues[NUM_QUEUES];
> +
> +       int finalize_reg_id;
> +       struct cb_count finalize_count;
> +
> +       bool running;
> +
> +       atomic_int completed_events;
> +       atomic_int errors;
> +};
> +
> +#define RETURN_ON_ERROR(rc) \
> +       do {                                    \
> +               if (rc != TEST_SUCCESS)         \
> +                       return rc;              \
> +       } while (0)

TEST_ASSERT?
This gives context about which part of a test failed.
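
E.g., instead of

	rc = test_app_setup_event_dev(test_app);
	RETURN_ON_ERROR(rc);

something like (a sketch; note that TEST_SKIPPED propagation would
need separate handling):

	rc = test_app_setup_event_dev(test_app);
	TEST_ASSERT_SUCCESS(rc, "Failed to set up event device");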


> +
> +static struct test_app *
> +test_app_create(void)
> +{
> +       int i;
> +       struct test_app *app;
> +
> +       app = calloc(1, sizeof(struct test_app));
> +
> +       if (app == NULL)
> +               return NULL;
> +
> +       for (i = 0; i < NUM_QUEUES; i++)
> +               app->queues[i].queue_id = i;
> +
> +       return app;
> +}
> +
> +static void
> +test_app_free(struct test_app *app)
> +{
> +       free(app);
> +}
> +
> +static int
> +test_app_create_vdev(struct test_app *app)
> +{
> +       int rc;
> +
> +       rc = rte_vdev_init(DSW_VDEV, NULL);
> +       if (rc < 0)
> +               return TEST_SKIPPED;
> +
> +       rc = rte_event_dev_get_dev_id(DSW_VDEV);
> +
> +       app->event_dev_id = (uint8_t)rc;
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_destroy_vdev(struct test_app *app)
> +{
> +       int rc;
> +
> +       rc = rte_event_dev_close(app->event_dev_id);
> +       TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
> +
> +       rc = rte_vdev_uninit(DSW_VDEV);
> +       TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_setup_event_dev(struct test_app *app)
> +{
> +       int rc;
> +       int i;
> +
> +       rc = test_app_create_vdev(app);
> +       if (rc < 0)
> +               return rc;
> +
> +       struct rte_event_dev_config config = {
> +               .nb_event_queues = NUM_QUEUES,
> +               .nb_event_ports = NUM_PORTS,
> +               .nb_events_limit = MAX_EVENTS,
> +               .nb_event_queue_flows = 64,
> +               .nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
> +               .nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
> +       };
> +
> +       rc = rte_event_dev_configure(app->event_dev_id, &config);
> +
> +       TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
> +
> +       struct rte_event_queue_conf queue_config = {
> +               .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
> +               .schedule_type = RTE_SCHED_TYPE_ATOMIC,
> +               .nb_atomic_flows = 64
> +       };
> +
> +       for (i = 0; i < NUM_QUEUES; i++) {
> +               uint8_t queue_id = i;
> +
> +               rc = rte_event_queue_setup(app->event_dev_id, queue_id,
> +                                          &queue_config);
> +
> +               TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
> +       }
> +
> +       struct rte_event_port_conf port_config = {
> +               .new_event_threshold = NEW_EVENT_THRESHOLD,
> +               .dequeue_depth = DEQUEUE_BURST_SIZE,
> +               .enqueue_depth = ENQUEUE_BURST_SIZE
> +       };
> +
> +       for (i = 0; i < NUM_PORTS; i++) {
> +               uint8_t event_port_id = i;
> +
> +               rc = rte_event_port_setup(app->event_dev_id, event_port_id,
> +                                         &port_config);
> +               TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
> +                                   event_port_id);
> +
> +               if (event_port_id == DRIVER_PORT_ID)
> +                       continue;
> +
> +               rc = rte_event_port_link(app->event_dev_id, event_port_id,
> +                                        NULL, NULL, 0);
> +
> +               TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
> +                                 event_port_id);
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_teardown_event_dev(struct test_app *app)
> +{
> +       return test_app_destroy_vdev(app);
> +}
> +
> +static int
> +test_app_start_event_dev(struct test_app *app)
> +{
> +       int rc;
> +
> +       rc = rte_event_dev_start(app->event_dev_id);
> +       TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static void
> +test_app_stop_event_dev(struct test_app *app)
> +{
> +       rte_event_dev_stop(app->event_dev_id);
> +}
> +
> +static int
> +test_app_create_dispatcher(struct test_app *app)
> +{
> +       int rc;
> +
> +       app->dispatcher = rte_dispatcher_create(app->event_dev_id);
> +
> +       TEST_ASSERT(app->dispatcher != NULL, "Unable to create event "
> +                   "dispatcher");
> +
> +       app->dispatcher_service_id =
> +               rte_dispatcher_service_id_get(app->dispatcher);
> +
> +       rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
> +
> +       TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
> +                           "stats");
> +
> +       rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
> +
> +       TEST_ASSERT_SUCCESS(rc, "Unable to set dispatcher service runstate");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_free_dispatcher(struct test_app *app)
> +{
> +       int rc;
> +
> +       rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
> +       TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
> +
> +       rc = rte_dispatcher_free(app->dispatcher);
> +       TEST_ASSERT_SUCCESS(rc, "Error freeing dispatcher");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_bind_ports(struct test_app *app)
> +{
> +       int i;
> +
> +       app->never_process_count.expected_event_dev_id =
> +               app->event_dev_id;
> +       app->finalize_count.expected_event_dev_id =
> +               app->event_dev_id;
> +
> +       for (i = 0; i < NUM_WORKERS; i++) {
> +               unsigned int lcore_id = app->service_lcores[i];
> +               uint8_t port_id = WORKER_PORT_ID(i);
> +
> +               int rc = rte_dispatcher_bind_port_to_lcore(
> +                       app->dispatcher, port_id, DEQUEUE_BURST_SIZE, 0,
> +                       lcore_id
> +               );
> +
> +               TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
> +                                   "to lcore %d", port_id, lcore_id);
> +
> +               app->never_process_count.expected_event_port_id[lcore_id] =
> +                       port_id;
> +               app->finalize_count.expected_event_port_id[lcore_id] = port_id;
> +       }
> +
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_unbind_ports(struct test_app *app)
> +{
> +       int i;
> +
> +       for (i = 0; i < NUM_WORKERS; i++) {
> +               unsigned int lcore_id = app->service_lcores[i];
> +
> +               int rc = rte_dispatcher_unbind_port_from_lcore(
> +                       app->dispatcher,
> +                       WORKER_PORT_ID(i),
> +                       lcore_id
> +               );
> +
> +               TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
> +                                   "from lcore %d", WORKER_PORT_ID(i),
> +                                   lcore_id);
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static bool
> +match_queue(const struct rte_event *event, void *cb_data)
> +{
> +       uintptr_t queue_id = (uintptr_t)cb_data;
> +
> +       return event->queue_id == queue_id;
> +}
> +
> +static int
> +test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
> +{
> +       int i;
> +
> +       for (i = 0; i < NUM_SERVICE_CORES; i++)
> +               if (app->service_lcores[i] == lcore_id)
> +                       return i;

This construct is hard to read and prone to error if the code is updated later.

for () {
  if ()
    return i;
}
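
i.e.:

for (i = 0; i < NUM_SERVICE_CORES; i++) {
	if (app->service_lcores[i] == lcore_id)
		return i;
}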


> +
> +       return -1;
> +}
> +
> +static int
> +test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
> +{
> +       int worker;
> +
> +       worker = test_app_get_worker_index(app, lcore_id);
> +
> +       if (worker < 0)
> +               return -1;
> +
> +       return WORKER_PORT_ID(worker);
> +}
> +
> +static void
> +test_app_queue_note_error(struct test_app *app)
> +{
> +       atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
> +}
> +
> +static void
> +test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
> +                      struct rte_event *in_events, uint16_t num,
> +                      void *cb_data)
> +{
> +       struct app_queue *app_queue = cb_data;
> +       struct test_app *app = container_of(app_queue, struct test_app,
> +                                           queues[app_queue->queue_id]);
> +       unsigned int lcore_id = rte_lcore_id();
> +       bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
> +       int event_port_id;
> +       uint16_t i;
> +       struct rte_event out_events[num];
> +
> +       event_port_id = test_app_get_worker_port(app, lcore_id);
> +
> +       if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
> +           p_event_port_id != event_port_id) {
> +               test_app_queue_note_error(app);
> +               return;
> +       }
> +
> +       for (i = 0; i < num; i++) {
> +               const struct rte_event *in_event = &in_events[i];
> +               struct rte_event *out_event = &out_events[i];
> +               uint64_t sn = in_event->u64;
> +               uint64_t expected_sn;
> +
> +               if (in_event->queue_id != app_queue->queue_id) {
> +                       test_app_queue_note_error(app);
> +                       return;
> +               }
> +
> +               expected_sn = app_queue->sn[in_event->flow_id]++;
> +
> +               if (expected_sn != sn) {
> +                       test_app_queue_note_error(app);
> +                       return;
> +               }
> +
> +               if (intermediate_queue)
> +                       *out_event = (struct rte_event) {
> +                               .queue_id = in_event->queue_id + 1,
> +                               .flow_id = in_event->flow_id,
> +                               .sched_type = RTE_SCHED_TYPE_ATOMIC,
> +                               .op = RTE_EVENT_OP_FORWARD,
> +                               .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
> +                               .u64 = sn
> +                       };
> +       }
> +
> +       if (intermediate_queue) {
> +               uint16_t n = 0;
> +
> +               do {
> +                       n += rte_event_enqueue_forward_burst(p_event_dev_id,
> +                                                            p_event_port_id,
> +                                                            out_events + n,
> +                                                            num - n);
> +               } while (n != num);
> +       } else
> +               atomic_fetch_add_explicit(&app->completed_events, num,
> +                                         memory_order_relaxed);
> +}
> +
> +static bool
> +never_match(const struct rte_event *event __rte_unused, void *cb_data)
> +{
> +       uint64_t *count = cb_data;
> +
> +       (*count)++;
> +
> +       return false;
> +}
> +
> +static void
> +test_app_never_process(uint8_t event_dev_id,
> +                      uint8_t event_port_id,
> +                      struct rte_event *in_events __rte_unused,
> +                      uint16_t num, void *cb_data)
> +{
> +       struct cb_count *count = cb_data;
> +       unsigned int lcore_id = rte_lcore_id();
> +
> +       if (event_dev_id == count->expected_event_dev_id &&
> +           event_port_id == count->expected_event_port_id[lcore_id])
> +               atomic_fetch_add_explicit(&count->count, num,
> +                                         memory_order_relaxed);
> +}
> +
> +static void
> +finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
> +{
> +       struct cb_count *count = cb_data;
> +       unsigned int lcore_id = rte_lcore_id();
> +
> +       if (event_dev_id == count->expected_event_dev_id &&
> +           event_port_id == count->expected_event_port_id[lcore_id])
> +               atomic_fetch_add_explicit(&count->count, 1,
> +                                         memory_order_relaxed);
> +}
> +
> +static int
> +test_app_register_callbacks(struct test_app *app)
> +{
> +       int i;
> +
> +       app->never_match_reg_id =
> +               rte_dispatcher_register(app->dispatcher, never_match,
> +                                       &app->never_match_count,
> +                                       test_app_never_process,
> +                                       &app->never_process_count);
> +
> +       TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
> +                   "never-match handler");
> +
> +       for (i = 0; i < NUM_QUEUES; i++) {
> +               struct app_queue *app_queue = &app->queues[i];
> +               uintptr_t queue_id = app_queue->queue_id;
> +               int reg_id;
> +
> +               reg_id = rte_dispatcher_register(app->dispatcher,
> +                                                match_queue, (void *)queue_id,
> +                                                test_app_process_queue,
> +                                                app_queue);
> +
> +               TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
> +                           "callback for queue %d", i);
> +
> +               app_queue->dispatcher_reg_id = reg_id;
> +       }
> +
> +       app->finalize_reg_id =
> +               rte_dispatcher_finalize_register(app->dispatcher,
> +                                                      finalize,
> +                                                      &app->finalize_count);
> +       TEST_ASSERT_SUCCESS(app->finalize_reg_id, "Error registering "
> +                           "finalize callback");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
> +{
> +       int reg_id = app->queues[queue_id].dispatcher_reg_id;
> +       int rc;
> +
> +       if (reg_id < 0) /* unregistered already */
> +               return 0;
> +
> +       rc = rte_dispatcher_unregister(app->dispatcher, reg_id);
> +
> +       TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
> +                           "callback for queue %d", queue_id);
> +
> +       app->queues[queue_id].dispatcher_reg_id = -1;
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_unregister_callbacks(struct test_app *app)
> +{
> +       int i;
> +       int rc;
> +
> +       if (app->never_match_reg_id >= 0) {
> +               rc = rte_dispatcher_unregister(app->dispatcher,
> +                                                    app->never_match_reg_id);
> +
> +               TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
> +                                   "handler");
> +               app->never_match_reg_id = -1;
> +       }
> +
> +       for (i = 0; i < NUM_QUEUES; i++) {
> +               rc = test_app_unregister_callback(app, i);
> +               RETURN_ON_ERROR(rc);
> +       }
> +
> +       if (app->finalize_reg_id >= 0) {
> +               rc = rte_dispatcher_finalize_unregister(
> +                       app->dispatcher, app->finalize_reg_id
> +               );
> +               app->finalize_reg_id = -1;
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_start_dispatcher(struct test_app *app)
> +{
> +       int rc;
> +
> +       rc = rte_dispatcher_start(app->dispatcher);
> +
> +       TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_stop_dispatcher(struct test_app *app)
> +{
> +       int rc;
> +
> +       rc = rte_dispatcher_stop(app->dispatcher);
> +
> +       TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_reset_dispatcher_stats(struct test_app *app)
> +{
> +       struct rte_dispatcher_stats stats;
> +
> +       rte_dispatcher_stats_reset(app->dispatcher);
> +
> +       memset(&stats, 0xff, sizeof(stats));
> +
> +       rte_dispatcher_stats_get(app->dispatcher, &stats);
> +
> +       TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
> +       TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
> +       TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
> +                         "not zero");
> +       TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
> +{
> +       int rc;
> +
> +       rc = rte_service_lcore_add(lcore_id);
> +       TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
> +                           "service core", lcore_id);
> +
> +       rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
> +       TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_setup_service_cores(struct test_app *app)
> +{
> +       int i;
> +       int lcore_id = -1;
> +
> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
> +               lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
> +
> +               TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
> +                           "Too few lcores. Needs at least %d worker lcores",
> +                           NUM_SERVICE_CORES);
> +
> +               app->service_lcores[i] = lcore_id;
> +       }
> +
> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
> +               int rc;
> +
> +               rc = test_app_setup_service_core(app, app->service_lcores[i]);
> +
> +               RETURN_ON_ERROR(rc);
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
> +{
> +       int rc;
> +
> +       rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
> +       TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
> +
> +       rc = rte_service_lcore_del(lcore_id);
> +       TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
> +                           lcore_id);
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_teardown_service_cores(struct test_app *app)
> +{
> +       int i;
> +
> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
> +               unsigned int lcore_id = app->service_lcores[i];
> +               int rc;
> +
> +               rc = test_app_teardown_service_core(app, lcore_id);
> +
> +               RETURN_ON_ERROR(rc);
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_start_service_cores(struct test_app *app)
> +{
> +       int i;
> +
> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
> +               unsigned int lcore_id = app->service_lcores[i];
> +               int rc;
> +
> +               rc = rte_service_lcore_start(lcore_id);
> +               TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
> +                                   lcore_id);
> +
> +               RETURN_ON_ERROR(rc);
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_stop_service_cores(struct test_app *app)
> +{
> +       int i;
> +
> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
> +               unsigned int lcore_id = app->service_lcores[i];
> +               int rc;
> +
> +               rc = rte_service_lcore_stop(lcore_id);
> +               TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
> +                                   lcore_id);
> +
> +               RETURN_ON_ERROR(rc);
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_app_start(struct test_app *app)
> +{
> +       int rc;
> +
> +       rc = test_app_start_event_dev(app);
> +       RETURN_ON_ERROR(rc);
> +
> +       rc = test_app_start_service_cores(app);
> +       RETURN_ON_ERROR(rc);
> +
> +       rc = test_app_start_dispatcher(app);
> +
> +       app->running = true;
> +
> +       return rc;
> +}
> +
> +static int
> +test_app_stop(struct test_app *app)
> +{
> +       int rc;
> +
> +       rc = test_app_stop_dispatcher(app);
> +       RETURN_ON_ERROR(rc);
> +
> +       rc = test_app_stop_service_cores(app);
> +       RETURN_ON_ERROR(rc);
> +
> +       test_app_stop_event_dev(app);
> +
> +       app->running = false;
> +
> +       return TEST_SUCCESS;
> +}
> +
> +struct test_app *test_app;
> +
> +static int
> +test_setup(void)
> +{
> +       int rc;
> +
> +       test_app = test_app_create();
> +       TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
> +
> +       rc = test_app_setup_event_dev(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       rc = test_app_create_dispatcher(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       rc = test_app_setup_service_cores(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       rc = test_app_register_callbacks(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       rc = test_app_bind_ports(test_app);
> +
> +       return rc;
> +}
> +
> +static void test_teardown(void)
> +{
> +       if (test_app->running)
> +               test_app_stop(test_app);
> +
> +       test_app_teardown_service_cores(test_app);
> +
> +       test_app_unregister_callbacks(test_app);
> +
> +       test_app_unbind_ports(test_app);
> +
> +       test_app_free_dispatcher(test_app);
> +
> +       test_app_teardown_event_dev(test_app);
> +
> +       test_app_free(test_app);
> +
> +       test_app = NULL;
> +}
> +
> +static int
> +test_app_get_completed_events(struct test_app *app)
> +{
> +       return atomic_load_explicit(&app->completed_events,
> +                                   memory_order_relaxed);
> +}
> +
> +static int
> +test_app_get_errors(struct test_app *app)
> +{
> +       return atomic_load_explicit(&app->errors, memory_order_relaxed);
> +}
> +
> +static int
> +test_basic(void)
> +{
> +       int rc;
> +       int i;
> +
> +       rc = test_app_start(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       uint64_t sns[NUM_FLOWS] = { 0 };
> +
> +       for (i = 0; i < NUM_EVENTS;) {
> +               struct rte_event events[ENQUEUE_BURST_SIZE];
> +               int left;
> +               int batch_size;
> +               int j;
> +               uint16_t n = 0;
> +
> +               batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
> +               left = NUM_EVENTS - i;
> +
> +               batch_size = RTE_MIN(left, batch_size);
> +
> +               for (j = 0; j < batch_size; j++) {
> +                       struct rte_event *event = &events[j];
> +                       uint64_t sn;
> +                       uint32_t flow_id;
> +
> +                       flow_id = rte_rand_max(NUM_FLOWS);
> +
> +                       sn = sns[flow_id]++;
> +
> +                       *event = (struct rte_event) {
> +                               .queue_id = 0,
> +                               .flow_id = flow_id,
> +                               .sched_type = RTE_SCHED_TYPE_ATOMIC,
> +                               .op = RTE_EVENT_OP_NEW,
> +                               .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
> +                               .u64 = sn
> +                       };
> +               }
> +
> +               while (n < batch_size)
> +                       n += rte_event_enqueue_new_burst(test_app->event_dev_id,
> +                                                        DRIVER_PORT_ID,
> +                                                        events + n,
> +                                                        batch_size - n);
> +
> +               i += batch_size;
> +       }
> +
> +       while (test_app_get_completed_events(test_app) != NUM_EVENTS)
> +               rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
> +
> +       rc = test_app_get_errors(test_app);
> +       TEST_ASSERT(rc == 0, "%d errors occurred", rc);
> +
> +       rc = test_app_stop(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       struct rte_dispatcher_stats stats;
> +       rte_dispatcher_stats_get(test_app->dispatcher, &stats);
> +
> +       TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
> +       TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
> +                         "Invalid dispatch count");
> +       TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
> +
> +       TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
> +                         "Never-match handler's process function has "
> +                         "been called");
> +
> +       int finalize_count =
> +               atomic_load_explicit(&test_app->finalize_count.count,
> +                                    memory_order_relaxed);
> +
> +       TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
> +       TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
> +                   "Finalize count larger than event count");
> +
> +       TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
> +                         "%"PRIu64" batches dequeued, but finalize called %d "
> +                         "times", stats.ev_batch_count, finalize_count);
> +
> +       /*
> +        * The event dispatcher should call often-matching match functions
> +        * more often, and thus this never-matching match function should
> +        * be called relatively infrequently.
> +        */
> +       TEST_ASSERT(test_app->never_match_count <
> +                   (stats.ev_dispatch_count / 4),
> +                   "Never-matching match function called suspiciously often");
> +
> +       rc = test_app_reset_dispatcher_stats(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static int
> +test_drop(void)
> +{
> +       int rc;
> +       uint8_t unhandled_queue;
> +       struct rte_dispatcher_stats stats;
> +
> +       unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
> +
> +       rc = test_app_start(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       rc = test_app_unregister_callback(test_app, unhandled_queue);
> +       RETURN_ON_ERROR(rc);
> +
> +       struct rte_event event = {
> +           .queue_id = unhandled_queue,
> +           .flow_id = 0,
> +           .sched_type = RTE_SCHED_TYPE_ATOMIC,
> +           .op = RTE_EVENT_OP_NEW,
> +           .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
> +           .u64 = 0
> +       };
> +
> +       do {
> +               rc = rte_event_enqueue_burst(test_app->event_dev_id,
> +                                            DRIVER_PORT_ID, &event, 1);
> +       } while (rc == 0);
> +
> +       do {
> +               rte_dispatcher_stats_get(test_app->dispatcher, &stats);
> +
> +               rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
> +       } while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
> +
> +       rc = test_app_stop(test_app);
> +       RETURN_ON_ERROR(rc);
> +
> +       TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
> +       TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
> +                         "Dispatch count is not zero");
> +       TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
> +
> +       return TEST_SUCCESS;
> +}
> +
> +#define MORE_THAN_MAX_HANDLERS 1000
> +#define MIN_HANDLERS 32
> +
> +static int
> +test_many_handler_registrations(void)
> +{
> +       int rc;
> +       int num_regs = 0;
> +       int reg_ids[MORE_THAN_MAX_HANDLERS];
> +       int reg_id;
> +       int i;
> +
> +       rc = test_app_unregister_callbacks(test_app);
> +
> +       RETURN_ON_ERROR(rc);
> +
> +       for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
> +               reg_id = rte_dispatcher_register(test_app->dispatcher,
> +                                                never_match, NULL,
> +                                                test_app_never_process, NULL);
> +               if (reg_id < 0)
> +                       break;
> +
> +               reg_ids[num_regs++] = reg_id;
> +       }
> +
> +       TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
> +                         "%d but was %d", -ENOMEM, reg_id);
> +       TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
> +                   "after %d handler registrations.", num_regs);
> +
> +       for (i = 0; i < num_regs; i++) {
> +               rc = rte_dispatcher_unregister(test_app->dispatcher,
> +                                              reg_ids[i]);
> +               TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
> +                                   reg_ids[i]);
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static void
> +dummy_finalize(uint8_t event_dev_id __rte_unused,
> +              uint8_t event_port_id __rte_unused,
> +              void *cb_data __rte_unused)
> +{
> +}
> +
> +#define MORE_THAN_MAX_FINALIZERS 1000
> +#define MIN_FINALIZERS 16
> +
> +static int
> +test_many_finalize_registrations(void)
> +{
> +       int rc;
> +       int num_regs = 0;
> +       int reg_ids[MORE_THAN_MAX_FINALIZERS];
> +       int reg_id;
> +       int i;
> +
> +       rc = test_app_unregister_callbacks(test_app);
> +
> +       RETURN_ON_ERROR(rc);
> +
> +       for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
> +               reg_id = rte_dispatcher_finalize_register(
> +                       test_app->dispatcher, dummy_finalize, NULL
> +               );
> +
> +               if (reg_id < 0)
> +                       break;
> +
> +               reg_ids[num_regs++] = reg_id;
> +       }
> +
> +       TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
> +                         "%d but was %d", -ENOMEM, reg_id);
> +       TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
> +                   "already after %d registrations.", num_regs);
> +
> +       for (i = 0; i < num_regs; i++) {
> +               rc = rte_dispatcher_finalize_unregister(
> +                       test_app->dispatcher, reg_ids[i]
> +               );
> +               TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
> +                                   reg_ids[i]);
> +       }
> +
> +       return TEST_SUCCESS;
> +}
> +
> +static struct unit_test_suite test_suite = {
> +       .suite_name = "Event dispatcher test suite",
> +       .unit_test_cases = {
> +               TEST_CASE_ST(test_setup, test_teardown, test_basic),
> +               TEST_CASE_ST(test_setup, test_teardown, test_drop),
> +               TEST_CASE_ST(test_setup, test_teardown,
> +                            test_many_handler_registrations),
> +               TEST_CASE_ST(test_setup, test_teardown,
> +                            test_many_finalize_registrations),
> +               TEST_CASES_END()
> +       }
> +};
> +
> +static int
> +test_dispatcher(void)
> +{
> +       return unit_test_suite_runner(&test_suite);
> +}
> +
> +REGISTER_TEST_COMMAND(dispatcher_autotest, test_dispatcher);

We have new macros (see REGISTER_FAST_TEST, for example) so that a test
is associated with an existing test suite.
I think this test should be part of the fast-test testsuite, wdyt?
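
E.g. (assuming the REGISTER_FAST_TEST(command, no_huge, ASan, func)
signature):

REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);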


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 3/3] doc: add dispatcher programming guide
  2023-09-28  7:30                                         ` [PATCH v5 3/3] doc: add dispatcher programming guide Mattias Rönnblom
@ 2023-10-05  8:36                                           ` David Marchand
  2023-10-05 11:33                                             ` Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: David Marchand @ 2023-10-05  8:36 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On Thu, Sep 28, 2023 at 9:37 AM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> Provide programming guide for the dispatcher library.
>
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

Checkpatch complains about trailing whitespace on empty lines; can you
double check?

For example.

ERROR:TRAILING_WHITESPACE: trailing whitespace
#63: FILE: doc/guides/prog_guide/dispatcher_lib.rst:33:
+    $

ERROR:TRAILING_WHITESPACE: trailing whitespace
#66: FILE: doc/guides/prog_guide/dispatcher_lib.rst:36:
+    $


>
> --
> PATCH v5:
>  o Update guide to match API changes related to dispatcher ids.
>
> PATCH v3:
>  o Adapt guide to the dispatcher API name changes.
>
> PATCH:
>  o Improve grammar and spelling.
>
> RFC v4:
>  o Extend event matching section of the programming guide.
>  o Improve grammar and spelling.
> ---
>  MAINTAINERS                              |   1 +
>  doc/guides/prog_guide/dispatcher_lib.rst | 433 +++++++++++++++++++++++
>  doc/guides/prog_guide/index.rst          |   1 +
>  3 files changed, 435 insertions(+)
>  create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 43890cad0e..ab35498204 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -1730,6 +1730,7 @@ Dispatcher - EXPERIMENTAL
>  M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>  F: lib/dispatcher/
>  F: app/test/test_dispatcher.c
> +F: doc/guides/prog_guide/dispatcher_lib.rst
>
>  Test Applications
>  -----------------
> diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst
> new file mode 100644
> index 0000000000..951db06081
> --- /dev/null
> +++ b/doc/guides/prog_guide/dispatcher_lib.rst
> @@ -0,0 +1,433 @@
> +..  SPDX-License-Identifier: BSD-3-Clause
> +    Copyright(c) 2023 Ericsson AB.
> +
> +Dispatcher
> +==========
> +
> +Overview
> +--------
> +
> +The purpose of the dispatcher is to help reduce coupling in an
> +:doc:`Eventdev <eventdev>`-based DPDK application.
> +
> +In particular, the dispatcher addresses a scenario where an
> +application's modules share the same event device and event device
> +ports, and perform work on the same lcore threads.
> +
> +The dispatcher replaces the conditional logic that follows an event
> +device dequeue operation, where events are dispatched to different
> +parts of the application, typically based on fields in the
> +``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
> +``sched_type``.
> +
> +Below is an excerpt from a fictitious application consisting of two
> +modules; A and B. In this example, event-to-module routing is based
> +purely on queue id, where module A expects all events on a certain
> +queue id, and module B on two other queue ids. [#Mapping]_
> +
> +.. code-block:: c
> +
> +    for (;;) {
> +            struct rte_event events[MAX_BURST];
> +            unsigned int n;
> +
> +            n = rte_event_dequeue_burst(dev_id, port_id, events,
> +                                       MAX_BURST, 0);
> +
> +            for (i = 0; i < n; i++) {
> +                    const struct rte_event *event = &events[i];
> +
> +                    switch (event->queue_id) {
> +                    case MODULE_A_QUEUE_ID:
> +                            module_a_process(event);
> +                            break;
> +                    case MODULE_B_STAGE_0_QUEUE_ID:
> +                            module_b_process_stage_0(event);
> +                            break;
> +                    case MODULE_B_STAGE_1_QUEUE_ID:
> +                            module_b_process_stage_1(event);
> +                            break;
> +                    }
> +            }
> +    }
> +
> +The issue this example attempts to illustrate is that the centralized
> +conditional logic has knowledge of things that should be private to
> +the modules. In other words, this pattern leads to a violation of
> +module encapsulation.
> +
> +The shared conditional logic contains explicit knowledge about what
> +events should go where. In case, for example, the
> +``module_a_process()`` is broken into two processing stages — a
> +module-internal affair — the shared conditional code must be updated
> +to reflect this change.
> +
> +The centralized event routing code becomes an issue in larger
> +applications, where modules are developed by different organizations.
> +This pattern also makes module reuse across different applications more
> +difficult. The part of the conditional logic relevant for a particular
> +application may need to be duplicated across many module
> +instantiations (e.g., applications and test setups).
> +
> +The dispatcher separates the mechanism (routing events to their
> +receiver) from the policy (which events should go where).
> +
> +The basic operation of the dispatcher is as follows:
> +
> +* Dequeue a batch of events from the event device.
> +* For each event determine which handler should receive the event, using
> +  a set of application-provided, per-handler event matching callback
> +  functions.
> +* Provide events matching a particular handler, to that handler, using
> +  its process callback.
> +
> +Had the above application made use of the dispatcher, the
> +code relevant for its module A may have looked something like this:
> +
> +.. code-block:: c
> +
> +    static bool
> +    module_a_match(const struct rte_event *event, void *cb_data)
> +    {
> +           return event->queue_id == MODULE_A_QUEUE_ID;
> +    }
> +
> +    static void
> +    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
> +                            const struct rte_event *events,
> +                           uint16_t num, void *cb_data)
> +    {
> +            uint16_t i;
> +
> +            for (i = 0; i < num; i++)
> +                    module_a_process_event(&events[i]);
> +    }
> +
> +    /* In the module's initialization code */
> +    rte_dispatcher_register(dispatcher, module_a_match, NULL,
> +                           module_a_process_events, module_a_data);
> +
> +(Error handling is left out of this and future example code in this
> +chapter.)
> +
> +When the shared conditional logic is removed, a new question arises:
> +which part of the system actually runs the dispatching mechanism? Or
> +phrased differently, what is replacing the function hosting the shared
> +conditional logic (typically launched on all lcores using
> +``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is a
> +run as a DPDK :doc:`Service <service_cores>`.
> +
> +The dispatcher is a layer between the application and the event device
> +in the receive direction. In the transmit (i.e., item of work
> +submission) direction, the application directly accesses the Eventdev
> +core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
> +forwarded events to the event device.
> +
> +Dispatcher Creation
> +-------------------
> +
> +A dispatcher is created using ``rte_dispatcher_create()``.
> +
> +The event device must be configured before the dispatcher is created.
> +
> +Usually, only one dispatcher is needed per event device. A dispatcher
> +handles exactly one event device.
> +
> +A dispatcher is freed using the ``rte_dispatcher_free()``
> +function. The dispatcher's service functions must not be running on
> +any lcore at the point of this call.
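> +
> +The following is a minimal creation sketch (assuming ``event_dev_id``
> +identifies an already-configured event device; the error handling
> +shown is illustrative only):
> +
> +.. code-block:: c
> +
> +    struct rte_dispatcher *dispatcher;
> +
> +    dispatcher = rte_dispatcher_create(event_dev_id);
> +
> +    if (dispatcher == NULL)
> +            rte_panic("Unable to create dispatcher (rte_errno %d)\n",
> +                      rte_errno);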
> +
> +Event Port Binding
> +------------------
> +
> +To be able to dequeue events, the dispatcher must know which event
> +ports are to be used, on all the lcores it uses. The application
> +provides this information using
> +``rte_dispatcher_bind_port_to_lcore()``.
> +
> +This call is typically made from the part of the application that
> +deals with deployment issues (e.g., iterating lcores and determining
> +which lcore does what), at the time of application initialization.
> +
> +The ``rte_dispatcher_unbind_port_from_lcore()`` function is used to undo
> +this operation.
> +
> +Multiple lcore threads may not safely use the same event
> +port. [#Port-MT-Safety]_
> +
> +Event ports cannot safely be bound or unbound while the dispatcher's
> +service function is running on any lcore.
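> +
> +A sketch of the binding step may look like the following. The
> +``port_ids`` array, and the batch size and timeout values, are
> +application-specific assumptions; one event port is bound per worker
> +lcore:
> +
> +.. code-block:: c
> +
> +    unsigned int lcore_id;
> +    uint8_t next_port = 0;
> +
> +    RTE_LCORE_FOREACH_WORKER(lcore_id)
> +            rte_dispatcher_bind_port_to_lcore(dispatcher,
> +                                              port_ids[next_port++],
> +                                              32, 0, lcore_id);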
> +
> +Event Handlers
> +--------------
> +
> +The dispatcher handler is an interface between the dispatcher and an
> +application module, used to route events to the appropriate part of
> +the application.
> +
> +Handler Registration
> +^^^^^^^^^^^^^^^^^^^^
> +
> +The event handler interface consists of two function pointers:
> +
> +* The ``rte_dispatcher_match_t`` callback, whose job is to
> +  decide if the event is to be the property of this handler.
> +* The ``rte_dispatcher_process_t``, which is used by the
> +  dispatcher to deliver matched events.
> +
> +An event handler registration is valid on all lcores.
> +
> +The functions pointed to by the match and process callbacks reside in
> +the application's domain logic, with one or more handlers per
> +application module.
> +
> +A module may use more than one event handler, for convenience or to
> +further decouple sub-modules. However, the dispatcher may impose an
> +upper limit on the number of handlers. In addition, installing a large
> +number of handlers increases dispatcher overhead, although this does
> +not nessarily translate to a system-level performance degradation. See

Typo on necessarily?



> +the section on :ref:`Event Clustering` for more information.
> +
> +Handler registration and unregistration cannot safely be done while
> +the dispatcher's service function is running on any lcore.
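> +
> +``rte_dispatcher_register()`` returns a handler id, which may later be
> +used to remove the handler. A sketch, reusing module A's callbacks
> +from the earlier example:
> +
> +.. code-block:: c
> +
> +    int handler_id;
> +
> +    handler_id = rte_dispatcher_register(dispatcher, module_a_match,
> +                                         NULL, module_a_process_events,
> +                                         module_a_data);
> +
> +    /* ... later, with the dispatcher service stopped ... */
> +
> +    rte_dispatcher_unregister(dispatcher, handler_id);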
> +
> +Event Matching
> +^^^^^^^^^^^^^^
> +
> +A handler's match callback function decides if an event should be
> +delivered to this handler, or not.
> +
> +An event is routed to no more than one handler. Thus, if a match
> +function returns true, no further match functions will be invoked for
> +that event.
> +
> +Match functions must not depend on being invoked in any particular
> +order (e.g., in the handler registration order).
> +
> +Events failing to match any handler are dropped, and the
> +``ev_drop_count`` counter is updated accordingly.
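> +
> +A growing drop counter may thus serve as an indication of missing or
> +buggy match functions. A sketch of checking it via the statistics API
> +(``printf()`` and the ``PRIu64`` macro are used for illustration
> +only):
> +
> +.. code-block:: c
> +
> +    struct rte_dispatcher_stats stats;
> +
> +    rte_dispatcher_stats_get(dispatcher, &stats);
> +
> +    if (stats.ev_drop_count > 0)
> +            printf("%" PRIu64 " events matched no handler\n",
> +                   stats.ev_drop_count);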
> +
> +Event Delivery
> +^^^^^^^^^^^^^^
> +
> +The handler callbacks are invoked by the dispatcher's service
> +function, upon the arrival of events to the event ports bound to the
> +running service lcore.
> +
> +A particular event is delivered to at most one handler.
> +
> +The application must not depend on all match callback invocations for
> +a particular event batch being made prior to any process calls being
> +made. For example, if the dispatcher dequeues two events from
> +the event device, it may choose to find out the destination for the
> +first event, and deliver it, and then continue to find out the
> +destination for the second, and then deliver that event as well. The
> +dispatcher may also choose a strategy where no event is delivered
> +until the destination handlers for both events have been determined.
> +
> +The events provided in a single process call always belong to the same
> +event port dequeue burst.
> +
> +.. _Event Clustering:
> +
> +Event Clustering
> +^^^^^^^^^^^^^^^^
> +
> +The dispatcher maintains the order of events destined for the same
> +handler.
> +
> +*Order* here refers to the order in which the events were delivered
> +from the event device to the dispatcher (i.e., in the event array
> +populated by ``rte_event_dequeue_burst()``), in relation to the order
> +in which the dispatcher delivers these events to the application.
> +
> +The dispatcher *does not* guarantee to maintain the order of events
> +delivered to *different* handlers.
> +
> +For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
> +and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
> +consider a scenario where the following events are dequeued from the
> +event device (qid is short for event queue id).
> +
> +.. code-block::
> +
> +    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
> +
> +The dispatcher may deliver the events in the following manner:
> +
> +.. code-block::
> +
> +   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
> +   module_a_process([e2: qid=0])
> +   module_b_stage_0_process([e3: qid=1])
> +
> +The dispatcher may also choose to cluster (group) all events destined
> +for ``module_b_stage_0_process()`` into one array:
> +
> +.. code-block::
> +
> +   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
> +   module_a_process([e2: qid=0])
> +
> +Here, the event ``e2`` is reordered and placed behind ``e3``, from a
> +delivery order point of view. This kind of reshuffling is allowed,
> +since the events are destined for different handlers.
> +
> +The dispatcher may also deliver ``e2`` before the three events
> +destined for module B.
> +
> +An example of what the dispatcher may not do is to reorder event
> +``e1`` so that it precedes ``e0`` in the array passed to module B's
> +stage 0 process callback.
> +
> +Although clustering requires some extra work for the dispatcher, it
> +leads to fewer process function calls. In addition, and likely more
> +importantly, it improves temporal locality of memory accesses to
> +handler-specific data structures in the application, which in turn may
> +lead to fewer cache misses and improved overall performance.
> +
> +Finalize
> +--------
> +
> +The dispatcher may be configured to notify one or more parts of the
> +application when the matching and processing of a batch of events has
> +completed.
> +
> +The ``rte_dispatcher_finalize_register()`` call is used to register a
> +finalize callback. The function ``rte_dispatcher_finalize_unregister()``
> +is used to remove a callback.
> +
> +The finalize hook may be used by a set of event handlers (in the same
> +module, or a set of cooperating modules) sharing an event output
> +buffer, since it allows for flushing of the buffers at the last
> +possible moment. In particular, it allows for buffering of
> +``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
> +``rte_event_dequeue_burst()`` call is made (assuming implicit release
> +is employed).
> +
> +The following is an example with an application-defined event output
> +buffer (the ``event_buffer``):
> +
> +.. code-block:: c
> +
> +    static void
> +    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
> +                   void *cb_data)
> +    {
> +            struct event_buffer *buffer = cb_data;
> +            unsigned lcore_id = rte_lcore_id();
> +            struct event_buffer_lcore *lcore_buffer =
> +                    &buffer->lcore_buffer[lcore_id];
> +
> +            event_buffer_lcore_flush(lcore_buffer);
> +    }
> +
> +    /* In the module's initialization code */
> +    rte_dispatcher_finalize_register(dispatcher, finalize_batch,
> +                                     shared_event_buffer);
> +
> +The dispatcher does not track any relationship between a handler and a
> +finalize callback, and all finalize callbacks will be called if (and
> +only if) at least one event was dequeued from the event device.
> +
> +Finalize callback registration and unregistration cannot safely be
> +done while the dispatcher's service function is running on any lcore.
> +
> +Service
> +-------
> +
> +The dispatcher is a DPDK service, and is managed in a manner similar
> +to other DPDK services (e.g., an Event Timer Adapter).
> +
> +Below is an example of how to configure a particular lcore to serve as
> +a service lcore, and to map an already-created dispatcher to that
> +lcore.
> +
> +.. code-block:: c
> +
> +    static void
> +    launch_dispatcher_core(struct rte_dispatcher *dispatcher,
> +                           unsigned lcore_id)
> +    {
> +            uint32_t service_id;
> +
> +            rte_service_lcore_add(lcore_id);
> +
> +            rte_dispatcher_service_id_get(dispatcher, &service_id);
> +
> +            rte_service_map_lcore_set(service_id, lcore_id, 1);
> +
> +            rte_service_lcore_start(lcore_id);
> +
> +            rte_service_runstate_set(service_id, 1);
> +    }
> +
> +As the final step, the dispatcher must be started.
> +
> +.. code-block:: c
> +
> +    rte_dispatcher_start(dispatcher);
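> +
> +Conversely, a sketch of tearing everything down (with ``service_id``
> +and ``lcore_id`` as in ``launch_dispatcher_core()`` above) may look
> +like:
> +
> +.. code-block:: c
> +
> +    rte_dispatcher_stop(dispatcher);
> +
> +    rte_service_map_lcore_set(service_id, lcore_id, 0);
> +    rte_service_lcore_stop(lcore_id);
> +
> +    rte_dispatcher_free(dispatcher);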
> +
> +
> +Multi Service Dispatcher Lcores
> +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> +
> +In an Eventdev application, most (or all) compute-intensive and
> +performance-sensitive processing is done in an event-driven manner,
> +where CPU cycles spent on application domain logic are the direct
> +result of items of work (i.e., ``rte_event`` events) dequeued from an
> +event device.
> +
> +In the light of this, it makes sense to have the dispatcher service be
> +the only DPDK service on all lcores used for packet processing — at
> +least in principle.
> +
> +However, there is nothing in DPDK that prevents colocating other
> +services with the dispatcher service on the same lcore.
> +
> +Tasks that, prior to the introduction of the dispatcher into the
> +application, were performed on the lcore even when no events were
> +received are prime targets for being converted into such auxiliary
> +services, running on the dispatcher core set.
> +
> +An example of such a task would be the management of a per-lcore timer
> +wheel (i.e., calling ``rte_timer_manage()``).
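> +
> +A sketch of such an auxiliary service (the service component
> +registration boilerplate is omitted; ``rte_timer_manage()`` is
> +declared in ``rte_timer.h``):
> +
> +.. code-block:: c
> +
> +    static int32_t
> +    timer_manage_service(void *userdata __rte_unused)
> +    {
> +            /* Run expiry processing for this lcore's timers. */
> +            rte_timer_manage();
> +
> +            return 0;
> +    }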
> +
> +Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
> +similar technique) may opt to factor out quiescent state signaling
> +(e.g., calling ``rte_rcu_qsbr_quiescent()``) into a separate service,
> +to ensure resource reclamation occurs even though some lcores
> +currently do not process any events.
> +
> +If services other than the dispatcher service are mapped to a service
> +lcore, it's important that the other services are well-behaved and
> +don't interfere with event processing to the extent that the system's
> +throughput and/or latency requirements are at risk of not being met.
> +
> +In particular, to avoid jitter, they should have a small upper bound
> +for the maximum amount of time spent in a single service function
> +call.
> +
> +An example of a scenario with a more CPU-heavy colocated service is a
> +low-lcore count deployment, where the event device lacks the
> +``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
> +requires software to feed incoming packets into the event device). In
> +this case, the best performance may be achieved if the Event Ethernet
> +RX and/or TX Adapters are mapped to lcores also used for event
> +dispatching, since otherwise the adapter lcores would have a lot of
> +idle CPU cycles.
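> +
> +A sketch of such a mapping, for the RX adapter case (``rx_adapter_id``
> +and ``lcore_id`` are assumptions):
> +
> +.. code-block:: c
> +
> +    uint32_t rx_service_id;
> +
> +    rte_event_eth_rx_adapter_service_id_get(rx_adapter_id,
> +                                            &rx_service_id);
> +
> +    rte_service_map_lcore_set(rx_service_id, lcore_id, 1);
> +    rte_service_runstate_set(rx_service_id, 1);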
> +
> +.. rubric:: Footnotes
> +
> +.. [#Mapping]
> +   Event routing may reasonably be done based on other ``rte_event``
> +   fields (or even event user data). Indeed, that's the very reason to
> +   have match callback functions, instead of a simple queue
> +   id-to-handler mapping scheme. Queue id-based routing serves well in
> +   a simple example.
> +
> +.. [#Port-MT-Safety]
> +   This property (which is a feature, not a bug) is inherited from the
> +   core Eventdev APIs.
> diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
> index 52a6d9e7aa..ab05bd6074 100644
> --- a/doc/guides/prog_guide/index.rst
> +++ b/doc/guides/prog_guide/index.rst
> @@ -60,6 +60,7 @@ Programmer's Guide
>      event_ethernet_tx_adapter
>      event_timer_adapter
>      event_crypto_adapter
> +    dispatcher_lib
>      qos_framework
>      power_man
>      packet_classif_access_ctrl
> --
> 2.34.1
>


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 1/3] lib: introduce dispatcher library
  2023-10-05  8:36                                           ` David Marchand
@ 2023-10-05 10:08                                             ` Mattias Rönnblom
  2023-10-06  8:46                                               ` David Marchand
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-05 10:08 UTC (permalink / raw)
  To: David Marchand, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-10-05 10:36, David Marchand wrote:
> Hello Mattias,
> 
> On Thu, Sep 28, 2023 at 9:36 AM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>>
>> The purpose of the dispatcher library is to help reduce coupling in an
>> Eventdev-based DPDK application.
>>
>> In addition, the dispatcher also provides a convenient and flexible
>> way for the application to use service cores for application-level
>> processing.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
>> Reviewed-by: Heng Wang <heng.wang@ericsson.com>
>>
> 
> I have some simple comments.
> 
> 
>> --
>>
>> PATCH v5:
>>   o Move from using an integer id to a pointer to reference a dispatcher
>>     instance, to simplify the API.
>>   o Fix bug where dispatcher stats retrieval function erroneously depended
>>     on the user-supplied stats buffer being all-zero.
>>
>> PATCH v4:
>>   o Fix bugs in handler and finalizer unregistration. (Naga Harish)
>>   o Return -EINVAL in cases where NULL pointers were provided in
>>     calls requiring non-NULL pointers. (Naga Harish)
>>   o Add experimental warning for the whole API. (Jerin Jacob)
>>
>> PATCH v3:
>>   o To underline its optional character and since it does not provide
>>     hardware abstraction, the event dispatcher is now a separate
>>     library.
>>   o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
>>     shorter and to avoid the rte_event_* namespace.
>>
>> PATCH v2:
>>   o Add dequeue batch count statistic.
>>   o Add statistics reset function to API.
>>   o Clarify MT safety guarantees (or lack thereof) in the API documentation.
>>   o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
>>     to be consistent with similar loops elsewhere in the dispatcher.
>>   o Fix variable names in finalizer unregister function.
>>
>> PATCH:
>>   o Change prefix from RED to EVD, to avoid confusion with random
>>     early detection.
>>
>> RFC v4:
>>   o Move handlers to per-lcore data structures.
>>   o Introduce mechanism which rearranges handlers so that often-used
>>     handlers tend to be tried first.
>>   o Terminate dispatch loop in case all events are delivered.
>>   o To avoid the dispatcher's service function hogging the CPU, process
>>     only one batch per call.
>>   o Have service function return -EAGAIN if no work is performed.
>>   o Events delivered in the process function is no longer marked 'const',
>>     since modifying them may be useful for the application and cause
>>     no difficulties for the dispatcher.
>>   o Various minor API documentation improvements.
>>
>> RFC v3:
>>   o Add stats_get() function to the version.map file.
>> ---
>>   MAINTAINERS                     |   3 +
>>   doc/api/doxy-api-index.md       |   1 +
>>   doc/api/doxy-api.conf.in        |   1 +
>>   lib/dispatcher/meson.build      |  17 +
>>   lib/dispatcher/rte_dispatcher.c | 708 ++++++++++++++++++++++++++++++++
>>   lib/dispatcher/rte_dispatcher.h | 468 +++++++++++++++++++++
>>   lib/dispatcher/version.map      |  20 +
>>   lib/meson.build                 |   2 +
>>   8 files changed, 1220 insertions(+)
>>   create mode 100644 lib/dispatcher/meson.build
>>   create mode 100644 lib/dispatcher/rte_dispatcher.c
>>   create mode 100644 lib/dispatcher/rte_dispatcher.h
>>   create mode 100644 lib/dispatcher/version.map
>>
>> diff --git a/MAINTAINERS b/MAINTAINERS
>> index a926155f26..6704cd5b2c 100644
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -1726,6 +1726,9 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
>>   M: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>   F: lib/node/
>>
>> +Dispatcher - EXPERIMENTAL
>> +M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> +F: lib/dispatcher/
> 
> Double empty line between sections in MAINTAINERS file, please.
> 

OK.

>>
>>   Test Applications
>>   -----------------
>> diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
>> index fdeda13932..7d0cad9fed 100644
>> --- a/doc/api/doxy-api-index.md
>> +++ b/doc/api/doxy-api-index.md
>> @@ -155,6 +155,7 @@ The public API headers are grouped by topics:
>>
>>   - **classification**
>>     [reorder](@ref rte_reorder.h),
>> +  [dispatcher](@ref rte_dispatcher.h),
>>     [distributor](@ref rte_distributor.h),
>>     [EFD](@ref rte_efd.h),
>>     [ACL](@ref rte_acl.h),
>> diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
>> index a88accd907..59c679e621 100644
>> --- a/doc/api/doxy-api.conf.in
>> +++ b/doc/api/doxy-api.conf.in
>> @@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
>>                             @TOPDIR@/lib/cmdline \
>>                             @TOPDIR@/lib/compressdev \
>>                             @TOPDIR@/lib/cryptodev \
>> +                          @TOPDIR@/lib/dispatcher \
>>                             @TOPDIR@/lib/distributor \
>>                             @TOPDIR@/lib/dmadev \
>>                             @TOPDIR@/lib/efd \
> 
> 
> I see no release note updates, please add one entry (in the "New
> features" section) to announce this new library.
> 
> 

OK.

>> diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
>> new file mode 100644
>> index 0000000000..c6054a3a5d
>> --- /dev/null
>> +++ b/lib/dispatcher/meson.build
>> @@ -0,0 +1,17 @@
>> +# SPDX-License-Identifier: BSD-3-Clause
>> +# Copyright(c) 2023 Ericsson AB
>> +
>> +if is_windows
>> +    build = false
>> +    reason = 'not supported on Windows'
>> +    subdir_done()
>> +endif
>> +
>> +sources = files(
>> +        'rte_dispatcher.c',
>> +)
>> +headers = files(
>> +        'rte_dispatcher.h',
>> +)
> 
> For a single $file, you can go with a single line: files('$file')
> 

Makes sense.

> 
>> +
>> +deps += ['eventdev']
>> diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
>> new file mode 100644
>> index 0000000000..0e69db2b9b
>> --- /dev/null
>> +++ b/lib/dispatcher/rte_dispatcher.c
>> @@ -0,0 +1,708 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2023 Ericsson AB
>> + */
>> +
>> +#include <stdbool.h>
>> +#include <stdint.h>
>> +
>> +#include <rte_branch_prediction.h>
>> +#include <rte_common.h>
>> +#include <rte_lcore.h>
>> +#include <rte_random.h>
>> +#include <rte_service_component.h>
>> +
>> +#include "eventdev_pmd.h"
>> +
>> +#include <rte_dispatcher.h>
>> +
>> +#define EVD_MAX_PORTS_PER_LCORE 4
>> +#define EVD_MAX_HANDLERS 32
>> +#define EVD_MAX_FINALIZERS 16
>> +#define EVD_AVG_PRIO_INTERVAL 2000
>> +#define EVD_SERVICE_NAME "dispatcher"
>> +
>> +struct rte_dispatcher_lcore_port {
>> +       uint8_t port_id;
>> +       uint16_t batch_size;
>> +       uint64_t timeout;
>> +};
>> +
>> +struct rte_dispatcher_handler {
>> +       int id;
>> +       rte_dispatcher_match_t match_fun;
>> +       void *match_data;
>> +       rte_dispatcher_process_t process_fun;
>> +       void *process_data;
>> +};
>> +
>> +struct rte_dispatcher_finalizer {
>> +       int id;
>> +       rte_dispatcher_finalize_t finalize_fun;
>> +       void *finalize_data;
>> +};
>> +
>> +struct rte_dispatcher_lcore {
>> +       uint8_t num_ports;
>> +       uint16_t num_handlers;
>> +       int32_t prio_count;
>> +       struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
>> +       struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
>> +       struct rte_dispatcher_stats stats;
>> +} __rte_cache_aligned;
>> +
>> +struct rte_dispatcher {
>> +       uint8_t event_dev_id;
>> +       int socket_id;
>> +       uint32_t service_id;
>> +       struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
>> +       uint16_t num_finalizers;
>> +       struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
>> +};
>> +
>> +static int
>> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
>> +                      const struct rte_event *event)
> 
> Wrt DPDK coding tyle, indent is a single tab.
> Adding an extra tab is recommended when continuing control statements
> like if()/for()/..
> 

Sure, but I don't understand why you mention this.

> On the other hand, max accepted length for a line is 100 columns.
> 
> Wdyt of a single line for this specific case?


Are you asking why the evd_lookup_handler_idx() function prototype is 
not a single line?

It would make it long, that's why. Even if 100 wide lines are allowed, 
it doesn't mean the author is forced to use such long lines?

> And please check the indentation in the rest of the file.
> 
> 
>> +{
>> +       uint16_t i;
>> +
>> +       for (i = 0; i < lcore->num_handlers; i++) {
>> +               struct rte_dispatcher_handler *handler =
>> +                       &lcore->handlers[i];
>> +
>> +               if (handler->match_fun(event, handler->match_data))
>> +                       return i;
>> +       }
>> +
>> +       return -1;
>> +}
>> +
>> +static void
>> +evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
>> +                      int handler_idx)
>> +{
>> +       struct rte_dispatcher_handler tmp;
>> +
>> +       if (handler_idx == 0)
>> +               return;
>> +
>> +       /* Let the lucky handler "bubble" up the list */
>> +
>> +       tmp = lcore->handlers[handler_idx - 1];
>> +
>> +       lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
>> +
>> +       lcore->handlers[handler_idx] = tmp;
> 
> We don't need so many blank lines.
> 

Indeed, we don't.

> 
>> +}
>> +
>> +static inline void
>> +evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
>> +                               int handler_idx, uint16_t handler_events)
>> +{
>> +       lcore->prio_count -= handler_events;
>> +
>> +       if (unlikely(lcore->prio_count <= 0)) {
>> +               evd_prioritize_handler(lcore, handler_idx);
>> +
>> +               /*
>> +                * Randomize the interval in the unlikely case
>> +                * the traffic follow some very strict pattern.
>> +                */
>> +               lcore->prio_count =
>> +                       rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
>> +                       EVD_AVG_PRIO_INTERVAL / 2;
>> +       }
>> +}
>> +
>> +static inline void
>> +evd_dispatch_events(struct rte_dispatcher *dispatcher,
>> +                   struct rte_dispatcher_lcore *lcore,
>> +                   struct rte_dispatcher_lcore_port *port,
>> +                   struct rte_event *events, uint16_t num_events)
>> +{
>> +       int i;
>> +       struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
>> +       uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
>> +       uint16_t drop_count = 0;
>> +       uint16_t dispatch_count;
>> +       uint16_t dispatched = 0;
>> +
>> +       for (i = 0; i < num_events; i++) {
>> +               struct rte_event *event = &events[i];
>> +               int handler_idx;
>> +
>> +               handler_idx = evd_lookup_handler_idx(lcore, event);
>> +
>> +               if (unlikely(handler_idx < 0)) {
>> +                       drop_count++;
>> +                       continue;
>> +               }
>> +
>> +               bursts[handler_idx][burst_lens[handler_idx]] = *event;
>> +               burst_lens[handler_idx]++;
>> +       }
>> +
>> +       dispatch_count = num_events - drop_count;
>> +
>> +       for (i = 0; i < lcore->num_handlers &&
>> +                dispatched < dispatch_count; i++) {
>> +               struct rte_dispatcher_handler *handler =
>> +                       &lcore->handlers[i];
>> +               uint16_t len = burst_lens[i];
>> +
>> +               if (len == 0)
>> +                       continue;
>> +
>> +               handler->process_fun(dispatcher->event_dev_id, port->port_id,
>> +                                    bursts[i], len, handler->process_data);
>> +
>> +               dispatched += len;
>> +
>> +               /*
>> +                * Safe, since any reshuffling will only involve
>> +                * already-processed handlers.
>> +                */
>> +               evd_consider_prioritize_handler(lcore, i, len);
>> +       }
>> +
>> +       lcore->stats.ev_batch_count++;
>> +       lcore->stats.ev_dispatch_count += dispatch_count;
>> +       lcore->stats.ev_drop_count += drop_count;
>> +
>> +       for (i = 0; i < dispatcher->num_finalizers; i++) {
>> +               struct rte_dispatcher_finalizer *finalizer =
>> +                       &dispatcher->finalizers[i];
>> +
>> +               finalizer->finalize_fun(dispatcher->event_dev_id,
>> +                                       port->port_id,
>> +                                       finalizer->finalize_data);
>> +       }
>> +}
>> +
>> +static __rte_always_inline uint16_t
>> +evd_port_dequeue(struct rte_dispatcher *dispatcher,
>> +                struct rte_dispatcher_lcore *lcore,
>> +                struct rte_dispatcher_lcore_port *port)
>> +{
>> +       uint16_t batch_size = port->batch_size;
>> +       struct rte_event events[batch_size];
>> +       uint16_t n;
>> +
>> +       n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
>> +                                   events, batch_size, port->timeout);
>> +
>> +       if (likely(n > 0))
>> +               evd_dispatch_events(dispatcher, lcore, port, events, n);
>> +
>> +       lcore->stats.poll_count++;
>> +
>> +       return n;
>> +}
>> +
>> +static __rte_always_inline uint16_t
>> +evd_lcore_process(struct rte_dispatcher *dispatcher,
>> +                 struct rte_dispatcher_lcore *lcore)
>> +{
>> +       uint16_t i;
>> +       uint16_t event_count = 0;
>> +
>> +       for (i = 0; i < lcore->num_ports; i++) {
>> +               struct rte_dispatcher_lcore_port *port =
>> +                       &lcore->ports[i];
>> +
>> +               event_count += evd_port_dequeue(dispatcher, lcore, port);
>> +       }
>> +
>> +       return event_count;
>> +}
>> +
>> +static int32_t
>> +evd_process(void *userdata)
>> +{
>> +       struct rte_dispatcher *dispatcher = userdata;
>> +       unsigned int lcore_id = rte_lcore_id();
>> +       struct rte_dispatcher_lcore *lcore =
>> +               &dispatcher->lcores[lcore_id];
>> +       uint64_t event_count;
>> +
>> +       event_count = evd_lcore_process(dispatcher, lcore);
>> +
>> +       if (unlikely(event_count == 0))
>> +               return -EAGAIN;
>> +
>> +       return 0;
>> +}
>> +
>> +static int
>> +evd_service_register(struct rte_dispatcher *dispatcher)
>> +{
>> +       struct rte_service_spec service = {
>> +               .callback = evd_process,
>> +               .callback_userdata = dispatcher,
>> +               .capabilities = RTE_SERVICE_CAP_MT_SAFE,
>> +               .socket_id = dispatcher->socket_id
>> +       };
>> +       int rc;
>> +
>> +       snprintf(service.name, sizeof(service.name), EVD_SERVICE_NAME);
>> +
>> +       rc = rte_service_component_register(&service, &dispatcher->service_id);
>> +
> 
> No need for blank line.
> 

OK.

>> +       if (rc)
> 
> if (rc != 0)
> 

Noted.

>> +               RTE_EDEV_LOG_ERR("Registration of dispatcher service "
>> +                                "%s failed with error code %d\n",
>> +                                service.name, rc);
>> +
>> +       return rc;
>> +}
>> +
>> +static int
>> +evd_service_unregister(struct rte_dispatcher *dispatcher)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_service_component_unregister(dispatcher->service_id);
>> +
>> +       if (rc)
>> +               RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
>> +                                "failed with error code %d\n", rc);
>> +
>> +       return rc;
>> +}
>> +
>> +struct rte_dispatcher *
>> +rte_dispatcher_create(uint8_t event_dev_id)
>> +{
>> +       int socket_id;
>> +       struct rte_dispatcher *dispatcher;
>> +       int rc;
>> +
>> +       socket_id = rte_event_dev_socket_id(event_dev_id);
>> +
>> +       dispatcher =
>> +               rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
>> +                                 RTE_CACHE_LINE_SIZE, socket_id);
>> +
>> +       if (dispatcher == NULL) {
>> +               RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
>> +               rte_errno = ENOMEM;
>> +               return NULL;
>> +       }
>> +
>> +       *dispatcher = (struct rte_dispatcher) {
>> +               .event_dev_id = event_dev_id,
>> +               .socket_id = socket_id
>> +       };
>> +
>> +       rc = evd_service_register(dispatcher);
>> +
>> +       if (rc < 0) {
>> +               rte_free(dispatcher);
>> +               rte_errno = -rc;
>> +               return NULL;
>> +       }
>> +
>> +       return dispatcher;
>> +}
>> +
>> +int
>> +rte_dispatcher_free(struct rte_dispatcher *dispatcher)
>> +{
>> +       int rc;
>> +
>> +       if (dispatcher == NULL)
>> +               return 0;
>> +
>> +       rc = evd_service_unregister(dispatcher);
>> +
>> +       if (rc)
>> +               return rc;
>> +
>> +       rte_free(dispatcher);
>> +
>> +       return 0;
>> +}
>> +
>> +uint32_t
>> +rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher)
>> +{
>> +       return dispatcher->service_id;
>> +}
>> +
>> +static int
>> +lcore_port_index(struct rte_dispatcher_lcore *lcore,
>> +                uint8_t event_port_id)
>> +{
>> +       uint16_t i;
>> +
>> +       for (i = 0; i < lcore->num_ports; i++) {
>> +               struct rte_dispatcher_lcore_port *port =
>> +                       &lcore->ports[i];
>> +
>> +               if (port->port_id == event_port_id)
>> +                       return i;
>> +       }
>> +
>> +       return -1;
>> +}
>> +
>> +int
>> +rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
>> +                                 uint8_t event_port_id, uint16_t batch_size,
>> +                                 uint64_t timeout, unsigned int lcore_id)
>> +{
>> +       struct rte_dispatcher_lcore *lcore;
>> +       struct rte_dispatcher_lcore_port *port;
>> +
>> +       lcore = &dispatcher->lcores[lcore_id];
>> +
>> +       if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
>> +               return -ENOMEM;
>> +
>> +       if (lcore_port_index(lcore, event_port_id) >= 0)
>> +               return -EEXIST;
>> +
>> +       port = &lcore->ports[lcore->num_ports];
>> +
>> +       *port = (struct rte_dispatcher_lcore_port) {
>> +               .port_id = event_port_id,
>> +               .batch_size = batch_size,
>> +               .timeout = timeout
>> +       };
>> +
>> +       lcore->num_ports++;
>> +
>> +       return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
>> +                                     uint8_t event_port_id,
>> +                                     unsigned int lcore_id)
>> +{
>> +       struct rte_dispatcher_lcore *lcore;
>> +       int port_idx;
>> +       struct rte_dispatcher_lcore_port *port;
>> +       struct rte_dispatcher_lcore_port *last;
>> +
>> +       lcore = &dispatcher->lcores[lcore_id];
>> +
>> +       port_idx = lcore_port_index(lcore, event_port_id);
>> +
>> +       if (port_idx < 0)
>> +               return -ENOENT;
>> +
>> +       port = &lcore->ports[port_idx];
>> +       last = &lcore->ports[lcore->num_ports - 1];
>> +
>> +       if (port != last)
>> +               *port = *last;
>> +
>> +       lcore->num_ports--;
>> +
>> +       return 0;
>> +}
>> +
>> +static struct rte_dispatcher_handler*
> 
> Missing a space before *.
> 

Yes, thanks.

>> +evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore,
>> +                           int handler_id)
>> +{
>> +       uint16_t i;
>> +
>> +       for (i = 0; i < lcore->num_handlers; i++) {
>> +               struct rte_dispatcher_handler *handler =
>> +                       &lcore->handlers[i];
>> +
>> +               if (handler->id == handler_id)
>> +                       return handler;
>> +       }
>> +
>> +       return NULL;
>> +}
>> +
>> +static int
>> +evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
>> +{
>> +       int handler_id = 0;
>> +       struct rte_dispatcher_lcore *reference_lcore =
>> +               &dispatcher->lcores[0];
>> +
>> +       if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
>> +               return -1;
>> +
>> +       while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
>> +               handler_id++;
>> +
>> +       return handler_id;
>> +}
>> +
>> +static void
>> +evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
>> +                   const struct rte_dispatcher_handler *handler)
>> +{
>> +       int handler_idx = lcore->num_handlers;
>> +
>> +       lcore->handlers[handler_idx] = *handler;
>> +       lcore->num_handlers++;
>> +}
>> +
>> +static void
>> +evd_install_handler(struct rte_dispatcher *dispatcher,
>> +                   const struct rte_dispatcher_handler *handler)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < RTE_MAX_LCORE; i++) {
>> +               struct rte_dispatcher_lcore *lcore =
>> +                       &dispatcher->lcores[i];
>> +               evd_lcore_install_handler(lcore, handler);
>> +       }
>> +}
>> +
>> +int
>> +rte_dispatcher_register(struct rte_dispatcher *dispatcher,
>> +                       rte_dispatcher_match_t match_fun, void *match_data,
>> +                       rte_dispatcher_process_t process_fun,
>> +                       void *process_data)
>> +{
>> +       struct rte_dispatcher_handler handler = {
>> +               .match_fun = match_fun,
>> +               .match_data = match_data,
>> +               .process_fun = process_fun,
>> +               .process_data = process_data
>> +       };
>> +
>> +       handler.id = evd_alloc_handler_id(dispatcher);
>> +
>> +       if (handler.id < 0)
>> +               return -ENOMEM;
>> +
>> +       evd_install_handler(dispatcher, &handler);
>> +
>> +       return handler.id;
>> +}
>> +
>> +static int
>> +evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
>> +                           int handler_id)
>> +{
>> +       struct rte_dispatcher_handler *unreg_handler;
>> +       int handler_idx;
>> +       uint16_t last_idx;
>> +
>> +       unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
>> +
>> +       if (unreg_handler == NULL) {
>> +               RTE_EDEV_LOG_ERR("Invalid handler id %d\n", handler_id);
>> +               return -EINVAL;
>> +       }
>> +
>> +       handler_idx = unreg_handler - &lcore->handlers[0];
>> +
>> +       last_idx = lcore->num_handlers - 1;
>> +
>> +       if (handler_idx != last_idx) {
>> +               /* move all handlers to maintain handler order */
>> +               int n = last_idx - handler_idx;
>> +               memmove(unreg_handler, unreg_handler + 1,
>> +                       sizeof(struct rte_dispatcher_handler) * n);
>> +       }
>> +
>> +       lcore->num_handlers--;
>> +
>> +       return 0;
>> +}
>> +
>> +static int
>> +evd_uninstall_handler(struct rte_dispatcher *dispatcher,
>> +                     int handler_id)
>> +{
>> +       unsigned int lcore_id;
>> +
>> +       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
>> +               struct rte_dispatcher_lcore *lcore =
>> +                       &dispatcher->lcores[lcore_id];
>> +               int rc;
>> +
>> +               rc = evd_lcore_uninstall_handler(lcore, handler_id);
>> +
>> +               if (rc < 0)
>> +                       return rc;
>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id)
>> +{
>> +       int rc;
> 
> No need for rc.
> 

OK.

>> +
>> +       rc = evd_uninstall_handler(dispatcher, handler_id);
>> +
>> +       return rc;
>> +}
>> +
>> +static struct rte_dispatcher_finalizer*
> 
> Missing a space before *.
> 

OK.

> 
>> +evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
>> +                      int handler_id)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < dispatcher->num_finalizers; i++) {
>> +               struct rte_dispatcher_finalizer *finalizer =
>> +                       &dispatcher->finalizers[i];
>> +
>> +               if (finalizer->id == handler_id)
>> +                       return finalizer;
>> +       }
>> +
>> +       return NULL;
>> +}
>> +
>> +static int
>> +evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
>> +{
>> +       int finalizer_id = 0;
>> +
>> +       while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
>> +               finalizer_id++;
>> +
>> +       return finalizer_id;
>> +}
>> +
>> +static struct rte_dispatcher_finalizer *
>> +evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
>> +{
>> +       int finalizer_idx;
>> +       struct rte_dispatcher_finalizer *finalizer;
>> +
>> +       if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
>> +               return NULL;
>> +
>> +       finalizer_idx = dispatcher->num_finalizers;
>> +       finalizer = &dispatcher->finalizers[finalizer_idx];
>> +
>> +       finalizer->id = evd_alloc_finalizer_id(dispatcher);
>> +
>> +       dispatcher->num_finalizers++;
>> +
>> +       return finalizer;
>> +}
>> +
>> +int
>> +rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
>> +                                rte_dispatcher_finalize_t finalize_fun,
>> +                                void *finalize_data)
>> +{
>> +       struct rte_dispatcher_finalizer *finalizer;
>> +
>> +       finalizer = evd_alloc_finalizer(dispatcher);
>> +
>> +       if (finalizer == NULL)
>> +               return -ENOMEM;
>> +
>> +       finalizer->finalize_fun = finalize_fun;
>> +       finalizer->finalize_data = finalize_data;
>> +
>> +       return finalizer->id;
>> +}
>> +
>> +int
>> +rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
>> +                                  int finalizer_id)
>> +{
>> +       struct rte_dispatcher_finalizer *unreg_finalizer;
>> +       int finalizer_idx;
>> +       uint16_t last_idx;
>> +
>> +       unreg_finalizer = evd_get_finalizer_by_id(dispatcher, finalizer_id);
>> +
>> +       if (unreg_finalizer == NULL) {
>> +               RTE_EDEV_LOG_ERR("Invalid finalizer id %d\n", finalizer_id);
>> +               return -EINVAL;
>> +       }
>> +
>> +       finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
>> +
>> +       last_idx = dispatcher->num_finalizers - 1;
>> +
>> +       if (finalizer_idx != last_idx) {
>> +               /* move all finalizers to maintain order */
>> +               int n = last_idx - finalizer_idx;
>> +               memmove(unreg_finalizer, unreg_finalizer + 1,
>> +                       sizeof(struct rte_dispatcher_finalizer) * n);
>> +       }
>> +
>> +       dispatcher->num_finalizers--;
>> +
>> +       return 0;
>> +}
>> +
>> +static int
>> +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_service_component_runstate_set(dispatcher->service_id,
>> +                                               state);
>> +
>> +       if (rc != 0) {
>> +               RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
>> +                                "service component run state to %d\n", rc,
>> +                                state);
>> +               RTE_ASSERT(0);
> 
> Why not propagating the error to callers?
> 
> 

The root cause would be a programming error, hence an assertion is a 
more appropriate way to deal with the situation.

>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +int
>> +rte_dispatcher_start(struct rte_dispatcher *dispatcher)
>> +{
>> +       return evd_set_service_runstate(dispatcher, 1);
>> +}
>> +
>> +int
>> +rte_dispatcher_stop(struct rte_dispatcher *dispatcher)
>> +{
>> +       return evd_set_service_runstate(dispatcher, 0);
>> +}
>> +
>> +static void
>> +evd_aggregate_stats(struct rte_dispatcher_stats *result,
>> +                   const struct rte_dispatcher_stats *part)
>> +{
>> +       result->poll_count += part->poll_count;
>> +       result->ev_batch_count += part->ev_batch_count;
>> +       result->ev_dispatch_count += part->ev_dispatch_count;
>> +       result->ev_drop_count += part->ev_drop_count;
>> +}
>> +
>> +void
>> +rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
>> +                        struct rte_dispatcher_stats *stats)
>> +{
>> +       unsigned int lcore_id;
>> +
>> +       *stats = (struct rte_dispatcher_stats) {};
>> +
>> +       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
>> +               const struct rte_dispatcher_lcore *lcore =
>> +                       &dispatcher->lcores[lcore_id];
>> +
>> +               evd_aggregate_stats(stats, &lcore->stats);
>> +       }
>> +}
>> +
>> +void
>> +rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher)
>> +{
>> +       unsigned int lcore_id;
>> +
>> +       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
>> +               struct rte_dispatcher_lcore *lcore =
>> +                       &dispatcher->lcores[lcore_id];
>> +
>> +               lcore->stats = (struct rte_dispatcher_stats) {};
>> +       }
>> +}
>> diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
>> new file mode 100644
>> index 0000000000..0387316d7b
>> --- /dev/null
>> +++ b/lib/dispatcher/rte_dispatcher.h
>> @@ -0,0 +1,468 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2023 Ericsson AB
>> + */
>> +
>> +#ifndef __RTE_DISPATCHER_H__
>> +#define __RTE_DISPATCHER_H__
>> +
>> +/**
>> + * @file
>> + *
>> + * RTE Dispatcher
>> + *
>> + * @warning
>> + * @b EXPERIMENTAL:
>> + * All functions in this file may be changed or removed without prior notice.
>> + *
>> + * The purpose of the dispatcher is to help decouple different parts
>> + * of an application (e.g., modules), sharing the same underlying
>> + * event device.
>> + */
>> +
>> +#ifdef __cplusplus
>> +extern "C" {
>> +#endif
>> +
>> +#include <rte_eventdev.h>
> 
> The headers check does not complain, yet this header as a dependency
> on stdbool (for bool type), stdint.h (for uintX_t types) and
> rte_compat.h (for __rte_experimental).
> I prefer we have explicit includes here rather than rely on implicit
> rte_eventdev.h dependencies.
> 

Makes sense.

>> +
>> +/**
>> + * Function prototype for match callbacks.
>> + *
>> + * Match callbacks are used by an application to decide how the
>> + * dispatcher distributes events to different parts of the
>> + * application.
>> + *
>> + * The application is not expected to process the event at the point
>> + * of the match call. Such matters should be deferred to the process
>> + * callback invocation.
>> + *
>> + * The match callback may be used as an opportunity to prefetch data.
>> + *
>> + * @param event
>> + *  Pointer to event
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_dispatcher_register().
>> + *
>> + * @return
>> + *   Returns true in case this events should be delivered (via
> 
> event*
> 

Fixed.

>> + *   the process callback), and false otherwise.
>> + */
>> +typedef bool
>> +(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
>> +
>> +/**
>> + * Function prototype for process callbacks.
>> + *
>> + * The process callbacks are used by the dispatcher to deliver
>> + * events for processing.
>> + *
>> + * @param event_dev_id
>> + *  The originating event device id.
>> + *
>> + * @param event_port_id
>> + *  The originating event port.
>> + *
>> + * @param events
>> + *  Pointer to an array of events.
>> + *
>> + * @param num
>> + *  The number of events in the @p events array.
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_dispatcher_register().
>> + */
>> +
>> +typedef void
>> +(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
>> +                                 struct rte_event *events, uint16_t num,
>> +                                 void *cb_data);
>> +
>> +/**
>> + * Function prototype for finalize callbacks.
>> + *
>> + * The finalize callbacks are used by the dispatcher to notify the
>> + * application it has delivered all events from a particular batch
>> + * dequeued from the event device.
>> + *
>> + * @param event_dev_id
>> + *  The originating event device id.
>> + *
>> + * @param event_port_id
>> + *  The originating event port.
>> + *
>> + * @param cb_data
>> + *  The pointer supplied by the application in
>> + *  rte_dispatcher_finalize_register().
>> + */
>> +
>> +typedef void
>> +(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
>> +                                  void *cb_data);
>> +
>> +/**
>> + * Dispatcher statistics
>> + */
>> +struct rte_dispatcher_stats {
>> +       uint64_t poll_count;
>> +       /**< Number of event dequeue calls made toward the event device. */
> 
> We had a number of issues with doxygen post annotations.
> Prefer the prefixed ones.
> 

OK. More readable, too. I just used the postfix syntax since it seemed 
the only one used in DPDK.

> +       /** Number of event dequeue calls made toward the event device. */
> +       uint64_t poll_count;
> 
> 
>> +       uint64_t ev_batch_count;
>> +       /**< Number of non-empty event batches dequeued from event device.*/
>> +       uint64_t ev_dispatch_count;
>> +       /**< Number of events dispatched to a handler.*/
>> +       uint64_t ev_drop_count;
>> +       /**< Number of events dropped because no handler was found. */
>> +};
>> +
>> +/**
>> + * Create a dispatcher with the specified id.
>> + *
>> + * @param event_dev_id
>> + *  The identifier of the event device from which this dispatcher
>> + *  will dequeue events.
>> + *
>> + * @return
>> + *   A pointer to a new dispatcher instance, or NULL on failure, in which
>> + *   case rte_errno is set.
>> + */
>> +__rte_experimental
>> +struct rte_dispatcher *
>> +rte_dispatcher_create(uint8_t event_dev_id);
>> +
>> +/**
>> + * Free a dispatcher.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_free(struct rte_dispatcher *dispatcher);
>> +
>> +/**
>> + * Retrieve the service identifier of a dispatcher.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @return
>> + *  The dispatcher service's id.
>> + */
>> +__rte_experimental
>> +uint32_t
>> +rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher);
>> +
>> +/**
>> + * Binds an event device port to a specific lcore on the specified
>> + * dispatcher.
>> + *
>> + * This function configures the event port id to be used by the event
>> + * dispatcher service, if run on the specified lcore.
>> + *
>> + * Multiple event device ports may be bound to the same lcore. A
>> + * particular port must not be bound to more than one lcore.
>> + *
>> + * If the dispatcher service is mapped (with rte_service_map_lcore_set())
>> + * to a lcore to which no ports are bound, the service function will be a
>> + * no-operation.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but not while the dispatcher is running on lcore
>> + * specified by @c lcore_id.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @param event_port_id
>> + *  The event device port identifier.
>> + *
>> + * @param batch_size
>> + *  The batch size to use in rte_event_dequeue_burst(), for the
>> + *  configured event device port and lcore.
>> + *
>> + * @param timeout
>> + *  The timeout parameter to use in rte_event_dequeue_burst(), for the
>> + *  configured event device port and lcore.
>> + *
>> + * @param lcore_id
>> + *  The lcore by which this event port will be used.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -ENOMEM: Unable to allocate sufficient resources.
>> + *  - -EEXISTS: Event port is already configured.
>> + *  - -EINVAL: Invalid arguments.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
>> +                                 uint8_t event_port_id, uint16_t batch_size,
>> +                                 uint64_t timeout, unsigned int lcore_id);
>> +
>> +/**
>> + * Unbind an event device port from a specific lcore.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but not while the dispatcher is running on
>> + * lcore specified by @c lcore_id.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @param event_port_id
>> + *  The event device port identifier.
>> + *
>> + * @param lcore_id
>> + *  The lcore which was using this event port.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -ENOENT: Event port id not bound to this @c lcore_id.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
>> +                                     uint8_t event_port_id,
>> +                                     unsigned int lcore_id);
>> +
>> +/**
>> + * Register an event handler.
>> + *
>> + * The match callback function is used to select if a particular event
>> + * should be delivered, using the corresponding process callback
>> + * function.
>> + *
>> + * The reason for having two distinct steps is to allow the dispatcher
>> + * to deliver all events as a batch. This in turn will cause
>> + * processing of a particular kind of events to happen in a
>> + * back-to-back manner, improving cache locality.
>> + *
>> + * The list of handler callback functions is shared among all lcores,
>> + * but will only be executed on lcores which has an eventdev port
>> + * bound to them, and which are running the dispatcher service.
>> + *
>> + * An event is delivered to at most one handler. Events where no
>> + * handler is found are dropped.
>> + *
>> + * The application must not depend on the order of which the match
>> + * functions are invoked.
>> + *
>> + * Ordering of events is not guaranteed to be maintained between
>> + * different deliver callbacks. For example, suppose there are two
>> + * callbacks registered, matching different subsets of events arriving
>> + * on an atomic queue. A batch of events [ev0, ev1, ev2] are dequeued
>> + * on a particular port, all pertaining to the same flow. The match
>> + * callback for registration A returns true for ev0 and ev2, and the
>> + * matching function for registration B for ev1. In that scenario, the
>> + * dispatcher may choose to deliver first [ev0, ev2] using A's deliver
>> + * function, and then [ev1] to B - or vice versa.
>> + *
>> + * rte_dispatcher_register() may be called by any thread
>> + * (including unregistered non-EAL threads), but not while the event
>> + * dispatcher is running on any service lcore.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @param match_fun
>> + *  The match callback function.
>> + *
>> + * @param match_cb_data
>> + *  A pointer to some application-specific opaque data (or NULL),
>> + *  which is supplied back to the application when match_fun is
>> + *  called.
>> + *
>> + * @param process_fun
>> + *  The process callback function.
>> + *
>> + * @param process_cb_data
>> + *  A pointer to some application-specific opaque data (or NULL),
>> + *  which is supplied back to the application when process_fun is
>> + *  called.
>> + *
>> + * @return
>> + *  - >= 0: The identifier for this registration.
>> + *  - -ENOMEM: Unable to allocate sufficient resources.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_register(struct rte_dispatcher *dispatcher,
>> +                       rte_dispatcher_match_t match_fun, void *match_cb_data,
>> +                       rte_dispatcher_process_t process_fun,
>> +                       void *process_cb_data);
>> +
>> +/**
>> + * Unregister an event handler.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but not while the dispatcher is running on
>> + * any service lcore.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @param handler_id
>> + *  The handler registration id returned by the original
>> + *  rte_dispatcher_register() call.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: The @c handler_id parameter was invalid.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id);
>> +
>> +/**
>> + * Register a finalize callback function.
>> + *
>> + * An application may optionally install one or more finalize
>> + * callbacks.
>> + *
>> + * All finalize callbacks are invoked by the dispatcher when a
>> + * complete batch of events (retrieve using rte_event_dequeue_burst())
>> + * have been delivered to the application (or have been dropped).
>> + *
>> + * The finalize callback is not tied to any particular handler.
>> + *
>> + * The finalize callback provides an opportunity for the application
>> + * to do per-batch processing. One case where this may be useful is if
>> + * an event output buffer is used, and is shared among several
>> + * handlers. In such a case, proper output buffer flushing may be
>> + * assured using a finalize callback.
>> + *
>> + * rte_dispatcher_finalize_register() may be called by any thread
>> + * (including unregistered non-EAL threads), but not while the
>> + * dispatcher is running on any service lcore.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @param finalize_fun
>> + *  The function called after completing the processing of a
>> + *  dequeue batch.
>> + *
>> + * @param finalize_data
>> + *  A pointer to some application-specific opaque data (or NULL),
>> + *  which is supplied back to the application when @c finalize_fun is
>> + *  called.
>> + *
>> + * @return
>> + *  - >= 0: The identifier for this registration.
>> + *  - -ENOMEM: Unable to allocate sufficient resources.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
>> +                                rte_dispatcher_finalize_t finalize_fun,
>> +                                void *finalize_data);
>> +
>> +/**
>> + * Unregister a finalize callback.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but not while the dispatcher is running on
>> + * any service lcore.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @param reg_id
>> + *  The finalize registration id returned by the original
>> + *  rte_dispatcher_finalize_register() call.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: The @c reg_id parameter was invalid.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
>> +                                  int reg_id);
>> +
>> +/**
>> + * Start a dispatcher instance.
>> + *
>> + * Enables the dispatcher service.
>> + *
>> + * The underlying event device must have been started prior to calling
>> + * rte_dispatcher_start().
>> + *
>> + * For the dispatcher to actually perform work (i.e., dispatch
>> + * events), its service must have been mapped to one or more service
>> + * lcores, and its service run state set to '1'. A dispatcher's
>> + * service is retrieved using rte_dispatcher_service_id_get().
>> + *
>> + * Each service lcore to which the dispatcher is mapped should
>> + * have at least one event port configured. Such configuration is
>> + * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
>> + * starting the dispatcher.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - <0: Error code on failure
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_start(struct rte_dispatcher *dispatcher);
>> +
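For reference, the start-up order implied by the documentation above is
roughly the following (a sketch only; event_dev_id, port_id, lcore_id
and BURST_SIZE are placeholders, and error handling is omitted):

    uint32_t service_id = rte_dispatcher_service_id_get(dispatcher);

    rte_service_lcore_add(lcore_id);
    rte_service_map_lcore_set(service_id, lcore_id, 1);
    rte_service_lcore_start(lcore_id);
    rte_service_runstate_set(service_id, 1);

    /* Each mapped service lcore needs an event port bound to it. */
    rte_dispatcher_bind_port_to_lcore(dispatcher, port_id,
                                      BURST_SIZE, 0, lcore_id);

    rte_event_dev_start(event_dev_id);
    rte_dispatcher_start(dispatcher);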
>> +/**
>> + * Stop a running dispatcher instance.
>> + *
>> + * Disables the dispatcher service.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + *
>> + * @return
>> + *  - 0: Success
>> + *  - -EINVAL: Invalid @c dispatcher.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dispatcher_stop(struct rte_dispatcher *dispatcher);
>> +
>> +/**
>> + * Retrieve statistics for a dispatcher instance.
>> + *
>> + * This function is MT safe and may be called by any thread
>> + * (including unregistered non-EAL threads).
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + * @param[out] stats
>> + *   A pointer to a structure to fill with statistics.
>> + */
>> +__rte_experimental
>> +void
>> +rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
>> +                        struct rte_dispatcher_stats *stats);
>> +
>> +/**
>> + * Reset statistics for a dispatcher instance.
>> + *
>> + * This function may be called by any thread (including unregistered
>> + * non-EAL threads), but may not produce the correct result if the
>> + * dispatcher is running on any service lcore.
>> + *
>> + * @param dispatcher
>> + *  The dispatcher instance.
>> + */
>> +__rte_experimental
>> +void
>> +rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher);
>> +
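For reference, retrieving the counters amounts to something like the
below sketch (requires <stdio.h> and <inttypes.h>):

    struct rte_dispatcher_stats stats;

    rte_dispatcher_stats_get(dispatcher, &stats);

    printf("%"PRIu64" events dispatched in %"PRIu64" batches\n",
           stats.ev_dispatch_count, stats.ev_batch_count);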
>> +#ifdef __cplusplus
>> +}
>> +#endif
>> +
>> +#endif /* __RTE_DISPATCHER__ */
>> diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
>> new file mode 100644
>> index 0000000000..8f9ad96522
>> --- /dev/null
>> +++ b/lib/dispatcher/version.map
>> @@ -0,0 +1,20 @@
>> +EXPERIMENTAL {
>> +       global:
>> +
>> +       # added in 23.11
>> +       rte_dispatcher_create;
>> +       rte_dispatcher_free;
>> +       rte_dispatcher_service_id_get;
>> +       rte_dispatcher_bind_port_to_lcore;
>> +       rte_dispatcher_unbind_port_from_lcore;
>> +       rte_dispatcher_register;
>> +       rte_dispatcher_unregister;
>> +       rte_dispatcher_finalize_register;
>> +       rte_dispatcher_finalize_unregister;
>> +       rte_dispatcher_start;
>> +       rte_dispatcher_stop;
>> +       rte_dispatcher_stats_get;
>> +       rte_dispatcher_stats_reset;
> 
> Sort alphabetically please.
> 

OK.

Thanks a lot David.

> 
> 
>> +
>> +       local: *;
>> +};
>> diff --git a/lib/meson.build b/lib/meson.build
>> index 099b0ed18a..3093b338d2 100644
>> --- a/lib/meson.build
>> +++ b/lib/meson.build
>> @@ -35,6 +35,7 @@ libraries = [
>>           'distributor',
>>           'efd',
>>           'eventdev',
>> +        'dispatcher', # dispatcher depends on eventdev
>>           'gpudev',
>>           'gro',
>>           'gso',
>> @@ -81,6 +82,7 @@ optional_libs = [
>>           'cfgfile',
>>           'compressdev',
>>           'cryptodev',
>> +        'dispatcher',
>>           'distributor',
>>           'dmadev',
>>           'efd',
>> --
>> 2.34.1
>>
> 
> 

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 2/3] test: add dispatcher test suite
  2023-10-05  8:36                                           ` David Marchand
@ 2023-10-05 11:25                                             ` Mattias Rönnblom
  2023-10-06  8:52                                               ` David Marchand
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-05 11:25 UTC (permalink / raw)
  To: David Marchand, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-10-05 10:36, David Marchand wrote:
> On Thu, Sep 28, 2023 at 9:36 AM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>>
>> Add unit tests for the dispatcher.
> 
> Missing your SoB.
> 

Fixed.

> Same comment than patch 1 about indent.
> 
>>
>> --
>> PATCH v5:
>>   o Update test suite to use pointer and not integer id when calling
>>     dispatcher functions.
>>
>> PATCH v3:
>>   o Adapt the test suite to dispatcher API name changes.
>>
>> PATCH v2:
>>   o Test finalize callback functionality.
>>   o Test handler and finalizer count upper limits.
>>   o Add statistics reset test.
>> +   o Make sure dispatcher supplies the proper event dev id and port id back
>> +     to the application.
>>
>> PATCH:
>>   o Extend test to cover often-used handler optimization feature.
>>
>> RFC v4:
>>   o Adapt to non-const events in process function prototype.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>> ---
>>   MAINTAINERS                |    1 +
>>   app/test/meson.build       |    1 +
>>   app/test/test_dispatcher.c | 1046 ++++++++++++++++++++++++++++++++++++
>>   3 files changed, 1048 insertions(+)
>>   create mode 100644 app/test/test_dispatcher.c
>>
>> diff --git a/MAINTAINERS b/MAINTAINERS
>> index 6704cd5b2c..43890cad0e 100644
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -1729,6 +1729,7 @@ F: lib/node/
>>   Dispatcher - EXPERIMENTAL
>>   M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>>   F: lib/dispatcher/
>> +F: app/test/test_dispatcher.c
>>
>>   Test Applications
>>   -----------------
>> diff --git a/app/test/meson.build b/app/test/meson.build
>> index 05bae9216d..3303c73817 100644
>> --- a/app/test/meson.build
>> +++ b/app/test/meson.build
>> @@ -55,6 +55,7 @@ source_file_deps = {
>>       'test_cycles.c': [],
>>       'test_debug.c': [],
>>       'test_devargs.c': ['kvargs'],
>> +    'test_dispatcher.c': ['dispatcher'],
>>       'test_distributor.c': ['distributor'],
>>       'test_distributor_perf.c': ['distributor'],
>>       'test_dmadev.c': ['dmadev', 'bus_vdev'],
>> diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c
>> new file mode 100644
>> index 0000000000..2bce65fdd9
>> --- /dev/null
>> +++ b/app/test/test_dispatcher.c
>> @@ -0,0 +1,1046 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2023 Ericsson AB
>> + */
>> +
>> +#include "test.h"
> 
> test.h is a "local" include and should go after "standard" includes
> block and "DPDK" includes block.
> 

OK.

> 
>> +
>> +#include <stdatomic.h>
> 
> Can you use the new wrappers for atomics?
> rte_stdatomic.h
> 

Sure.

>> +
>> +#include <rte_bus_vdev.h>
>> +#include <rte_dispatcher.h>
>> +#include <rte_eventdev.h>
>> +#include <rte_random.h>
>> +#include <rte_service.h>
>> +
>> +#define NUM_WORKERS 3
>> +
>> +#define NUM_PORTS (NUM_WORKERS + 1)
>> +#define WORKER_PORT_ID(worker_idx) (worker_idx)
>> +#define DRIVER_PORT_ID (NUM_PORTS - 1)
>> +
>> +#define NUM_SERVICE_CORES NUM_WORKERS
>> +
>> +/* Eventdev */
>> +#define NUM_QUEUES 8
>> +#define LAST_QUEUE_ID (NUM_QUEUES - 1)
>> +#define MAX_EVENTS 4096
>> +#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
>> +#define DEQUEUE_BURST_SIZE 32
>> +#define ENQUEUE_BURST_SIZE 32
>> +
>> +#define NUM_EVENTS 10000000
>> +#define NUM_FLOWS 16
>> +
>> +#define DSW_VDEV "event_dsw0"
>> +
>> +struct app_queue {
>> +       uint8_t queue_id;
>> +       uint64_t sn[NUM_FLOWS];
>> +       int dispatcher_reg_id;
>> +};
>> +
>> +struct cb_count {
>> +       uint8_t expected_event_dev_id;
>> +       uint8_t expected_event_port_id[RTE_MAX_LCORE];
>> +       atomic_int count;
>> +};
>> +
>> +struct test_app {
>> +       uint8_t event_dev_id;
>> +       struct rte_dispatcher *dispatcher;
>> +       uint32_t dispatcher_service_id;
>> +
>> +       unsigned int service_lcores[NUM_SERVICE_CORES];
>> +
>> +       int never_match_reg_id;
>> +       uint64_t never_match_count;
>> +       struct cb_count never_process_count;
>> +
>> +       struct app_queue queues[NUM_QUEUES];
>> +
>> +       int finalize_reg_id;
>> +       struct cb_count finalize_count;
>> +
>> +       bool running;
>> +
>> +       atomic_int completed_events;
>> +       atomic_int errors;
>> +};
>> +
>> +#define RETURN_ON_ERROR(rc) \
>> +       do {                                    \
>> +               if (rc != TEST_SUCCESS)         \
>> +                       return rc;              \
>> +       } while (0)
> 
> TEST_ASSERT?
> This gives context about which part of a test failed.
> 

This macro is used in a situation where the failure has occurred and has 
been reported already.

Maybe it would be better to replace the macro instantiations with just 
the if+return statements.

RETURN_ON_ERROR(rc);

->

if (rc != TEST_SUCCESS)
	return rc;

> 
>> +
>> +static struct test_app *
>> +test_app_create(void)
>> +{
>> +       int i;
>> +       struct test_app *app;
>> +
>> +       app = calloc(1, sizeof(struct test_app));
>> +
>> +       if (app == NULL)
>> +               return NULL;
>> +
>> +       for (i = 0; i < NUM_QUEUES; i++)
>> +               app->queues[i].queue_id = i;
>> +
>> +       return app;
>> +}
>> +
>> +static void
>> +test_app_free(struct test_app *app)
>> +{
>> +       free(app);
>> +}
>> +
>> +static int
>> +test_app_create_vdev(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_vdev_init(DSW_VDEV, NULL);
>> +       if (rc < 0)
>> +               return TEST_SKIPPED;
>> +
>> +       rc = rte_event_dev_get_dev_id(DSW_VDEV);
>> +
>> +       app->event_dev_id = (uint8_t)rc;
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_destroy_vdev(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_event_dev_close(app->event_dev_id);
>> +       TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
>> +
>> +       rc = rte_vdev_uninit(DSW_VDEV);
>> +       TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_setup_event_dev(struct test_app *app)
>> +{
>> +       int rc;
>> +       int i;
>> +
>> +       rc = test_app_create_vdev(app);
>> +       if (rc < 0)
>> +               return rc;
>> +
>> +       struct rte_event_dev_config config = {
>> +               .nb_event_queues = NUM_QUEUES,
>> +               .nb_event_ports = NUM_PORTS,
>> +               .nb_events_limit = MAX_EVENTS,
>> +               .nb_event_queue_flows = 64,
>> +               .nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
>> +               .nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
>> +       };
>> +
>> +       rc = rte_event_dev_configure(app->event_dev_id, &config);
>> +
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
>> +
>> +       struct rte_event_queue_conf queue_config = {
>> +               .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
>> +               .schedule_type = RTE_SCHED_TYPE_ATOMIC,
>> +               .nb_atomic_flows = 64
>> +       };
>> +
>> +       for (i = 0; i < NUM_QUEUES; i++) {
>> +               uint8_t queue_id = i;
>> +
>> +               rc = rte_event_queue_setup(app->event_dev_id, queue_id,
>> +                                          &queue_config);
>> +
>> +               TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
>> +       }
>> +
>> +       struct rte_event_port_conf port_config = {
>> +               .new_event_threshold = NEW_EVENT_THRESHOLD,
>> +               .dequeue_depth = DEQUEUE_BURST_SIZE,
>> +               .enqueue_depth = ENQUEUE_BURST_SIZE
>> +       };
>> +
>> +       for (i = 0; i < NUM_PORTS; i++) {
>> +               uint8_t event_port_id = i;
>> +
>> +               rc = rte_event_port_setup(app->event_dev_id, event_port_id,
>> +                                         &port_config);
>> +               TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
>> +                                   event_port_id);
>> +
>> +               if (event_port_id == DRIVER_PORT_ID)
>> +                       continue;
>> +
>> +               rc = rte_event_port_link(app->event_dev_id, event_port_id,
>> +                                        NULL, NULL, 0);
>> +
>> +               TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
>> +                                 event_port_id);
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_teardown_event_dev(struct test_app *app)
>> +{
>> +       return test_app_destroy_vdev(app);
>> +}
>> +
>> +static int
>> +test_app_start_event_dev(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_event_dev_start(app->event_dev_id);
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static void
>> +test_app_stop_event_dev(struct test_app *app)
>> +{
>> +       rte_event_dev_stop(app->event_dev_id);
>> +}
>> +
>> +static int
>> +test_app_create_dispatcher(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       app->dispatcher = rte_dispatcher_create(app->event_dev_id);
>> +
>> +       TEST_ASSERT(app->dispatcher != NULL, "Unable to create event "
>> +                   "dispatcher");
>> +
>> +       app->dispatcher_service_id =
>> +               rte_dispatcher_service_id_get(app->dispatcher);
>> +
>> +       rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
>> +
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
>> +                           "stats");
>> +
>> +       rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
>> +
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to set dispatcher service runstate");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_free_dispatcher(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
>> +       TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
>> +
>> +       rc = rte_dispatcher_free(app->dispatcher);
>> +       TEST_ASSERT_SUCCESS(rc, "Error freeing dispatcher");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_bind_ports(struct test_app *app)
>> +{
>> +       int i;
>> +
>> +       app->never_process_count.expected_event_dev_id =
>> +               app->event_dev_id;
>> +       app->finalize_count.expected_event_dev_id =
>> +               app->event_dev_id;
>> +
>> +       for (i = 0; i < NUM_WORKERS; i++) {
>> +               unsigned int lcore_id = app->service_lcores[i];
>> +               uint8_t port_id = WORKER_PORT_ID(i);
>> +
>> +               int rc = rte_dispatcher_bind_port_to_lcore(
>> +                       app->dispatcher, port_id, DEQUEUE_BURST_SIZE, 0,
>> +                       lcore_id
>> +               );
>> +
>> +               TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
>> +                                   "to lcore %d", port_id, lcore_id);
>> +
>> +               app->never_process_count.expected_event_port_id[lcore_id] =
>> +                       port_id;
>> +               app->finalize_count.expected_event_port_id[lcore_id] = port_id;
>> +       }
>> +
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_unbind_ports(struct test_app *app)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < NUM_WORKERS; i++) {
>> +               unsigned int lcore_id = app->service_lcores[i];
>> +
>> +               int rc = rte_dispatcher_unbind_port_from_lcore(
>> +                       app->dispatcher,
>> +                       WORKER_PORT_ID(i),
>> +                       lcore_id
>> +               );
>> +
>> +               TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
>> +                                   "from lcore %d", WORKER_PORT_ID(i),
>> +                                   lcore_id);
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static bool
>> +match_queue(const struct rte_event *event, void *cb_data)
>> +{
>> +       uintptr_t queue_id = (uintptr_t)cb_data;
>> +
>> +       return event->queue_id == queue_id;
>> +}
>> +
>> +static int
>> +test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < NUM_SERVICE_CORES; i++)
>> +               if (app->service_lcores[i] == lcore_id)
>> +                       return i;
> 
> This construct is hard to read and prone to error if the code is updated later.
> 
> for () {
>    if ()
>      return i;
> }
> 
> 

I wouldn't consider that an improvement (rather the opposite).

>> +
>> +       return -1;
>> +}
>> +
>> +static int
>> +test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
>> +{
>> +       int worker;
>> +
>> +       worker = test_app_get_worker_index(app, lcore_id);
>> +
>> +       if (worker < 0)
>> +               return -1;
>> +
>> +       return WORKER_PORT_ID(worker);
>> +}
>> +
>> +static void
>> +test_app_queue_note_error(struct test_app *app)
>> +{
>> +       atomic_fetch_add_explicit(&app->errors, 1, memory_order_relaxed);
>> +}
>> +
>> +static void
>> +test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
>> +                      struct rte_event *in_events, uint16_t num,
>> +                      void *cb_data)
>> +{
>> +       struct app_queue *app_queue = cb_data;
>> +       struct test_app *app = container_of(app_queue, struct test_app,
>> +                                           queues[app_queue->queue_id]);
>> +       unsigned int lcore_id = rte_lcore_id();
>> +       bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
>> +       int event_port_id;
>> +       uint16_t i;
>> +       struct rte_event out_events[num];
>> +
>> +       event_port_id = test_app_get_worker_port(app, lcore_id);
>> +
>> +       if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
>> +           p_event_port_id != event_port_id) {
>> +               test_app_queue_note_error(app);
>> +               return;
>> +       }
>> +
>> +       for (i = 0; i < num; i++) {
>> +               const struct rte_event *in_event = &in_events[i];
>> +               struct rte_event *out_event = &out_events[i];
>> +               uint64_t sn = in_event->u64;
>> +               uint64_t expected_sn;
>> +
>> +               if (in_event->queue_id != app_queue->queue_id) {
>> +                       test_app_queue_note_error(app);
>> +                       return;
>> +               }
>> +
>> +               expected_sn = app_queue->sn[in_event->flow_id]++;
>> +
>> +               if (expected_sn != sn) {
>> +                       test_app_queue_note_error(app);
>> +                       return;
>> +               }
>> +
>> +               if (intermediate_queue)
>> +                       *out_event = (struct rte_event) {
>> +                               .queue_id = in_event->queue_id + 1,
>> +                               .flow_id = in_event->flow_id,
>> +                               .sched_type = RTE_SCHED_TYPE_ATOMIC,
>> +                               .op = RTE_EVENT_OP_FORWARD,
>> +                               .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
>> +                               .u64 = sn
>> +                       };
>> +       }
>> +
>> +       if (intermediate_queue) {
>> +               uint16_t n = 0;
>> +
>> +               do {
>> +                       n += rte_event_enqueue_forward_burst(p_event_dev_id,
>> +                                                            p_event_port_id,
>> +                                                            out_events + n,
>> +                                                            num - n);
>> +               } while (n != num);
>> +       } else
>> +               atomic_fetch_add_explicit(&app->completed_events, num,
>> +                                         memory_order_relaxed);
>> +}
>> +
>> +static bool
>> +never_match(const struct rte_event *event __rte_unused, void *cb_data)
>> +{
>> +       uint64_t *count = cb_data;
>> +
>> +       (*count)++;
>> +
>> +       return false;
>> +}
>> +
>> +static void
>> +test_app_never_process(uint8_t event_dev_id,
>> +                      uint8_t event_port_id,
>> +                      struct rte_event *in_events __rte_unused,
>> +                      uint16_t num, void *cb_data)
>> +{
>> +       struct cb_count *count = cb_data;
>> +       unsigned int lcore_id = rte_lcore_id();
>> +
>> +       if (event_dev_id == count->expected_event_dev_id &&
>> +           event_port_id == count->expected_event_port_id[lcore_id])
>> +               atomic_fetch_add_explicit(&count->count, num,
>> +                                         memory_order_relaxed);
>> +}
>> +
>> +static void
>> +finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
>> +{
>> +       struct cb_count *count = cb_data;
>> +       unsigned int lcore_id = rte_lcore_id();
>> +
>> +       if (event_dev_id == count->expected_event_dev_id &&
>> +           event_port_id == count->expected_event_port_id[lcore_id])
>> +               atomic_fetch_add_explicit(&count->count, 1,
>> +                                         memory_order_relaxed);
>> +}
>> +
>> +static int
>> +test_app_register_callbacks(struct test_app *app)
>> +{
>> +       int i;
>> +
>> +       app->never_match_reg_id =
>> +               rte_dispatcher_register(app->dispatcher, never_match,
>> +                                       &app->never_match_count,
>> +                                       test_app_never_process,
>> +                                       &app->never_process_count);
>> +
>> +       TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
>> +                   "never-match handler");
>> +
>> +       for (i = 0; i < NUM_QUEUES; i++) {
>> +               struct app_queue *app_queue = &app->queues[i];
>> +               uintptr_t queue_id = app_queue->queue_id;
>> +               int reg_id;
>> +
>> +               reg_id = rte_dispatcher_register(app->dispatcher,
>> +                                                match_queue, (void *)queue_id,
>> +                                                test_app_process_queue,
>> +                                                app_queue);
>> +
>> +               TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
>> +                           "callback for queue %d", i);
>> +
>> +               app_queue->dispatcher_reg_id = reg_id;
>> +       }
>> +
>> +       app->finalize_reg_id =
>> +               rte_dispatcher_finalize_register(app->dispatcher,
>> +                                                      finalize,
>> +                                                      &app->finalize_count);
>> +       TEST_ASSERT(app->finalize_reg_id >= 0, "Error registering "
>> +                           "finalize callback");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
>> +{
>> +       int reg_id = app->queues[queue_id].dispatcher_reg_id;
>> +       int rc;
>> +
>> +       if (reg_id < 0) /* unregistered already */
>> +               return 0;
>> +
>> +       rc = rte_dispatcher_unregister(app->dispatcher, reg_id);
>> +
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
>> +                           "callback for queue %d", queue_id);
>> +
>> +       app->queues[queue_id].dispatcher_reg_id = -1;
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_unregister_callbacks(struct test_app *app)
>> +{
>> +       int i;
>> +       int rc;
>> +
>> +       if (app->never_match_reg_id >= 0) {
>> +               rc = rte_dispatcher_unregister(app->dispatcher,
>> +                                                    app->never_match_reg_id);
>> +
>> +               TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
>> +                                   "handler");
>> +               app->never_match_reg_id = -1;
>> +       }
>> +
>> +       for (i = 0; i < NUM_QUEUES; i++) {
>> +               rc = test_app_unregister_callback(app, i);
>> +               RETURN_ON_ERROR(rc);
>> +       }
>> +
>> +       if (app->finalize_reg_id >= 0) {
>> +               rc = rte_dispatcher_finalize_unregister(
>> +                       app->dispatcher, app->finalize_reg_id
>> +               );
>> +               app->finalize_reg_id = -1;
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_start_dispatcher(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_dispatcher_start(app->dispatcher);
>> +
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_stop_dispatcher(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_dispatcher_stop(app->dispatcher);
>> +
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_reset_dispatcher_stats(struct test_app *app)
>> +{
>> +       struct rte_dispatcher_stats stats;
>> +
>> +       rte_dispatcher_stats_reset(app->dispatcher);
>> +
>> +       memset(&stats, 0xff, sizeof(stats));
>> +
>> +       rte_dispatcher_stats_get(app->dispatcher, &stats);
>> +
>> +       TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
>> +       TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
>> +       TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
>> +                         "not zero");
>> +       TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_service_lcore_add(lcore_id);
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
>> +                           "service core", lcore_id);
>> +
>> +       rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_setup_service_cores(struct test_app *app)
>> +{
>> +       int i;
>> +       int lcore_id = -1;
>> +
>> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
>> +               lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
>> +
>> +               TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
>> +                           "Too few lcores. Needs at least %d worker lcores",
>> +                           NUM_SERVICE_CORES);
>> +
>> +               app->service_lcores[i] = lcore_id;
>> +       }
>> +
>> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
>> +               int rc;
>> +
>> +               rc = test_app_setup_service_core(app, app->service_lcores[i]);
>> +
>> +               RETURN_ON_ERROR(rc);
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
>> +{
>> +       int rc;
>> +
>> +       rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
>> +       TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
>> +
>> +       rc = rte_service_lcore_del(lcore_id);
>> +       TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
>> +                           lcore_id);
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_teardown_service_cores(struct test_app *app)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
>> +               unsigned int lcore_id = app->service_lcores[i];
>> +               int rc;
>> +
>> +               rc = test_app_teardown_service_core(app, lcore_id);
>> +
>> +               RETURN_ON_ERROR(rc);
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_start_service_cores(struct test_app *app)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
>> +               unsigned int lcore_id = app->service_lcores[i];
>> +               int rc;
>> +
>> +               rc = rte_service_lcore_start(lcore_id);
>> +               TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
>> +                                   lcore_id);
>> +
>> +               RETURN_ON_ERROR(rc);
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_stop_service_cores(struct test_app *app)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < NUM_SERVICE_CORES; i++) {
>> +               unsigned int lcore_id = app->service_lcores[i];
>> +               int rc;
>> +
>> +               rc = rte_service_lcore_stop(lcore_id);
>> +               TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
>> +                                   lcore_id);
>> +
>> +               RETURN_ON_ERROR(rc);
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_app_start(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       rc = test_app_start_event_dev(app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       rc = test_app_start_service_cores(app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       rc = test_app_start_dispatcher(app);
>> +
>> +       app->running = true;
>> +
>> +       return rc;
>> +}
>> +
>> +static int
>> +test_app_stop(struct test_app *app)
>> +{
>> +       int rc;
>> +
>> +       rc = test_app_stop_dispatcher(app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       rc = test_app_stop_service_cores(app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       test_app_stop_event_dev(app);
>> +
>> +       app->running = false;
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static struct test_app *test_app;
>> +
>> +static int
>> +test_setup(void)
>> +{
>> +       int rc;
>> +
>> +       test_app = test_app_create();
>> +       TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
>> +
>> +       rc = test_app_setup_event_dev(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       rc = test_app_create_dispatcher(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       rc = test_app_setup_service_cores(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       rc = test_app_register_callbacks(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       rc = test_app_bind_ports(test_app);
>> +
>> +       return rc;
>> +}
>> +
>> +static void test_teardown(void)
>> +{
>> +       if (test_app->running)
>> +               test_app_stop(test_app);
>> +
>> +       test_app_teardown_service_cores(test_app);
>> +
>> +       test_app_unregister_callbacks(test_app);
>> +
>> +       test_app_unbind_ports(test_app);
>> +
>> +       test_app_free_dispatcher(test_app);
>> +
>> +       test_app_teardown_event_dev(test_app);
>> +
>> +       test_app_free(test_app);
>> +
>> +       test_app = NULL;
>> +}
>> +
>> +static int
>> +test_app_get_completed_events(struct test_app *app)
>> +{
>> +       return atomic_load_explicit(&app->completed_events,
>> +                                   memory_order_relaxed);
>> +}
>> +
>> +static int
>> +test_app_get_errors(struct test_app *app)
>> +{
>> +       return atomic_load_explicit(&app->errors, memory_order_relaxed);
>> +}
>> +
>> +static int
>> +test_basic(void)
>> +{
>> +       int rc;
>> +       int i;
>> +
>> +       rc = test_app_start(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       uint64_t sns[NUM_FLOWS] = { 0 };
>> +
>> +       for (i = 0; i < NUM_EVENTS;) {
>> +               struct rte_event events[ENQUEUE_BURST_SIZE];
>> +               int left;
>> +               int batch_size;
>> +               int j;
>> +               uint16_t n = 0;
>> +
>> +               batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
>> +               left = NUM_EVENTS - i;
>> +
>> +               batch_size = RTE_MIN(left, batch_size);
>> +
>> +               for (j = 0; j < batch_size; j++) {
>> +                       struct rte_event *event = &events[j];
>> +                       uint64_t sn;
>> +                       uint32_t flow_id;
>> +
>> +                       flow_id = rte_rand_max(NUM_FLOWS);
>> +
>> +                       sn = sns[flow_id]++;
>> +
>> +                       *event = (struct rte_event) {
>> +                               .queue_id = 0,
>> +                               .flow_id = flow_id,
>> +                               .sched_type = RTE_SCHED_TYPE_ATOMIC,
>> +                               .op = RTE_EVENT_OP_NEW,
>> +                               .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
>> +                               .u64 = sn
>> +                       };
>> +               }
>> +
>> +               while (n < batch_size)
>> +                       n += rte_event_enqueue_new_burst(test_app->event_dev_id,
>> +                                                        DRIVER_PORT_ID,
>> +                                                        events + n,
>> +                                                        batch_size - n);
>> +
>> +               i += batch_size;
>> +       }
>> +
>> +       while (test_app_get_completed_events(test_app) != NUM_EVENTS)
>> +               rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
>> +
>> +       rc = test_app_get_errors(test_app);
>> +       TEST_ASSERT(rc == 0, "%d errors occurred", rc);
>> +
>> +       rc = test_app_stop(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       struct rte_dispatcher_stats stats;
>> +       rte_dispatcher_stats_get(test_app->dispatcher, &stats);
>> +
>> +       TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
>> +       TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
>> +                         "Invalid dispatch count");
>> +       TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
>> +
>> +       TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
>> +                         "Never-match handler's process function has "
>> +                         "been called");
>> +
>> +       int finalize_count =
>> +               atomic_load_explicit(&test_app->finalize_count.count,
>> +                                    memory_order_relaxed);
>> +
>> +       TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
>> +       TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
>> +                   "Finalize count larger than event count");
>> +
>> +       TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
>> +                         "%"PRIu64" batches dequeued, but finalize called %d "
>> +                         "times", stats.ev_batch_count, finalize_count);
>> +
>> +       /*
>> +        * The event dispatcher should call often-matching match functions
>> +        * more often, and thus this never-matching match function should
>> +        * be called relatively infrequently.
>> +        */
>> +       TEST_ASSERT(test_app->never_match_count <
>> +                   (stats.ev_dispatch_count / 4),
>> +                   "Never-matching match function called suspiciously often");
>> +
>> +       rc = test_app_reset_dispatcher_stats(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static int
>> +test_drop(void)
>> +{
>> +       int rc;
>> +       uint8_t unhandled_queue;
>> +       struct rte_dispatcher_stats stats;
>> +
>> +       unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
>> +
>> +       rc = test_app_start(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       rc = test_app_unregister_callback(test_app, unhandled_queue);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       struct rte_event event = {
>> +           .queue_id = unhandled_queue,
>> +           .flow_id = 0,
>> +           .sched_type = RTE_SCHED_TYPE_ATOMIC,
>> +           .op = RTE_EVENT_OP_NEW,
>> +           .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
>> +           .u64 = 0
>> +       };
>> +
>> +       do {
>> +               rc = rte_event_enqueue_burst(test_app->event_dev_id,
>> +                                            DRIVER_PORT_ID, &event, 1);
>> +       } while (rc == 0);
>> +
>> +       do {
>> +               rte_dispatcher_stats_get(test_app->dispatcher, &stats);
>> +
>> +               rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
>> +       } while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
>> +
>> +       rc = test_app_stop(test_app);
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
>> +       TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
>> +                         "Dispatch count is not zero");
>> +       TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +#define MORE_THAN_MAX_HANDLERS 1000
>> +#define MIN_HANDLERS 32
>> +
>> +static int
>> +test_many_handler_registrations(void)
>> +{
>> +       int rc;
>> +       int num_regs = 0;
>> +       int reg_ids[MORE_THAN_MAX_HANDLERS];
>> +       int reg_id;
>> +       int i;
>> +
>> +       rc = test_app_unregister_callbacks(test_app);
>> +
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
>> +               reg_id = rte_dispatcher_register(test_app->dispatcher,
>> +                                                never_match, NULL,
>> +                                                test_app_never_process, NULL);
>> +               if (reg_id < 0)
>> +                       break;
>> +
>> +               reg_ids[num_regs++] = reg_id;
>> +       }
>> +
>> +       TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
>> +                         "%d but was %d", -ENOMEM, reg_id);
>> +       TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
>> +                   "after %d handler registrations.", num_regs);
>> +
>> +       for (i = 0; i < num_regs; i++) {
>> +               rc = rte_dispatcher_unregister(test_app->dispatcher,
>> +                                              reg_ids[i]);
>> +               TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
>> +                                   reg_ids[i]);
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static void
>> +dummy_finalize(uint8_t event_dev_id __rte_unused,
>> +              uint8_t event_port_id __rte_unused,
>> +              void *cb_data __rte_unused)
>> +{
>> +}
>> +
>> +#define MORE_THAN_MAX_FINALIZERS 1000
>> +#define MIN_FINALIZERS 16
>> +
>> +static int
>> +test_many_finalize_registrations(void)
>> +{
>> +       int rc;
>> +       int num_regs = 0;
>> +       int reg_ids[MORE_THAN_MAX_FINALIZERS];
>> +       int reg_id;
>> +       int i;
>> +
>> +       rc = test_app_unregister_callbacks(test_app);
>> +
>> +       RETURN_ON_ERROR(rc);
>> +
>> +       for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
>> +               reg_id = rte_dispatcher_finalize_register(
>> +                       test_app->dispatcher, dummy_finalize, NULL
>> +               );
>> +
>> +               if (reg_id < 0)
>> +                       break;
>> +
>> +               reg_ids[num_regs++] = reg_id;
>> +       }
>> +
>> +       TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
>> +                         "%d but was %d", -ENOMEM, reg_id);
>> +       TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
>> +                   "already after %d registrations.", num_regs);
>> +
>> +       for (i = 0; i < num_regs; i++) {
>> +               rc = rte_dispatcher_finalize_unregister(
>> +                       test_app->dispatcher, reg_ids[i]
>> +               );
>> +               TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
>> +                                   reg_ids[i]);
>> +       }
>> +
>> +       return TEST_SUCCESS;
>> +}
>> +
>> +static struct unit_test_suite test_suite = {
>> +       .suite_name = "Event dispatcher test suite",
>> +       .unit_test_cases = {
>> +               TEST_CASE_ST(test_setup, test_teardown, test_basic),
>> +               TEST_CASE_ST(test_setup, test_teardown, test_drop),
>> +               TEST_CASE_ST(test_setup, test_teardown,
>> +                            test_many_handler_registrations),
>> +               TEST_CASE_ST(test_setup, test_teardown,
>> +                            test_many_finalize_registrations),
>> +               TEST_CASES_END()
>> +       }
>> +};
>> +
>> +static int
>> +test_dispatcher(void)
>> +{
>> +       return unit_test_suite_runner(&test_suite);
>> +}
>> +
>> +REGISTER_TEST_COMMAND(dispatcher_autotest, test_dispatcher);
> 
> We have new macros (see REGISTER_FAST_TEST for example) so a test is
> associated to an existing testsuite.
> I think this test should be part of the fast-test testsuite, wdyt?
> 
> 

It needs setup and teardown methods, so I assume a generic test suite 
wouldn't do.

The dispatcher tests do have fairly short run times, so in that sense 
they should qualify.
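
Provided the suite runner (and thus the setup and teardown hooks) can 
stay as-is, switching macros would presumably be a one-liner, along 
the lines of the below (the no-huge and ASan flag values are my guess):

    REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);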

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 3/3] doc: add dispatcher programming guide
  2023-10-05  8:36                                           ` David Marchand
@ 2023-10-05 11:33                                             ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-05 11:33 UTC (permalink / raw)
  To: David Marchand, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-10-05 10:36, David Marchand wrote:
> On Thu, Sep 28, 2023 at 9:37 AM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>>
>> Provide programming guide for the dispatcher library.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> 
> Checkpatch complains about empty lines, can you double check?
> 
> For example.
> 
> ERROR:TRAILING_WHITESPACE: trailing whitespace
> #63: FILE: doc/guides/prog_guide/dispatcher_lib.rst:33:
> +    $
> 
> ERROR:TRAILING_WHITESPACE: trailing whitespace
> #66: FILE: doc/guides/prog_guide/dispatcher_lib.rst:36:
> +    $
> 
> 

At some point I reached the conclusion that the code block was 
terminated prematurely unless that whitespace was included.

When I re-test now, it seems like it's actually *not* needed.

I'll remove it.

>>
>> --
>> PATCH v5:
>>   o Update guide to match API changes related to dispatcher ids.
>>
>> PATCH v3:
>>   o Adapt guide to the dispatcher API name changes.
>>
>> PATCH:
>>   o Improve grammar and spelling.
>>
>> RFC v4:
>>   o Extend event matching section of the programming guide.
>>   o Improve grammar and spelling.
>> ---
>>   MAINTAINERS                              |   1 +
>>   doc/guides/prog_guide/dispatcher_lib.rst | 433 +++++++++++++++++++++++
>>   doc/guides/prog_guide/index.rst          |   1 +
>>   3 files changed, 435 insertions(+)
>>   create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
>>
>> diff --git a/MAINTAINERS b/MAINTAINERS
>> index 43890cad0e..ab35498204 100644
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -1730,6 +1730,7 @@ Dispatcher - EXPERIMENTAL
>>   M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>>   F: lib/dispatcher/
>>   F: app/test/test_dispatcher.c
>> +F: doc/guides/prog_guide/dispatcher_lib.rst
>>
>>   Test Applications
>>   -----------------
>> diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst
>> new file mode 100644
>> index 0000000000..951db06081
>> --- /dev/null
>> +++ b/doc/guides/prog_guide/dispatcher_lib.rst
>> @@ -0,0 +1,433 @@
>> +..  SPDX-License-Identifier: BSD-3-Clause
>> +    Copyright(c) 2023 Ericsson AB.
>> +
>> +Dispatcher
>> +==========
>> +
>> +Overview
>> +--------
>> +
>> +The purpose of the dispatcher is to help reduce coupling in an
>> +:doc:`Eventdev <eventdev>`-based DPDK application.
>> +
>> +In particular, the dispatcher addresses a scenario where an
>> +application's modules share the same event device and event device
>> +ports, and perform work on the same lcore threads.
>> +
>> +The dispatcher replaces the conditional logic that follows an event
>> +device dequeue operation, where events are dispatched to different
>> +parts of the application, typically based on fields in the
>> +``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
>> +``sched_type``.
>> +
>> +Below is an excerpt from a fictitious application consisting of two
>> +modules; A and B. In this example, event-to-module routing is based
>> +purely on queue id, where module A expects all events on a certain
>> +queue id, and module B expects events on two other queue ids. [#Mapping]_
>> +
>> +.. code-block:: c
>> +
>> +    for (;;) {
>> +            struct rte_event events[MAX_BURST];
>> +            unsigned int n;
>> +
>> +            n = rte_event_dequeue_burst(dev_id, port_id, events,
>> +                                       MAX_BURST, 0);
>> +
>> +            for (i = 0; i < n; i++) {
>> +                    const struct rte_event *event = &events[i];
>> +
>> +                    switch (event->queue_id) {
>> +                    case MODULE_A_QUEUE_ID:
>> +                            module_a_process(event);
>> +                            break;
>> +                    case MODULE_B_STAGE_0_QUEUE_ID:
>> +                            module_b_process_stage_0(event);
>> +                            break;
>> +                    case MODULE_B_STAGE_1_QUEUE_ID:
>> +                            module_b_process_stage_1(event);
>> +                            break;
>> +                    }
>> +            }
>> +    }
>> +
>> +The issue this example attempts to illustrate is that the centralized
>> +conditional logic has knowledge of things that should be private to
>> +the modules. In other words, this pattern leads to a violation of
>> +module encapsulation.
>> +
>> +The shared conditional logic contains explicit knowledge about what
>> +events should go where. If, for example, ``module_a_process()``
>> +is broken into two processing stages — a
>> +module-internal affair — the shared conditional code must be updated
>> +to reflect this change.
>> +
>> +The centralized event routing code becomes an issue in larger
>> +applications, where modules are developed by different organizations.
>> +This pattern also makes module reuse across different applications more
>> +difficult. The part of the conditional logic relevant for a particular
>> +application may need to be duplicated across many module
>> +instantiations (e.g., applications and test setups).
>> +
>> +The dispatcher separates the mechanism (routing events to their
>> +receiver) from the policy (which events should go where).
>> +
>> +The basic operation of the dispatcher is as follows:
>> +
>> +* Dequeue a batch of events from the event device.
>> +* For each event determine which handler should receive the event, using
>> +  a set of application-provided, per-handler event matching callback
>> +  functions.
>> +* Provide events matching a particular handler, to that handler, using
>> +  its process callback.
>> +
>> +If the above application would have made use of the dispatcher, the
>> +code relevant for its module A may have looked something like this:
>> +
>> +.. code-block:: c
>> +
>> +    static bool
>> +    module_a_match(const struct rte_event *event, void *cb_data)
>> +    {
>> +           return event->queue_id == MODULE_A_QUEUE_ID;
>> +    }
>> +
>> +    static void
>> +    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
>> +                            const struct rte_event *events,
>> +                           uint16_t num, void *cb_data)
>> +    {
>> +            uint16_t i;
>> +
>> +            for (i = 0; i < num; i++)
>> +                    module_a_process_event(&events[i]);
>> +    }
>> +
>> +    /* In the module's initialization code */
>> +    rte_dispatcher_register(dispatcher, module_a_match, NULL,
>> +                           module_a_process_events, module_a_data);
>> +
>> +(Error handling is left out of this and future example code in this
>> +chapter.)
>> +
>> +When the shared conditional logic is removed, a new question arises:
>> +which part of the system actually runs the dispatching mechanism? Or
>> +phrased differently, what is replacing the function hosting the shared
>> +conditional logic (typically launched on all lcores using
>> +``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is
>> +run as a DPDK :doc:`Service <service_cores>`.
>> +
>> +The dispatcher is a layer between the application and the event device
>> +in the receive direction. In the transmit (i.e., item of work
>> +submission) direction, the application directly accesses the Eventdev
>> +core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
>> +forwarded events to the event device.
>> +
>> +Dispatcher Creation
>> +-------------------
>> +
>> +A dispatcher is created using
>> +``rte_dispatcher_create()``.
>> +
>> +The event device must be configured before the dispatcher is created.
>> +
>> +Usually, only one dispatcher is needed per event device. A dispatcher
>> +handles exactly one event device.
>> +
>> +A dispatcher is freed using the ``rte_dispatcher_free()``
>> +function. The dispatcher's service functions must not be running on
>> +any lcore at the point of this call.
>> +
>> +Event Port Binding
>> +------------------
>> +
>> +To be able to dequeue events, the dispatcher must know which event
>> +ports are to be used, on all the lcores it uses. The application
>> +provides this information using
>> +``rte_dispatcher_bind_port_to_lcore()``.
>> +
>> +This call is typically made from the part of the application that
>> +deals with deployment issues (e.g., iterating lcores and determining
>> +which lcore does what), at the time of application initialization.
>> +
>> +The ``rte_dispatcher_unbind_port_from_lcore()`` is used to undo
>> +this operation.
>> +
>> +Multiple lcore threads may not safely use the same event
>> +port. [#Port-MT-Safety]_
>> +
>> +Event ports cannot safely be bound or unbound while the dispatcher's
>> +service function is running on any lcore.
>> +
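For reference, the binding step could look something like the below
sketch, using the same call signature as in the test suite
(PORT_FOR_LCORE() and BURST_SIZE are made-up placeholders):

    unsigned int lcore_id;

    RTE_LCORE_FOREACH(lcore_id)
            rte_dispatcher_bind_port_to_lcore(dispatcher,
                                              PORT_FOR_LCORE(lcore_id),
                                              BURST_SIZE, 0, lcore_id);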
>> +Event Handlers
>> +--------------
>> +
>> +The dispatcher handler is an interface between the dispatcher and an
>> +application module, used to route events to the appropriate part of
>> +the application.
>> +
>> +Handler Registration
>> +^^^^^^^^^^^^^^^^^^^^
>> +
>> +The event handler interface consists of two function pointers:
>> +
>> +* The ``rte_dispatcher_match_t`` callback, whose job is to
>> +  decide if this event is to be the property of this handler.
>> +* The ``rte_dispatcher_process_t``, which is used by the
>> +  dispatcher to deliver matched events.
>> +
>> +An event handler registration is valid on all lcores.
>> +
>> +The functions pointed to by the match and process callbacks reside in
>> +the application's domain logic, with one or more handlers per
>> +application module.
>> +
>> +A module may use more than one event handler, for convenience or to
>> +further decouple sub-modules. However, the dispatcher may impose an
>> +upper limit on the number of handlers. In addition, installing a large
>> +number of handlers increases dispatcher overhead, although this does
>> +not nessarily translate to a system-level performance degradation. See
> 
> Typo on necessarily?
> 
> 
> 
>> +the section on :ref:`Event Clustering` for more information.
>> +
>> +Handler registration and unregistration cannot safely be done while
>> +the dispatcher's service function is running on any lcore.
>> +
>> +Event Matching
>> +^^^^^^^^^^^^^^
>> +
>> +A handler's match callback function decides if an event should be
>> +delivered to this handler, or not.
>> +
>> +An event is routed to no more than one handler. Thus, if a match
>> +function returns true, no further match functions will be invoked for
>> +that event.
>> +
>> +Match functions must not depend on being invoked in any particular
>> +order (e.g., in the handler registration order).
>> +
>> +Events failing to match any handler are dropped, and the
>> +``ev_drop_count`` counter is updated accordingly.
>> +
>> +Event Delivery
>> +^^^^^^^^^^^^^^
>> +
>> +The handler callbacks are invoked by the dispatcher's service
>> +function, upon the arrival of events to the event ports bound to the
>> +running service lcore.
>> +
>> +A particular event is delivered to at most one handler.
>> +
>> +The application must not depend on all match callback invocations for
>> +a particular event batch being made prior to any process calls
>> +being made. For example, if the dispatcher dequeues two events from
>> +the event device, it may choose to find out the destination for the
>> +first event, and deliver it, and then continue to find out the
>> +destination for the second, and then deliver that event as well. The
>> +dispatcher may also choose a strategy where no event is delivered
>> +until the destination handlers for both events have been determined.
>> +
>> +The events provided in a single process call always belong to the same
>> +event port dequeue burst.
>> +
>> +.. _Event Clustering:
>> +
>> +Event Clustering
>> +^^^^^^^^^^^^^^^^
>> +
>> +The dispatcher maintains the order of events destined for the same
>> +handler.
>> +
>> +*Order* here refers to the order in which the events were delivered
>> +from the event device to the dispatcher (i.e., in the event array
>> +populated by ``rte_event_dequeue_burst()``), in relation to the order
>> +in which the dispatcher delivers these events to the application.
>> +
>> +The dispatcher *does not* guarantee to maintain the order of events
>> +delivered to *different* handlers.
>> +
>> +For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
>> +and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
>> +consider a scenario where the following events are dequeued from the
>> +event device (qid is short for event queue id).
>> +
>> +.. code-block::
>> +
>> +    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
>> +
>> +The dispatcher may deliver the events in the following manner:
>> +
>> +.. code-block::
>> +
>> +   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
>> +   module_a_process([e2: qid=0])
>> +   module_b_stage_0_process([e3: qid=1])
>> +
>> +The dispatcher may also choose to cluster (group) all events destined
>> +for ``module_b_stage_0_process()`` into one array:
>> +
>> +.. code-block::
>> +
>> +   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
>> +   module_a_process([e2: qid=0])
>> +
>> +Here, the event ``e2`` is reordered and placed behind ``e3``, from a
>> +delivery order point of view. This kind of reshuffling is allowed,
>> +since the events are destined for different handlers.
>> +
>> +The dispatcher may also deliver ``e2`` before the three events
>> +destined for module B.
>> +
>> +An example of what the dispatcher may not do is to reorder event
>> +``e1`` so that it precedes ``e0`` in the array passed to module
>> +B's stage 0 process callback.
>> +
>> +Although clustering requires some extra work for the dispatcher, it
>> +leads to fewer process function calls. In addition, and likely more
>> +importantly, it improves temporal locality of memory accesses to
>> +handler-specific data structures in the application, which in turn may
>> +lead to fewer cache misses and improved overall performance.
>> +
>> +Finalize
>> +--------
>> +
>> +The dispatcher may be configured to notify one or more parts of the
>> +application when the matching and processing of a batch of events has
>> +completed.
>> +
>> +The ``rte_dispatcher_finalize_register`` call is used to
>> +register a finalize callback. The function
>> +``rte_dispatcher_finalize_unregister`` is used to remove a
>> +callback.
>> +
>> +The finalize hook may be used by a set of event handlers (in the same
>> +modules, or a set of cooperating modules) sharing an event output
>> +buffer, since it allows for flushing of the buffers at the last
>> +possible moment. In particular, it allows for buffering of
>> +``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
>> +``rte_event_dequeue_burst()`` call is made (assuming implicit release
>> +is employed).
>> +
>> +The following is an example with an application-defined event output
>> +buffer (the ``event_buffer``):
>> +
>> +.. code-block:: c
>> +
>> +    static void
>> +    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
>> +                   void *cb_data)
>> +    {
>> +            struct event_buffer *buffer = cb_data;
>> +            unsigned lcore_id = rte_lcore_id();
>> +            struct event_buffer_lcore *lcore_buffer =
>> +                    &buffer->lcore_buffer[lcore_id];
>> +
>> +            event_buffer_lcore_flush(lcore_buffer);
>> +    }
>> +
>> +    /* In the module's initialization code */
>> +    rte_dispatcher_finalize_register(dispatcher, finalize_batch,
>> +                                     shared_event_buffer);
>> +
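>> +For illustration, the handler side of such a scheme may be sketched
>> +as follows. The ``event_buffer_lcore_add()`` helper is hypothetical,
>> +assumed to stage ``RTE_EVENT_OP_FORWARD`` events in the same per-lcore
>> +buffer that ``finalize_batch()`` above flushes:
>> +
>> +.. code-block:: c
>> +
>> +    static void
>> +    module_process(uint8_t event_dev_id, uint8_t event_port_id,
>> +                   struct rte_event *events, uint16_t num,
>> +                   void *cb_data)
>> +    {
>> +            struct event_buffer *buffer = cb_data;
>> +            struct event_buffer_lcore *lcore_buffer =
>> +                    &buffer->lcore_buffer[rte_lcore_id()];
>> +            uint16_t i;
>> +
>> +            for (i = 0; i < num; i++) {
>> +                    /* ...application-level processing... */
>> +
>> +                    /* Stage the event for the finalize-time flush. */
>> +                    events[i].op = RTE_EVENT_OP_FORWARD;
>> +                    event_buffer_lcore_add(lcore_buffer, &events[i]);
>> +            }
>> +    }
>> +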
>> +The dispatcher does not track any relationship between a handler and a
>> +finalize callback, and all finalize callbacks will be called, if (and
>> +only if) at least one event was dequeued from the event device.
>> +
>> +Finalize callback registration and unregistration cannot safely be
>> +done while the dispatcher's service function is running on any lcore.
>> +
>> +Service
>> +-------
>> +
>> +The dispatcher is a DPDK service, and is managed in a manner similar
>> +to other DPDK services (e.g., an Event Timer Adapter).
>> +
>> +Below is an example of how to configure a particular lcore to serve as
>> +a service lcore, and to map an already-configured dispatcher to that
>> +lcore.
>> +
>> +.. code-block:: c
>> +
>> +    static void
>> +    launch_dispatcher_core(struct rte_dispatcher *dispatcher,
>> +                           unsigned lcore_id)
>> +    {
>> +            uint32_t service_id;
>> +
>> +            rte_service_lcore_add(lcore_id);
>> +
>> +            rte_dispatcher_service_id_get(dispatcher, &service_id);
>> +
>> +            rte_service_map_lcore_set(service_id, lcore_id, 1);
>> +
>> +            rte_service_lcore_start(lcore_id);
>> +
>> +            rte_service_runstate_set(service_id, 1);
>> +    }
>> +
>> +As the final step, the dispatcher must be started.
>> +
>> +.. code-block:: c
>> +
>> +    rte_dispatcher_start(dispatcher);
>> +
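>> +Correspondingly, at shutdown, the dispatcher may be stopped and its
>> +resources returned:
>> +
>> +.. code-block:: c
>> +
>> +    rte_dispatcher_stop(dispatcher);
>> +    rte_dispatcher_free(dispatcher);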
>> +
>> +Multi Service Dispatcher Lcores
>> +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>> +
>> +In an Eventdev application, most (or all) compute-intensive and
>> +performance-sensitive processing is done in an event-driven manner,
>> +where the CPU cycles spent on application domain logic are the direct
>> +result of items of work (i.e., ``rte_event`` events) dequeued from an
>> +event device.
>> +
>> +In the light of this, it makes sense to have the dispatcher service be
>> +the only DPDK service on all lcores used for packet processing — at
>> +least in principle.
>> +
>> +However, there is nothing in DPDK that prevents colocating other
>> +services with the dispatcher service on the same lcore.
>> +
>> +Tasks that, prior to the introduction of the dispatcher into the
>> +application, were performed on the lcore even when no events were
>> +received, are prime targets for being converted into such auxiliary
>> +services, running on the dispatcher core set.
>> +
>> +An example of such a task would be the management of a per-lcore timer
>> +wheel (i.e., calling ``rte_timer_manage()``).
>> +
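>> +A minimal sketch of such a timer management service is shown below.
>> +It uses the service component API also employed by the dispatcher
>> +itself; error handling is omitted, and the service is assumed to be
>> +subsequently mapped to the dispatcher lcores, in the manner described
>> +in the previous section:
>> +
>> +.. code-block:: c
>> +
>> +    static int32_t
>> +    timer_service(void *userdata __rte_unused)
>> +    {
>> +            /* Expire timers on this lcore's timer list. */
>> +            rte_timer_manage();
>> +
>> +            return 0;
>> +    }
>> +
>> +    /* In the application's initialization code */
>> +    struct rte_service_spec service = {
>> +            .callback = timer_service,
>> +            /* Each lcore operates only on its own timer list. */
>> +            .capabilities = RTE_SERVICE_CAP_MT_SAFE
>> +    };
>> +    uint32_t service_id;
>> +
>> +    snprintf(service.name, sizeof(service.name), "timer_service");
>> +
>> +    rte_service_component_register(&service, &service_id);
>> +    rte_service_component_runstate_set(service_id, 1);
>> +    rte_service_runstate_set(service_id, 1);
>> +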
>> +Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
>> +similar technique) may opt to have quiescent state signaling (e.g.,
>> +calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
>> +service, to assure resource reclamation occurs even though some
>> +lcores currently do not process any events.
>> +
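>> +A quiescent state signaling service may be sketched in a similar
>> +manner. Here, each service lcore is assumed to have been registered
>> +with (and reported online to) the ``qsbr`` variable beforehand, using
>> +its lcore id as the RCU thread id:
>> +
>> +.. code-block:: c
>> +
>> +    static int32_t
>> +    rcu_quiescent_service(void *userdata)
>> +    {
>> +            struct rte_rcu_qsbr *qsbr = userdata;
>> +
>> +            /* Signal that this lcore has passed a quiescent state. */
>> +            rte_rcu_qsbr_quiescent(qsbr, rte_lcore_id());
>> +
>> +            return 0;
>> +    }
>> +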
>> +If more services than the dispatcher service are mapped to a service
>> +lcore, it's important that the other services are well-behaved and
>> +don't interfere with event processing to the extent that the system's
>> +throughput and/or latency requirements are at risk of not being met.
>> +
>> +In particular, to avoid jitter, they should have a small upper bound
>> +for the maximum amount of time spent in a single service function
>> +call.
>> +
>> +An example of a scenario with a more CPU-heavy colocated service is a
>> +low-lcore count deployment, where the event device lacks the
>> +``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
>> +requires software to feed incoming packets into the event device). In
>> +this case, the best performance may be achieved if the Event Ethernet
>> +RX and/or TX Adapters are mapped to lcores also used for event
>> +dispatching, since otherwise the adapter lcores would have a lot of
>> +idle CPU cycles.
>> +
>> +.. rubric:: Footnotes
>> +
>> +.. [#Mapping]
>> +   Event routing may reasonably be done based on other ``rte_event``
>> +   fields (or even event user data). Indeed, that's the very reason to
>> +   have match callback functions, instead of a simple queue
>> +   id-to-handler mapping scheme. Queue id-based routing serves well in
>> +   a simple example.
>> +
>> +.. [#Port-MT-Safety]
>> +   This property (which is a feature, not a bug) is inherited from the
>> +   core Eventdev APIs.
>> diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
>> index 52a6d9e7aa..ab05bd6074 100644
>> --- a/doc/guides/prog_guide/index.rst
>> +++ b/doc/guides/prog_guide/index.rst
>> @@ -60,6 +60,7 @@ Programmer's Guide
>>       event_ethernet_tx_adapter
>>       event_timer_adapter
>>       event_crypto_adapter
>> +    dispatcher_lib
>>       qos_framework
>>       power_man
>>       packet_classif_access_ctrl
>> --
>> 2.34.1
>>
> 
> 

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 1/3] lib: introduce dispatcher library
  2023-10-05 10:08                                             ` Mattias Rönnblom
@ 2023-10-06  8:46                                               ` David Marchand
  2023-10-06  9:03                                                 ` Thomas Monjalon
  2023-10-09 16:49                                                 ` Mattias Rönnblom
  0 siblings, 2 replies; 102+ messages in thread
From: David Marchand @ 2023-10-06  8:46 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

Hello Mattias,

On Thu, Oct 5, 2023 at 12:09 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> >> +
> >> +deps += ['eventdev']
> >> diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
> >> new file mode 100644
> >> index 0000000000..0e69db2b9b
> >> --- /dev/null
> >> +++ b/lib/dispatcher/rte_dispatcher.c
> >> @@ -0,0 +1,708 @@
> >> +/* SPDX-License-Identifier: BSD-3-Clause
> >> + * Copyright(c) 2023 Ericsson AB
> >> + */
> >> +
> >> +#include <stdbool.h>
> >> +#include <stdint.h>
> >> +
> >> +#include <rte_branch_prediction.h>
> >> +#include <rte_common.h>
> >> +#include <rte_lcore.h>
> >> +#include <rte_random.h>
> >> +#include <rte_service_component.h>
> >> +
> >> +#include "eventdev_pmd.h"
> >> +
> >> +#include <rte_dispatcher.h>
> >> +
> >> +#define EVD_MAX_PORTS_PER_LCORE 4
> >> +#define EVD_MAX_HANDLERS 32
> >> +#define EVD_MAX_FINALIZERS 16
> >> +#define EVD_AVG_PRIO_INTERVAL 2000
> >> +#define EVD_SERVICE_NAME "dispatcher"
> >> +
> >> +struct rte_dispatcher_lcore_port {
> >> +       uint8_t port_id;
> >> +       uint16_t batch_size;
> >> +       uint64_t timeout;
> >> +};
> >> +
> >> +struct rte_dispatcher_handler {
> >> +       int id;
> >> +       rte_dispatcher_match_t match_fun;
> >> +       void *match_data;
> >> +       rte_dispatcher_process_t process_fun;
> >> +       void *process_data;
> >> +};
> >> +
> >> +struct rte_dispatcher_finalizer {
> >> +       int id;
> >> +       rte_dispatcher_finalize_t finalize_fun;
> >> +       void *finalize_data;
> >> +};
> >> +
> >> +struct rte_dispatcher_lcore {
> >> +       uint8_t num_ports;
> >> +       uint16_t num_handlers;
> >> +       int32_t prio_count;
> >> +       struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
> >> +       struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
> >> +       struct rte_dispatcher_stats stats;
> >> +} __rte_cache_aligned;
> >> +
> >> +struct rte_dispatcher {
> >> +       uint8_t event_dev_id;
> >> +       int socket_id;
> >> +       uint32_t service_id;
> >> +       struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
> >> +       uint16_t num_finalizers;
> >> +       struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
> >> +};
> >> +
> >> +static int
> >> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
> >> +                      const struct rte_event *event)
> >
> > Wrt DPDK coding style, indent is a single tab.
> > Adding an extra tab is recommended when continuing control statements
> > like if()/for()/..
> >
>
> Sure, but I don't understand why you mention this.

I wanted to remind the DPDK coding style which I try to more closely
enforce for new code.
indent is off in this file (especially for function prototypes with
multiple tabs used).

>
> > On the other hand, max accepted length for a line is 100 columns.
> >
> > Wdyt of a single line for this specific case?
>
>
> Are you asking why the evd_lookup_handler_idx() function prototype is
> not a single line?
>
> It would make it long, that's why. Even if 100 wide lines are allowed,
> it doesn't mean the author is forced to use such long lines?

I find it more readable.
If you want to stick to 80 columns, please comply with a single tab for indent.

[snip]


> >> +static int
> >> +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
> >> +{
> >> +       int rc;
> >> +
> >> +       rc = rte_service_component_runstate_set(dispatcher->service_id,
> >> +                                               state);
> >> +
> >> +       if (rc != 0) {
> >> +               RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
> >> +                                "service component run state to %d\n", rc,
> >> +                                state);
> >> +               RTE_ASSERT(0);
> >
> > Why not propagating the error to callers?
> >
> >
>
> The root cause would be a programming error, hence an assertion is a more
> appropriate way to deal with the situation.

Without building RTE_ENABLE_ASSERT (disabled by default), the code
later in this function will still be executed.

[snip]


> >> +typedef void
> >> +(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
> >> +                                  void *cb_data);
> >> +
> >> +/**
> >> + * Dispatcher statistics
> >> + */
> >> +struct rte_dispatcher_stats {
> >> +       uint64_t poll_count;
> >> +       /**< Number of event dequeue calls made toward the event device. */
> >
> > We had a number of issues with doxygen post annotations.
> > Prefer the prefixed ones.
> >
>
> OK. More readable, too. I just used the postfix syntax since it seemed
> the only one used in DPDK.

Historically yes, but we started cleaning headers for readability
(like in ethdev) and after catching a few errors with postfix
comments.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 2/3] test: add dispatcher test suite
  2023-10-05 11:25                                             ` Mattias Rönnblom
@ 2023-10-06  8:52                                               ` David Marchand
  2023-10-09 17:16                                                 ` Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: David Marchand @ 2023-10-06  8:52 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On Thu, Oct 5, 2023 at 1:26 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:

[snip]

> >> +#define RETURN_ON_ERROR(rc) \
> >> +       do {                                    \
> >> +               if (rc != TEST_SUCCESS)         \
> >> +                       return rc;              \
> >> +       } while (0)
> >
> > TEST_ASSERT?
> > This gives context about which part of a test failed.
> >
>
> This macro is used in a situation where the failure has occurred and has
> been reported already.
>
> Maybe it would be better to replace the macro instantiation with just
> the if+return statements.
>
> RETURN_ON_ERROR(rc);
>
> ->
>
> if (rc != TEST_SUCCESS)
>         return rc;

Yes, this macro does not add much, you can remove it.

[snip]


> >> +       for (i = 0; i < NUM_SERVICE_CORES; i++)
> >> +               if (app->service_lcores[i] == lcore_id)
> >> +                       return i;
> >
> > This construct is hard to read and prone to error if the code is updated later.
> >
> > for () {
> >    if ()
> >      return i;
> > }
> >
> >
>
> I wouldn't consider that an improvement (rather the opposite).

Well, I disagree, but it is not enforced in the coding style so I won't insist.

[snip]


> >> +static struct unit_test_suite test_suite = {
> >> +       .suite_name = "Event dispatcher test suite",
> >> +       .unit_test_cases = {
> >> +               TEST_CASE_ST(test_setup, test_teardown, test_basic),
> >> +               TEST_CASE_ST(test_setup, test_teardown, test_drop),
> >> +               TEST_CASE_ST(test_setup, test_teardown,
> >> +                            test_many_handler_registrations),
> >> +               TEST_CASE_ST(test_setup, test_teardown,
> >> +                            test_many_finalize_registrations),
> >> +               TEST_CASES_END()
> >> +       }
> >> +};
> >> +
> >> +static int
> >> +test_dispatcher(void)
> >> +{
> >> +       return unit_test_suite_runner(&test_suite);
> >> +}
> >> +
> >> +REGISTER_TEST_COMMAND(dispatcher_autotest, test_dispatcher);
> >
> > We have new macros (see REGISTER_FAST_TEST for example) so a test is
> > associated with an existing testsuite.
> > I think this test should be part of the fast-test testsuite, wdyt?
> >
> >
>
> It needs setup and teardown methods, so I assume a generic test suite
> wouldn't do.
>
> The dispatcher tests do have fairly short run times, so in that sense
> they should qualify.


So please use REGISTER_FAST_TEST().
Thanks.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 1/3] lib: introduce dispatcher library
  2023-10-06  8:46                                               ` David Marchand
@ 2023-10-06  9:03                                                 ` Thomas Monjalon
  2023-10-09 17:40                                                   ` Mattias Rönnblom
  2023-10-09 16:49                                                 ` Mattias Rönnblom
  1 sibling, 1 reply; 102+ messages in thread
From: Thomas Monjalon @ 2023-10-06  9:03 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: techboard, Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan, David Marchand

06/10/2023 10:46, David Marchand:
> On Thu, Oct 5, 2023 at 12:09 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> > >> +static int
> > >> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
> > >> +                      const struct rte_event *event)
> > >
> > > Wrt DPDK coding style, indent is a single tab.
> > > Adding an extra tab is recommended when continuing control statements
> > > like if()/for()/..
> > >
> >
> > Sure, but I don't understand why you mention this.
> 
> I wanted to remind the DPDK coding style which I try to more closely
> enforce for new code.
> indent is off in this file (especially for function prototypes with
> multiple tabs used).
> 
> >
> > > On the other hand, max accepted length for a line is 100 columns.
> > >
> > > Wdyt of a single line for this specific case?
> >
> >
> > Are you asking why the evd_lookup_handler_idx() function prototype is
> > not a single line?
> >
> > It would make it long, that's why. Even if 100 wide lines are allowed,
> > it doesn't mean the author is forced to use such long lines?
> 
> I find it more readable.
> If you want to stick to 80 columns, please comply with a single tab for indent.

I think this is a case of continuation line, so it should be 2 tabs.
We can make it clear in the doc.


> > >> +static int
> > >> +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
> > >> +{
> > >> +       int rc;
> > >> +
> > >> +       rc = rte_service_component_runstate_set(dispatcher->service_id,
> > >> +                                               state);
> > >> +
> > >> +       if (rc != 0) {
> > >> +               RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
> > >> +                                "service component run state to %d\n", rc,
> > >> +                                state);
> > >> +               RTE_ASSERT(0);
> > >
> > > Why not propagating the error to callers?
> > >
> > >
> >
> > The root cause would be a programming error, hence an assertion is a more
> > appropriate way to deal with the situation.
> 
> Without building RTE_ENABLE_ASSERT (disabled by default), the code
> later in this function will still be executed.

Don't forget you are writing a library, so you shouldn't stop runtime
because of an error. It is always better to return errors.
Assert should be used only for debugging, that's why it is disabled by default.




^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 1/3] lib: introduce dispatcher library
  2023-10-06  8:46                                               ` David Marchand
  2023-10-06  9:03                                                 ` Thomas Monjalon
@ 2023-10-09 16:49                                                 ` Mattias Rönnblom
  2023-10-11 14:57                                                   ` David Marchand
  1 sibling, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-09 16:49 UTC (permalink / raw)
  To: David Marchand
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On 2023-10-06 10:46, David Marchand wrote:
> Hello Mattias,
> 
> On Thu, Oct 5, 2023 at 12:09 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>>>> +
>>>> +deps += ['eventdev']
>>>> diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
>>>> new file mode 100644
>>>> index 0000000000..0e69db2b9b
>>>> --- /dev/null
>>>> +++ b/lib/dispatcher/rte_dispatcher.c
>>>> @@ -0,0 +1,708 @@
>>>> +/* SPDX-License-Identifier: BSD-3-Clause
>>>> + * Copyright(c) 2023 Ericsson AB
>>>> + */
>>>> +
>>>> +#include <stdbool.h>
>>>> +#include <stdint.h>
>>>> +
>>>> +#include <rte_branch_prediction.h>
>>>> +#include <rte_common.h>
>>>> +#include <rte_lcore.h>
>>>> +#include <rte_random.h>
>>>> +#include <rte_service_component.h>
>>>> +
>>>> +#include "eventdev_pmd.h"
>>>> +
>>>> +#include <rte_dispatcher.h>
>>>> +
>>>> +#define EVD_MAX_PORTS_PER_LCORE 4
>>>> +#define EVD_MAX_HANDLERS 32
>>>> +#define EVD_MAX_FINALIZERS 16
>>>> +#define EVD_AVG_PRIO_INTERVAL 2000
>>>> +#define EVD_SERVICE_NAME "dispatcher"
>>>> +
>>>> +struct rte_dispatcher_lcore_port {
>>>> +       uint8_t port_id;
>>>> +       uint16_t batch_size;
>>>> +       uint64_t timeout;
>>>> +};
>>>> +
>>>> +struct rte_dispatcher_handler {
>>>> +       int id;
>>>> +       rte_dispatcher_match_t match_fun;
>>>> +       void *match_data;
>>>> +       rte_dispatcher_process_t process_fun;
>>>> +       void *process_data;
>>>> +};
>>>> +
>>>> +struct rte_dispatcher_finalizer {
>>>> +       int id;
>>>> +       rte_dispatcher_finalize_t finalize_fun;
>>>> +       void *finalize_data;
>>>> +};
>>>> +
>>>> +struct rte_dispatcher_lcore {
>>>> +       uint8_t num_ports;
>>>> +       uint16_t num_handlers;
>>>> +       int32_t prio_count;
>>>> +       struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
>>>> +       struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
>>>> +       struct rte_dispatcher_stats stats;
>>>> +} __rte_cache_aligned;
>>>> +
>>>> +struct rte_dispatcher {
>>>> +       uint8_t event_dev_id;
>>>> +       int socket_id;
>>>> +       uint32_t service_id;
>>>> +       struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
>>>> +       uint16_t num_finalizers;
>>>> +       struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
>>>> +};
>>>> +
>>>> +static int
>>>> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
>>>> +                      const struct rte_event *event)
>>>
>>> Wrt DPDK coding style, indent is a single tab.
>>> Adding an extra tab is recommended when continuing control statements
>>> like if()/for()/..
>>>
>>
>> Sure, but I don't understand why you mention this.
> 
> I wanted to remind the DPDK coding style which I try to more closely
> enforce for new code.
> indent is off in this file (especially for function prototypes with
> multiple tabs used).
> 

I just didn't understand what rule I was breaking, but I see now.

>>
>>> On the other hand, max accepted length for a line is 100 columns.
>>>
>>> Wdyt of a single line for this specific case?
>>
>>
>> Are you asking why the evd_lookup_handler_idx() function prototype is
>> not a single line?
>>
>> It would make it long, that's why. Even if 100 wide lines are allowed,
>> it doesn't mean the author is forced to use such long lines?
> 
> I find it more readable.
> If you want to stick to 80 columns, please comply with a single tab for indent.
> 
> [snip]
> 
> 
>>>> +static int
>>>> +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
>>>> +{
>>>> +       int rc;
>>>> +
>>>> +       rc = rte_service_component_runstate_set(dispatcher->service_id,
>>>> +                                               state);
>>>> +
>>>> +       if (rc != 0) {
>>>> +               RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
>>>> +                                "service component run state to %d\n", rc,
>>>> +                                state);
>>>> +               RTE_ASSERT(0);
>>>
>>> Why not propagating the error to callers?
>>>
>>>
>>
>> The root cause would be a programming error, hence an assertion is a more
>> appropriate way to deal with the situation.
> 
> Without building RTE_ENABLE_ASSERT (disabled by default), the code
> later in this function will still be executed.
> 

If RTE_ASSERT() is not the way to assure a consistent internal library 
state, what is? RTE_VERIFY()?

A side note: in the DPDK code base, the majority of 
rte_service_component_runstate_set() calls ignore the return value. No 
big surprise, since in many cases this error can't happen unless the 
internal state is inconsistent.

> [snip]
> 
> 
>>>> +typedef void
>>>> +(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
>>>> +                                  void *cb_data);
>>>> +
>>>> +/**
>>>> + * Dispatcher statistics
>>>> + */
>>>> +struct rte_dispatcher_stats {
>>>> +       uint64_t poll_count;
>>>> +       /**< Number of event dequeue calls made toward the event device. */
>>>
>>> We had a number of issues with doxygen post annotations.
>>> Prefer the prefixed ones.
>>>
>>
>> OK. More readable, too. I just used the postfix syntax since it seemed
>> the only one used in DPDK.
> 
> Historically yes, but we started cleaning headers for readability
> (like in ethdev) and after catching a few errors with postfix
> comments.
> 
> 

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 2/3] test: add dispatcher test suite
  2023-10-06  8:52                                               ` David Marchand
@ 2023-10-09 17:16                                                 ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-09 17:16 UTC (permalink / raw)
  To: David Marchand
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On 2023-10-06 10:52, David Marchand wrote:
> On Thu, Oct 5, 2023 at 1:26 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> 
> [snip]
> 
>>>> +#define RETURN_ON_ERROR(rc) \
>>>> +       do {                                    \
>>>> +               if (rc != TEST_SUCCESS)         \
>>>> +                       return rc;              \
>>>> +       } while (0)
>>>
>>> TEST_ASSERT?
>>> This gives context about which part of a test failed.
>>>
>>
>> This macro is used in a situation where the failure has occurred and has
>> been reported already.
>>
>> Maybe it would be better to replace the macro instantiation with just
>> the if+return statements.
>>
>> RETURN_ON_ERROR(rc);
>>
>> ->
>>
>> if (rc != TEST_SUCCESS)
>>          return rc;
> 
> Yes, this macro does not add much, you can remove it.
> 

OK, will do.

> [snip]
> 
> 
>>>> +       for (i = 0; i < NUM_SERVICE_CORES; i++)
>>>> +               if (app->service_lcores[i] == lcore_id)
>>>> +                       return i;
>>>
>>> This construct is hard to read and prone to error if the code is updated later.
>>>
>>> for () {
>>>     if ()
>>>       return i;
>>> }
>>>
>>>
>>
>> I wouldn't consider that an improvement (rather the opposite).
> 
> Well, I disagree, but it is not enforced in the coding style so I won't insist.
> 
> [snip]
> 
> 
>>>> +static struct unit_test_suite test_suite = {
>>>> +       .suite_name = "Event dispatcher test suite",
>>>> +       .unit_test_cases = {
>>>> +               TEST_CASE_ST(test_setup, test_teardown, test_basic),
>>>> +               TEST_CASE_ST(test_setup, test_teardown, test_drop),
>>>> +               TEST_CASE_ST(test_setup, test_teardown,
>>>> +                            test_many_handler_registrations),
>>>> +               TEST_CASE_ST(test_setup, test_teardown,
>>>> +                            test_many_finalize_registrations),
>>>> +               TEST_CASES_END()
>>>> +       }
>>>> +};
>>>> +
>>>> +static int
>>>> +test_dispatcher(void)
>>>> +{
>>>> +       return unit_test_suite_runner(&test_suite);
>>>> +}
>>>> +
>>>> +REGISTER_TEST_COMMAND(dispatcher_autotest, test_dispatcher);
>>>
>>> We have new macros (see REGISTER_FAST_TEST for example) so a test is
>>> associated with an existing testsuite.
>>> I think this test should be part of the fast-test testsuite, wdyt?
>>>
>>>
>>
>> It needs setup and teardown methods, so I assume a generic test suite
>> wouldn't do.
>>
>> The dispatcher tests do have fairly short run times, so in that sense
>> they should qualify.
> 
> 
> So please use REGISTER_FAST_TEST().
> Thanks.
> 
> 

OK.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 1/3] lib: introduce dispatcher library
  2023-10-06  9:03                                                 ` Thomas Monjalon
@ 2023-10-09 17:40                                                   ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-09 17:40 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: techboard, Mattias Rönnblom, dev, Jerin Jacob,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan, David Marchand

On 2023-10-06 11:03, Thomas Monjalon wrote:
> 06/10/2023 10:46, David Marchand:
>> On Thu, Oct 5, 2023 at 12:09 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>>>>> +static int
>>>>> +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
>>>>> +                      const struct rte_event *event)
>>>>
>>>> Wrt DPDK coding style, indent is a single tab.
>>>> Adding an extra tab is recommended when continuing control statements
>>>> like if()/for()/..
>>>>
>>>
>>> Sure, but I don't understand why you mention this.
>>
>> I wanted to remind the DPDK coding style which I try to more closely
>> enforce for new code.
>> indent is off in this file (especially for function prototypes with
>> multiple tabs used).
>>
>>>
>>>> On the other hand, max accepted length for a line is 100 columns.
>>>>
>>>> Wdyt of a single line for this specific case?
>>>
>>>
>>> Are you asking why the evd_lookup_handler_idx() function prototype is
>>> not a single line?
>>>
>>> It would make it long, that's why. Even if 100 wide lines are allowed,
>>> it doesn't mean the author is forced to use such long lines?
>>
>> I find it more readable.
>> If you want to stick to 80 columns, please comply with a single tab for indent.
> 
> I think this is a case of continuation line, so it should be 2 tabs.
> We can make it clear in the doc.
> 
> 
>>>>> +static int
>>>>> +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
>>>>> +{
>>>>> +       int rc;
>>>>> +
>>>>> +       rc = rte_service_component_runstate_set(dispatcher->service_id,
>>>>> +                                               state);
>>>>> +
>>>>> +       if (rc != 0) {
>>>>> +               RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
>>>>> +                                "service component run state to %d\n", rc,
>>>>> +                                state);
>>>>> +               RTE_ASSERT(0);
>>>>
>>>> Why not propagating the error to callers?
>>>>
>>>>
>>>
>>> The root cause would be a programming error, hence an assertion is a more
>>> appropriate way to deal with the situation.
>>
>> Without building RTE_ENABLE_ASSERT (disabled by default), the code
>> later in this function will still be executed.
> 
> Don't forget you are writing a library, so you shouldn't stop runtime
> because of an error. It is always better to return errors.
> Assert should be used only for debugging, that's why it is disabled by default.
> 
> 
> 

A breach of the API contract should always be met with an assertion - 
library or not. That is the most helpful thing you can do, so that one 
fails early and the programmer can go fix the bug.

The use of EINVAL and attempts to return errors in the face of API 
violations is a bad practice, in my opinion. What could the program 
possibly do, if it learns it's using a function the wrong way? If the 
two memory areas in memcpy() overlap, should there be an error code, so 
that the caller can retry, but "please not with overlapping memory 
regions this time"?

I know that the EINVAL pattern is widespread practice in DPDK, at the 
cost of performance and complexity, I would argue. I guess this practice 
originates from the kernel/libc tradition of validating system calls (a 
place where this can actually be done in a reliable manner, unlike in 
normal user code).

^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v6 0/3] Add dispatcher library
  2023-09-28  7:30                                         ` [PATCH v5 1/3] lib: introduce " Mattias Rönnblom
  2023-10-05  8:36                                           ` David Marchand
@ 2023-10-09 18:17                                           ` Mattias Rönnblom
  2023-10-09 18:17                                             ` [PATCH v6 1/3] lib: introduce " Mattias Rönnblom
                                                               ` (2 more replies)
  1 sibling, 3 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-09 18:17 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to decouple different parts
of an eventdev-based application (e.g., processing pipeline stages),
sharing the same underlying event device.

The dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the dispatcher relies on application-supplied matching callback
functions to decide where to deliver events.

A dispatcher is configured to dequeue events from a specific event
device, and ties into the service core framework, to do its (and the
application's) work.

The dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the dispatcher adds some overhead, experience suggests that
the net effect on the application (both synthetic benchmarks and more
real-world applications) may well be positive. This is primarily due
to clustering (see programming guide) reducing cache misses.

Benchmarking indicates that the overhead is ~10 cc/event (on a
large core), with a handful of often-used handlers.

The dispatcher does not support run-time reconfiguration.

The use of the dispatcher library is optional, and an eventdev-based
application may still opt to access the event device using direct
eventdev API calls, or by some other means.

Mattias Rönnblom (3):
  lib: introduce dispatcher library
  test: add dispatcher test suite
  doc: add dispatcher programming guide

 MAINTAINERS                              |    6 +
 app/test/meson.build                     |    1 +
 app/test/test_dispatcher.c               | 1045 ++++++++++++++++++++++
 doc/api/doxy-api-index.md                |    1 +
 doc/api/doxy-api.conf.in                 |    1 +
 doc/guides/prog_guide/dispatcher_lib.rst |  433 +++++++++
 doc/guides/prog_guide/index.rst          |    1 +
 doc/guides/rel_notes/release_23_11.rst   |    5 +
 lib/dispatcher/meson.build               |   13 +
 lib/dispatcher/rte_dispatcher.c          |  691 ++++++++++++++
 lib/dispatcher/rte_dispatcher.h          |  466 ++++++++++
 lib/dispatcher/version.map               |   20 +
 lib/meson.build                          |    2 +
 13 files changed, 2685 insertions(+)
 create mode 100644 app/test/test_dispatcher.c
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v6 1/3] lib: introduce dispatcher library
  2023-10-09 18:17                                           ` [PATCH v6 0/3] Add " Mattias Rönnblom
@ 2023-10-09 18:17                                             ` Mattias Rönnblom
  2023-10-11  7:16                                               ` [PATCH v7 0/3] Add " Mattias Rönnblom
  2023-10-09 18:17                                             ` [PATCH v6 2/3] test: add dispatcher test suite Mattias Rönnblom
  2023-10-09 18:17                                             ` [PATCH v6 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-09 18:17 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the dispatcher also provides a convenient and flexible
way for the application to use service cores for application-level
processing.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
Reviewed-by: Heng Wang <heng.wang@ericsson.com>

--

PATCH v6:
 o Use single tab as indentation for continuation lines in multiple-line
   function prototypes. (David Marchand)
 o Add dispatcher library release note. (David Marchand)
 o Various indentation and spelling improvements. (David Marchand)
 o Add direct <stdint.h>, <stdbool.h> and <rte_compat.h> includes,
   instead of relying on <rte_eventdev.h>. (David Marchand)
 o Avoid Doxygen post annotations for struct fields. (David Marchand)

PATCH v5:
 o Move from using an integer id to a pointer to reference a dispatcher
   instance, to simplify the API.
 o Fix bug where dispatcher stats retrieval function erroneously depended
   on the user-supplied stats buffer being all-zero.

PATCH v4:
 o Fix bugs in handler and finalizer unregistration. (Naga Harish)
 o Return -EINVAL in cases where NULL pointers were provided in
   calls requiring non-NULL pointers. (Naga Harish)
 o Add experimental warning for the whole API. (Jerin Jacob)

PATCH v3:
 o To underline its optional character and since it does not provide
   hardware abstraction, the event dispatcher is now a separate
   library.
 o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
   shorter and to avoid the rte_event_* namespace.

PATCH v2:
 o Add dequeue batch count statistic.
 o Add statistics reset function to API.
 o Clarify MT safety guarantees (or lack thereof) in the API documentation.
 o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
   to be consistent with similar loops elsewhere in the dispatcher.
 o Fix variable names in finalizer unregister function.

PATCH:
 o Change prefix from RED to EVD, to avoid confusion with random
   early detection.

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and cause
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 MAINTAINERS                            |   4 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/rel_notes/release_23_11.rst |   5 +
 lib/dispatcher/meson.build             |  13 +
 lib/dispatcher/rte_dispatcher.c        | 691 +++++++++++++++++++++++++
 lib/dispatcher/rte_dispatcher.h        | 466 +++++++++++++++++
 lib/dispatcher/version.map             |  20 +
 lib/meson.build                        |   2 +
 9 files changed, 1203 insertions(+)
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 00f5a5f9e6..a4372701c4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1733,6 +1733,10 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
 M: Pavan Nikhilesh <pbhagavatula@marvell.com>
 F: lib/node/
 
+Dispatcher - EXPERIMENTAL
+M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
+F: lib/dispatcher/
+
 
 Test Applications
 -----------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index fdeda13932..7d0cad9fed 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -155,6 +155,7 @@ The public API headers are grouped by topics:
 
 - **classification**
   [reorder](@ref rte_reorder.h),
+  [dispatcher](@ref rte_dispatcher.h),
   [distributor](@ref rte_distributor.h),
   [EFD](@ref rte_efd.h),
   [ACL](@ref rte_acl.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index df801d32f9..93709e1d2c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dispatcher \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 9319c86cd8..b5c5073018 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -80,6 +80,11 @@ New Features
   device is different from the Tx Ethernet device with respective driver
   callback functions in ``rte_eth_recycle_mbufs``.
 
+* **Added dispatcher library.**
+
+  Added dispatcher library, whose purpose is to help decouple different
+  parts (modules) of an eventdev-based application.
+
 * **Updated Solarflare net driver.**
 
   * Added support for transfer flow action ``INDIRECT`` with subtype ``VXLAN_ENCAP``.
diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
new file mode 100644
index 0000000000..ffaef26a6d
--- /dev/null
+++ b/lib/dispatcher/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023 Ericsson AB
+
+if is_windows
+    build = false
+    reason = 'not supported on Windows'
+    subdir_done()
+endif
+
+sources = files('rte_dispatcher.c')
+headers = files('rte_dispatcher.h')
+
+deps += ['eventdev']
diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
new file mode 100644
index 0000000000..83e80ede96
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.c
@@ -0,0 +1,691 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_dispatcher.h>
+
+#define EVD_MAX_PORTS_PER_LCORE 4
+#define EVD_MAX_HANDLERS 32
+#define EVD_MAX_FINALIZERS 16
+#define EVD_AVG_PRIO_INTERVAL 2000
+#define EVD_SERVICE_NAME "dispatcher"
+
+struct rte_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_dispatcher_handler {
+	int id;
+	rte_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_dispatcher_finalizer {
+	int id;
+	rte_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
+	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
+	struct rte_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_dispatcher {
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
+};
+
+static int
+evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
+	const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_idx)
+{
+	struct rte_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		evd_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follows some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
+			EVD_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
+static inline void
+evd_dispatch_events(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore,
+	struct rte_dispatcher_lcore_port *port,
+	struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
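+	/*
+	 * Sort the events into per-handler bursts, so that each process
+	 * callback is invoked at most once per dequeued batch of events
+	 * (i.e., event clustering; see the programming guide).
+	 */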
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = evd_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		evd_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_batch_count++;
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+evd_port_dequeue(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore,
+	struct rte_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		evd_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+evd_lcore_process(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += evd_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
+static int32_t
+evd_process(void *userdata)
+{
+	struct rte_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = evd_lcore_process(dispatcher, lcore);
+
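+	/*
+	 * No events were dequeued; return -EAGAIN, allowing the service
+	 * core framework to account for this call as an idle one.
+	 */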
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+evd_service_register(struct rte_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = evd_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, sizeof(service.name), EVD_SERVICE_NAME);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+	if (rc != 0)
+		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+evd_service_unregister(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+	if (rc != 0)
+		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+struct rte_dispatcher *
+rte_dispatcher_create(uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	*dispatcher = (struct rte_dispatcher) {
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = evd_service_register(dispatcher);
+	if (rc < 0) {
+		rte_free(dispatcher);
+		rte_errno = -rc;
+		return NULL;
+	}
+
+	return dispatcher;
+}
+
+int
+rte_dispatcher_free(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	if (dispatcher == NULL)
+		return 0;
+
+	rc = evd_service_unregister(dispatcher);
+	if (rc != 0)
+		return rc;
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+uint32_t
+rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher)
+{
+	return dispatcher->service_id;
+}
+
+static int
+lcore_port_index(struct rte_dispatcher_lcore *lcore,
+	uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, uint16_t batch_size, uint64_t timeout,
+	unsigned int lcore_id)
+{
+	struct rte_dispatcher_lcore *lcore;
+	struct rte_dispatcher_lcore_port *port;
+
+	lcore = &dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, unsigned int lcore_id)
+{
+	struct rte_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_dispatcher_lcore_port *port;
+	struct rte_dispatcher_lcore_port *last;
+
+	lcore = &dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_dispatcher_handler *
+evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore, int handler_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
+{
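+	/*
+	 * All lcores share the same set of handlers, so lcore 0's list
+	 * can serve as the reference when searching for a free id.
+	 */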
+	int handler_id = 0;
+	struct rte_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
+	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
+		return -1;
+
+	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
+	const struct rte_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+evd_install_handler(struct rte_dispatcher *dispatcher,
+	const struct rte_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		evd_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_dispatcher_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_match_t match_fun, void *match_data,
+	rte_dispatcher_process_t process_fun, void *process_data)
+{
+	struct rte_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	handler.id = evd_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	evd_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_id)
+{
+	struct rte_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid handler id %d\n", handler_id);
+		return -EINVAL;
+	}
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+evd_uninstall_handler(struct rte_dispatcher *dispatcher, int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = evd_lcore_uninstall_handler(lcore, handler_id);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id)
+{
+	return evd_uninstall_handler(dispatcher, handler_id);
+}
+
+static struct rte_dispatcher_finalizer *
+evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
+	int finalizer_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == finalizer_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_dispatcher_finalizer *
+evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = evd_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_finalize_t finalize_fun, void *finalize_data)
+{
+	struct rte_dispatcher_finalizer *finalizer;
+
+	finalizer = evd_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
+	int finalizer_id)
+{
+	struct rte_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, finalizer_id);
+
+	if (unreg_finalizer == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid finalizer id %d\n", finalizer_id);
+		return -EINVAL;
+	}
+
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static int
+evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
+{
+	int rc;
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_start(struct rte_dispatcher *dispatcher)
+{
+	return evd_set_service_runstate(dispatcher, 1);
+}
+
+int
+rte_dispatcher_stop(struct rte_dispatcher *dispatcher)
+{
+	return evd_set_service_runstate(dispatcher, 0);
+}
+
+static void
+evd_aggregate_stats(struct rte_dispatcher_stats *result,
+	const struct rte_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_batch_count += part->ev_batch_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+void
+rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_stats *stats)
+{
+	unsigned int lcore_id;
+
+	*stats = (struct rte_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		const struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		evd_aggregate_stats(stats, &lcore->stats);
+	}
+}
+
+void
+rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		lcore->stats = (struct rte_dispatcher_stats) {};
+	}
+}
diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
new file mode 100644
index 0000000000..1f2faaaee8
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_DISPATCHER_H__
+#define __RTE_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Dispatcher
+ *
+ * @warning
+ * @b EXPERIMENTAL:
+ * All functions in this file may be changed or removed without prior notice.
+ *
+ * The purpose of the dispatcher is to help decouple different parts
+ * of an application (e.g., modules) sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <rte_compat.h>
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ *
+ * @return
+ *   Returns true in case this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ */
+typedef void
+(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+	struct rte_event *events, uint16_t num, void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the dispatcher to notify the
+ * application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_finalize_register().
+ */
+typedef void
+(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+	void *cb_data);
+
+/**
+ * Dispatcher statistics
+ */
+struct rte_dispatcher_stats {
+	/** Number of event dequeue calls made toward the event device. */
+	uint64_t poll_count;
+	/** Number of non-empty event batches dequeued from event device. */
+	uint64_t ev_batch_count;
+	/** Number of events dispatched to a handler. */
+	uint64_t ev_dispatch_count;
+	/** Number of events dropped because no handler was found. */
+	uint64_t ev_drop_count;
+};
+
+/**
+ * Create a dispatcher.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   A pointer to a new dispatcher instance, or NULL on failure, in which
+ *   case rte_errno is set.
+ */
+__rte_experimental
+struct rte_dispatcher *
+rte_dispatcher_create(uint8_t event_dev_id);
+
+/**
+ * Free a dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_free(struct rte_dispatcher *dispatcher);
+
+/**
+ * Retrieve the service identifier of a dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  The dispatcher service's id.
+ */
+__rte_experimental
+uint32_t
+rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher);
+
+/**
+ * Bind an event device port to a specific lcore on the specified
+ * dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the dispatcher service is mapped (with rte_service_map_lcore_set())
+ * to an lcore to which no ports are bound, the service function will be a
+ * no-operation.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the
+ * lcore specified by @c lcore_id.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, uint16_t batch_size, uint64_t timeout,
+	unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the
+ * lcore specified by @c lcore_id.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of event to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events for which no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order in which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different process callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] is dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the dispatcher may choose to first deliver [ev0, ev2]
+ * using A's process function, and then [ev1] to B - or vice versa.
+ *
+ * rte_dispatcher_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the event
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_match_t match_fun, void *match_cb_data,
+	rte_dispatcher_process_t process_fun, void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * ensured using a finalize callback.
+ *
+ * rte_dispatcher_finalize_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_finalize_t finalize_fun, void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher, int reg_id);
+
+/**
+ * Start a dispatcher instance.
+ *
+ * Enables the dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_dispatcher_start().
+ *
+ * For the dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. A dispatcher's
+ * service id is retrieved using rte_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
+ * starting the dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_start(struct rte_dispatcher *dispatcher);
+
+/**
+ * Stop a running dispatcher instance.
+ *
+ * Disables the dispatcher service.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_stop(struct rte_dispatcher *dispatcher);
+
+/**
+ * Retrieve statistics for a dispatcher instance.
+ *
+ * This function is MT safe and may be called by any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ */
+__rte_experimental
+void
+rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_stats *stats);
+
+/**
+ * Reset statistics for a dispatcher instance.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but may not produce the correct result if the
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ */
+__rte_experimental
+void
+rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DISPATCHER_H__ */
diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
new file mode 100644
index 0000000000..44585e4f15
--- /dev/null
+++ b/lib/dispatcher/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	# added in 23.11
+	rte_dispatcher_bind_port_to_lcore;
+	rte_dispatcher_create;
+	rte_dispatcher_finalize_register;
+	rte_dispatcher_finalize_unregister;
+	rte_dispatcher_free;
+	rte_dispatcher_register;
+	rte_dispatcher_service_id_get;
+	rte_dispatcher_start;
+	rte_dispatcher_stats_get;
+	rte_dispatcher_stats_reset;
+	rte_dispatcher_stop;
+	rte_dispatcher_unbind_port_from_lcore;
+	rte_dispatcher_unregister;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index cf4aa63630..59d381bf7a 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -35,6 +35,7 @@ libraries = [
         'distributor',
         'efd',
         'eventdev',
+        'dispatcher', # dispatcher depends on eventdev
         'gpudev',
         'gro',
         'gso',
@@ -81,6 +82,7 @@ optional_libs = [
         'cfgfile',
         'compressdev',
         'cryptodev',
+        'dispatcher',
         'distributor',
         'dmadev',
         'efd',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v6 2/3] test: add dispatcher test suite
  2023-10-09 18:17                                           ` [PATCH v6 0/3] Add " Mattias Rönnblom
  2023-10-09 18:17                                             ` [PATCH v6 1/3] lib: introduce " Mattias Rönnblom
@ 2023-10-09 18:17                                             ` Mattias Rönnblom
  2023-10-10 11:56                                               ` David Marchand
  2023-10-10 14:02                                               ` David Marchand
  2023-10-09 18:17                                             ` [PATCH v6 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2 siblings, 2 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-09 18:17 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Add unit tests for the dispatcher.

--

PATCH v6:
 o Register test as "fast". (David Marchand)
 o Use single tab as indentation for continuation lines in multiple-line
   function prototypes. (David Marchand)
 o Add Signed-off-by line. (David Marchand)
 o Use DPDK atomics wrapper API instead of C11 atomics.

PATCH v5:
 o Update test suite to use pointer and not integer id when calling
   dispatcher functions.

PATCH v3:
 o Adapt the test suite to dispatcher API name changes.

PATCH v2:
 o Test finalize callback functionality.
 o Test handler and finalizer count upper limits.
 o Add statistics reset test.
 o Make sure the dispatcher supplies the proper event dev id and port id
   back to the application.

PATCH:
 o Extend test to cover often-used handler optimization feature.

RFC v4:
 o Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 MAINTAINERS                |    1 +
 app/test/meson.build       |    1 +
 app/test/test_dispatcher.c | 1050 ++++++++++++++++++++++++++++++++++++
 3 files changed, 1052 insertions(+)
 create mode 100644 app/test/test_dispatcher.c

diff --git a/MAINTAINERS b/MAINTAINERS
index a4372701c4..262401d43d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1736,6 +1736,7 @@ F: lib/node/
 Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
+F: app/test/test_dispatcher.c
 
 
 Test Applications
diff --git a/app/test/meson.build b/app/test/meson.build
index bf9fc90612..ace10327f8 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -59,6 +59,7 @@ source_file_deps = {
     'test_cycles.c': [],
     'test_debug.c': [],
     'test_devargs.c': ['kvargs'],
+    'test_dispatcher.c': ['dispatcher'],
     'test_distributor.c': ['distributor'],
     'test_distributor_perf.c': ['distributor'],
     'test_dmadev.c': ['dmadev', 'bus_vdev'],
diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c
new file mode 100644
index 0000000000..5a9c972d1f
--- /dev/null
+++ b/app/test/test_dispatcher.c
@@ -0,0 +1,1050 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+#include <rte_stdatomic.h>
+
+#include "test.h"
+
+#define NUM_WORKERS 3
+
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct cb_count {
+	uint8_t expected_event_dev_id;
+	uint8_t expected_event_port_id[RTE_MAX_LCORE];
+	RTE_ATOMIC(int) count;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	struct rte_dispatcher *dispatcher;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	int never_match_reg_id;
+	uint64_t never_match_count;
+	struct cb_count never_process_count;
+
+	struct app_queue queues[NUM_QUEUES];
+
+	int finalize_reg_id;
+	struct cb_count finalize_count;
+
+	bool running;
+
+	RTE_ATOMIC(int) completed_events;
+	RTE_ATOMIC(int) errors;
+};
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+	TEST_ASSERT(rc >= 0, "Unable to get event device id for " DSW_VDEV);
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc < 0)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher = rte_dispatcher_create(app->event_dev_id);
+
+	TEST_ASSERT(app->dispatcher != NULL, "Unable to create event "
+		    "dispatcher");
+
+	app->dispatcher_service_id =
+		rte_dispatcher_service_id_get(app->dispatcher);
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to set dispatcher service runstate");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rc = rte_dispatcher_free(app->dispatcher);
+	TEST_ASSERT_SUCCESS(rc, "Error freeing dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	app->never_process_count.expected_event_dev_id =
+		app->event_dev_id;
+	app->finalize_count.expected_event_dev_id =
+		app->event_dev_id;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		uint8_t port_id = WORKER_PORT_ID(i);
+
+		int rc = rte_dispatcher_bind_port_to_lcore(
+			app->dispatcher, port_id, DEQUEUE_BURST_SIZE, 0,
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", port_id, lcore_id);
+
+		app->never_process_count.expected_event_port_id[lcore_id] =
+			port_id;
+		app->finalize_count.expected_event_port_id[lcore_id] = port_id;
+	}
+
+
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_dispatcher_unbind_port_from_lcore(
+			app->dispatcher,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	rte_atomic_fetch_add_explicit(&app->errors, 1, rte_memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+	struct rte_event *in_events, uint16_t num,
+	void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		rte_atomic_fetch_add_explicit(&app->completed_events, num,
+					      rte_memory_order_relaxed);
+}
+
+static bool
+never_match(const struct rte_event *event __rte_unused, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count)++;
+
+	return false;
+}
+
+static void
+test_app_never_process(uint8_t event_dev_id, uint8_t event_port_id,
+	struct rte_event *in_events __rte_unused, uint16_t num, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		rte_atomic_fetch_add_explicit(&count->count, num,
+					      rte_memory_order_relaxed);
+}
+
+static void
+finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		rte_atomic_fetch_add_explicit(&count->count, 1,
+					      rte_memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	app->never_match_reg_id =
+		rte_dispatcher_register(app->dispatcher, never_match,
+					&app->never_match_count,
+					test_app_never_process,
+					&app->never_process_count);
+
+	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
+		    "never-match handler");
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_dispatcher_register(app->dispatcher,
+						 match_queue, (void *)queue_id,
+						 test_app_process_queue,
+						 app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	app->finalize_reg_id =
+		rte_dispatcher_finalize_register(app->dispatcher,
+						 finalize,
+						 &app->finalize_count);
+	TEST_ASSERT(app->finalize_reg_id >= 0, "Error registering "
+		    "finalize callback");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+	int rc;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	rc = rte_dispatcher_unregister(app->dispatcher, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+	int rc;
+
+	if (app->never_match_reg_id >= 0) {
+		rc = rte_dispatcher_unregister(app->dispatcher,
+					       app->never_match_reg_id);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
+				    "handler");
+		app->never_match_reg_id = -1;
+	}
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		rc = test_app_unregister_callback(app, i);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	if (app->finalize_reg_id >= 0) {
+		rc = rte_dispatcher_finalize_unregister(
+			app->dispatcher, app->finalize_reg_id
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalize "
+				    "callback");
+		app->finalize_reg_id = -1;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_start(app->dispatcher);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_stop(app->dispatcher);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_reset_dispatcher_stats(struct test_app *app)
+{
+	struct rte_dispatcher_stats stats;
+
+	rte_dispatcher_stats_reset(app->dispatcher);
+
+	memset(&stats, 0xff, sizeof(stats));
+
+	rte_dispatcher_stats_get(app->dispatcher, &stats);
+
+	TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
+			  "not zero");
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		TEST_ASSERT(lcore_id != RTE_MAX_LCORE,
+			    "Too few lcores. Needs at least %d worker lcores",
+			    NUM_SERVICE_CORES);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_start_service_cores(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_start_dispatcher(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	app->running = true;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_stop_service_cores(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
static struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_create_dispatcher(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_setup_service_cores(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_register_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_bind_ports(test_app);
+
+	return rc;
+}
+
+static void test_teardown(void)
+{
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return rte_atomic_load_explicit(&app->completed_events,
+					rte_memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return rte_atomic_load_explicit(&app->errors, rte_memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_dispatcher_stats stats;
+	rte_dispatcher_stats_get(test_app->dispatcher, &stats);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
+			  "Never-match handler's process function has "
+			  "been called");
+
+	int finalize_count =
+		rte_atomic_load_explicit(&test_app->finalize_count.count,
+					 rte_memory_order_relaxed);
+
+	TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
+	TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
+		    "Finalize count larger than event count");
+
+	TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
+			  "%"PRIu64" batches dequeued, but finalize called %d "
+			  "times", stats.ev_batch_count, finalize_count);
+
+	/*
+	 * The event dispatcher should call often-matching match functions
+	 * more often, and thus this never-matching match function should
+	 * be called relatively infrequently.
+	 */
+	TEST_ASSERT(test_app->never_match_count <
+		    (stats.ev_dispatch_count / 4),
+		    "Never-matching match function called suspiciously often");
+
+	rc = test_app_reset_dispatcher_stats(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue;
+	struct rte_dispatcher_stats stats;
+
+	unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
+
+	rc = test_app_start(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rte_dispatcher_stats_get(test_app->dispatcher, &stats);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+#define MORE_THAN_MAX_HANDLERS 1000
+#define MIN_HANDLERS 32
+
+static int
+test_many_handler_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_HANDLERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
+		reg_id = rte_dispatcher_register(test_app->dispatcher,
+						 never_match, NULL,
+						 test_app_never_process, NULL);
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
+		    "after %d handler registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_unregister(test_app->dispatcher,
+					       reg_ids[i]);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+dummy_finalize(uint8_t event_dev_id __rte_unused,
+	       uint8_t event_port_id __rte_unused,
+	       void *cb_data __rte_unused)
+{
+}
+
+#define MORE_THAN_MAX_FINALIZERS 1000
+#define MIN_FINALIZERS 16
+
+static int
+test_many_finalize_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_FINALIZERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
+		reg_id = rte_dispatcher_finalize_register(
+			test_app->dispatcher, dummy_finalize, NULL
+		);
+
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
+		    "already after %d registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_finalize_unregister(
+			test_app->dispatcher, reg_ids[i]
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_handler_registrations),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_finalize_registrations),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v6 3/3] doc: add dispatcher programming guide
  2023-10-09 18:17                                           ` [PATCH v6 0/3] Add " Mattias Rönnblom
  2023-10-09 18:17                                             ` [PATCH v6 1/3] lib: introduce " Mattias Rönnblom
  2023-10-09 18:17                                             ` [PATCH v6 2/3] test: add dispatcher test suite Mattias Rönnblom
@ 2023-10-09 18:17                                             ` Mattias Rönnblom
  2023-10-10 13:31                                               ` David Marchand
  2 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-09 18:17 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Provide programming guide for the dispatcher library.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

PATCH v6:
 o Eliminate unneeded white space in code blocks. (David Marchand)

PATCH v5:
 o Update guide to match API changes related to dispatcher ids.

PATCH v3:
 o Adapt guide to the dispatcher API name changes.

PATCH:
 o Improve grammar and spelling.

RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 MAINTAINERS                              |   1 +
 doc/guides/prog_guide/dispatcher_lib.rst | 433 +++++++++++++++++++++++
 doc/guides/prog_guide/index.rst          |   1 +
 3 files changed, 435 insertions(+)
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst

diff --git a/MAINTAINERS b/MAINTAINERS
index 262401d43d..748c15cfe9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1737,6 +1737,7 @@ Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
 F: app/test/test_dispatcher.c
+F: doc/guides/prog_guide/dispatcher_lib.rst
 
 
 Test Applications
diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst
new file mode 100644
index 0000000000..2ab798920c
--- /dev/null
+++ b/doc/guides/prog_guide/dispatcher_lib.rst
@@ -0,0 +1,433 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Dispatcher
+==========
+
+Overview
+--------
+
+The purpose of the dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The dispatcher replaces the conditional logic that follows an event
+device dequeue operation, where events are dispatched to different
+parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B those on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int i, n;
+
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+                                        MAX_BURST, 0);
+
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example,
+``module_a_process()`` is broken into two processing stages — a
+module-internal affair — the shared conditional code must be updated
+to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive the event, using
+  a set of application-provided, per-handler event matching callback
+  functions.
+* Deliver events matching a particular handler to that handler, using
+  its process callback.
+
+Had the above application made use of the dispatcher, the code
+relevant for its module A might have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+           return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_register(dispatcher, module_a_match, NULL,
+                            module_a_process_events, module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or
+phrased differently, what is replacing the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is
+run as a DPDK :doc:`Service <service_cores>`.
+
+The dispatcher is a layer between the application and the event device
+in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
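+
+A process callback may itself enqueue events. Below is a sketch of a
+process function that forwards all events it is given to a
+hypothetical next-stage queue (``MODULE_A_NEXT_QUEUE_ID`` is an
+assumed application-defined constant; enqueue retries and error
+handling are omitted):
+
+.. code-block:: c
+
+    static void
+    module_a_forward_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            struct rte_event *events, uint16_t num,
+                            void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++) {
+                    events[i].queue_id = MODULE_A_NEXT_QUEUE_ID;
+                    events[i].op = RTE_EVENT_OP_FORWARD;
+            }
+
+            /* forward on the same device and port the events arrived on */
+            rte_event_enqueue_forward_burst(event_dev_id, event_port_id,
+                                            events, num);
+    }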
+
+Dispatcher Creation
+-------------------
+
+A dispatcher is created using ``rte_dispatcher_create()``.
+
+The event device must be configured before the dispatcher is created.
+
+Usually, only one dispatcher is needed per event device. A dispatcher
+handles exactly one event device.
+
+A dispatcher is freed using the ``rte_dispatcher_free()``
+function. The dispatcher's service functions must not be running on
+any lcore at the point of this call.
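+
+Below is a minimal creation sketch, assuming ``event_dev_id``
+identifies an already-configured event device (error handling beyond
+the NULL check omitted):
+
+.. code-block:: c
+
+    struct rte_dispatcher *dispatcher;
+
+    dispatcher = rte_dispatcher_create(event_dev_id);
+    if (dispatcher == NULL)
+            rte_panic("Unable to create dispatcher: %d\n", rte_errno);
+
+    /* ... bind ports, register handlers, start the dispatcher ... */
+
+    /* ... stop the dispatcher and its service lcores, then free it */
+    rte_dispatcher_free(dispatcher);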
+
+Event Port Binding
+------------------
+
+To be able to dequeue events, the dispatcher must know which event
+ports are to be used, on all the lcores it uses. The application
+provides this information using
+``rte_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
+``rte_dispatcher_unbind_port_from_lcore()`` is used to undo
+this operation.
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the dispatcher's
+service function is running on any lcore.
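+
+Below is a sketch of a deployment-time binding loop, where
+``dispatcher_lcores``, ``event_ports`` and ``num_dispatcher_lcores``
+are hypothetical application-defined deployment data (error handling
+omitted):
+
+.. code-block:: c
+
+    unsigned int i;
+
+    for (i = 0; i < num_dispatcher_lcores; i++)
+            rte_dispatcher_bind_port_to_lcore(dispatcher, event_ports[i],
+                                              MAX_BURST, 0,
+                                              dispatcher_lcores[i]);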
+
+Event Handlers
+--------------
+
+The dispatcher handler is an interface between the dispatcher and an
+application module, used to route events to the appropriate part of
+the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_dispatcher_match_t`` callback, whose job is to
+  decide if the event is to be the property of this handler.
+* The ``rte_dispatcher_process_t``, which is used by the
+  dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the dispatcher may impose an
+upper limit on the number of handlers. In addition, installing a large
+number of handlers increases dispatcher overhead, although this does
+not necessarily translate to a system-level performance degradation. See
+the section on :ref:`Event Clustering` for more information.
+
+Handler registration and unregistration cannot safely be done while
+the dispatcher's service function is running on any lcore.
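+
+Below is a registration sketch which saves the handler id for later
+unregistration, with ``module_a_match`` and friends as in the earlier
+example (error handling omitted):
+
+.. code-block:: c
+
+    int handler_id;
+
+    handler_id = rte_dispatcher_register(dispatcher, module_a_match,
+                                         NULL, module_a_process_events,
+                                         module_a_data);
+
+    /* ... at module teardown ... */
+
+    rte_dispatcher_unregister(dispatcher, handler_id);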
+
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides if an event should be
+delivered to this handler, or not.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
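+
+Match criteria need not be limited to queue ids. Below is a sketch of
+a match function keyed on the event's ``sub_event_type`` instead,
+``MODULE_C_SUB_EVENT_TYPE`` being an assumed application-defined
+constant:
+
+.. code-block:: c
+
+    static bool
+    module_c_match(const struct rte_event *event, void *cb_data)
+    {
+            return event->sub_event_type == MODULE_C_SUB_EVENT_TYPE;
+    }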
+
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the dispatcher's service
+function, upon the arrival of events to the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls being
+made. For example, if the dispatcher dequeues two events from
+the event device, it may choose to find out the destination for the
+first event, and deliver it, and then continue to find out the
+destination for the second, and then deliver that event as well. The
+dispatcher may also choose a strategy where no event is delivered
+until the destination handlers for both events have been determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The dispatcher maintains the order of events destined for the same
+handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the dispatcher delivers these events to the application.
+
+The dispatcher *does not* guarantee to maintain the order of events
+delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block::
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The dispatcher may deliver the events in the following manner:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The dispatcher may also choose to cluster (group) all events destined
+for ``module_b_stage_0_process()`` into one array:
+
+.. code-block::
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the dispatcher may not do is to reorder event
+``e1`` so that it precedes ``e0`` in the array passed to module
+B's stage 0 process callback.
+
+Although clustering requires some extra work for the dispatcher, it
+leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The dispatcher may be configured to notify one or more parts of the
+application when the matching and processing of a batch of events has
+completed.
+
+The ``rte_dispatcher_finalize_register`` call is used to
+register a finalize callback. The function
+``rte_dispatcher_finalize_unregister`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+module, or in a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffer at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned int lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_finalize_register(dispatcher, finalize_batch,
+                                     shared_event_buffer);
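+
+For completeness, a sketch of the flush helper is shown below. The
+``struct event_buffer_lcore`` layout, including the idea that the
+buffer records which event device and port it is tied to, is an
+assumption made for illustration.
+
+.. code-block:: c
+
+    #define MAX_BUFFERED_EVENTS 64
+
+    struct event_buffer_lcore {
+            uint8_t event_dev_id;
+            uint8_t event_port_id;
+            uint16_t count;
+            struct rte_event events[MAX_BUFFERED_EVENTS];
+    };
+
+    static void
+    event_buffer_lcore_flush(struct event_buffer_lcore *lcore_buffer)
+    {
+            uint16_t sent = 0;
+
+            /* Retry until the whole buffer has been enqueued. */
+            while (sent < lcore_buffer->count)
+                    sent += rte_event_enqueue_burst(lcore_buffer->event_dev_id,
+                                                    lcore_buffer->event_port_id,
+                                                    &lcore_buffer->events[sent],
+                                                    lcore_buffer->count - sent);
+
+            lcore_buffer->count = 0;
+    }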
+
+The dispatcher does not track any relationship between a handler and a
+finalize callback, and all finalize callbacks will be called if (and
+only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the dispatcher's service function is running on any lcore.
+
+Service
+-------
+
+The dispatcher is a DPDK service, and is managed in a manner similar
+to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-configured dispatcher
+(identified by ``DISPATCHER_ID``) to that lcore.
+
+.. code-block:: c
+
+    static void
+    launch_dispatcher_core(struct rte_dispatcher *dispatcher,
+                           unsigned lcore_id)
+    {
+            uint32_t service_id;
+
+            rte_service_lcore_add(lcore_id);
+
+            rte_dispatcher_service_id_get(dispatcher, &service_id);
+
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+
+            rte_service_lcore_start(lcore_id);
+
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the dispatcher must be started.
+
+.. code-block:: c
+
+    rte_dispatcher_start(dispatcher);
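+
+Conversely, at application shutdown, the dispatcher is first stopped,
+and then freed:
+
+.. code-block:: c
+
+    rte_dispatcher_stop(dispatcher);
+    rte_dispatcher_free(dispatcher);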
+
+
+Multi Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In the light of this, it makes sense to have the dispatcher service be
+the only DPDK service on all lcores used for packet processing — at
+least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the dispatcher into the
+application, were performed on the lcore even when no events were
+received are prime targets for being converted into such auxiliary
+services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
+
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to assure resource reclamation occurs even though some
+lcores currently do not process any events.
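+
+The sketch below outlines how such an auxiliary service may be
+registered, using ``rte_timer_manage()`` as the per-lcore task. Error
+handling and timer subsystem initialization are omitted for brevity,
+and the service name is an illustrative assumption.
+
+.. code-block:: c
+
+    static int32_t
+    manage_timers(void *userdata)
+    {
+            RTE_SET_USED(userdata);
+
+            /* Run expired timers on this lcore's timer list. */
+            rte_timer_manage();
+
+            return 0;
+    }
+
+    static uint32_t
+    register_timer_service(void)
+    {
+            struct rte_service_spec service = {
+                    .callback = manage_timers,
+                    .capabilities = RTE_SERVICE_CAP_MT_SAFE,
+                    .socket_id = SOCKET_ID_ANY
+            };
+            uint32_t service_id;
+
+            snprintf(service.name, sizeof(service.name), "timer_manage");
+
+            rte_service_component_register(&service, &service_id);
+            rte_service_component_runstate_set(service_id, 1);
+
+            return service_id;
+    }
+
+The returned service id may then be mapped to the lcores of the
+dispatcher core set, in the same manner as the dispatcher service
+itself.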
+
+If more services than the dispatcher service are mapped to a service
+lcore, it's important that the other services are well-behaved and
+don't interfere with event processing to the extent that the system's
+throughput and/or latency requirements are at risk of not being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 52a6d9e7aa..ab05bd6074 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -60,6 +60,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    dispatcher_lib
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v6 2/3] test: add dispatcher test suite
  2023-10-09 18:17                                             ` [PATCH v6 2/3] test: add dispatcher test suite Mattias Rönnblom
@ 2023-10-10 11:56                                               ` David Marchand
  2023-10-11  6:28                                                 ` Mattias Rönnblom
  2023-10-10 14:02                                               ` David Marchand
  1 sibling, 1 reply; 102+ messages in thread
From: David Marchand @ 2023-10-10 11:56 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On Mon, Oct 9, 2023 at 8:22 PM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
> +static int
> +test_dispatcher(void)
> +{
> +       return unit_test_suite_runner(&test_suite);
> +}
> +
> +REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);

Since this test expects some lcores, wdyt of adding:

@@ -1044,6 +1044,12 @@ static struct unit_test_suite test_suite = {
 static int
 test_dispatcher(void)
 {
+       if (rte_lcore_count() < NUM_SERVICE_CORES + 1) {
+               printf("Not enough cores for dispatcher_autotest,
expecting at least %u\n",
+                       NUM_SERVICE_CORES + 1);
+               return TEST_SKIPPED;
+       }
+
        return unit_test_suite_runner(&test_suite);
 }

This should avoid the failures we get with some CI env.
(additionally, I tested this on my laptop and the test runs fine)


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v6 3/3] doc: add dispatcher programming guide
  2023-10-09 18:17                                             ` [PATCH v6 3/3] doc: add dispatcher programming guide Mattias Rönnblom
@ 2023-10-10 13:31                                               ` David Marchand
  2023-10-11  6:38                                                 ` Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: David Marchand @ 2023-10-10 13:31 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On Mon, Oct 9, 2023 at 8:23 PM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:

[snip]

> +A module may use more than one event handler, for convenience or to
> +further decouple sub-modules. However, the dispatcher may impose an
> +upper limit of the number handlers. In addition, installing a large
> +number of handlers increase dispatcher overhead, although this does
> +not nessarily translate to a system-level performance degradation. See

necessarily*

[snip]

> +Event Clustering
> +^^^^^^^^^^^^^^^^
> +
> +The dispatcher maintains the order of events destined for the same
> +handler.
> +
> +*Order* here refers to the order in which the events were delivered
> +from the event device to the dispatcher (i.e., in the event array
> +populated by ``rte_event_dequeue_burst()``), in relation to the order
> +in which the dispatcher deliveres these events to the application.
> +
> +The dispatcher *does not* guarantee to maintain the order of events
> +delivered to *different* handlers.
> +
> +For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
> +and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
> +consider a scenario where the following events are dequeued from the
> +event device (qid is short for event queue id).
> +
> +.. code-block::

Surprisingly, Ubuntu in GHA sphinx complains about this code-block
directive while generating on my Fedora runs fine...

FAILED: doc/guides/html
/usr/bin/python3 ../buildtools/call-sphinx-build.py
/usr/bin/sphinx-build 23.11.0-rc0
/home/runner/work/dpdk/dpdk/doc/guides
/home/runner/work/dpdk/dpdk/build/doc/guides -a -W

Warning, treated as error:
/home/runner/work/dpdk/dpdk/doc/guides/prog_guide/dispatcher_lib.rst:253:Error
in "code-block" directive:
1 argument(s) required, 0 supplied.

.. code-block::

    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]

Looking at https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#directive-code-block,
I suspect there is probably a difference in the default settings of
sphinx in those Ubuntu containers.

This is pseudo-code / close to C, so we could probably mark this block
as "C", but "none" works fine too.
WDYT?


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v6 2/3] test: add dispatcher test suite
  2023-10-09 18:17                                             ` [PATCH v6 2/3] test: add dispatcher test suite Mattias Rönnblom
  2023-10-10 11:56                                               ` David Marchand
@ 2023-10-10 14:02                                               ` David Marchand
  2023-10-11  6:45                                                 ` Mattias Rönnblom
  1 sibling, 1 reply; 102+ messages in thread
From: David Marchand @ 2023-10-10 14:02 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On Mon, Oct 9, 2023 at 8:22 PM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> Add unit tests for the dispatcher.

Fyi, this patch is the first external user of rte_event_maintain and
it revealed an issue:
http://mails.dpdk.org/archives/test-report/2023-October/475671.html

I sent a fix, can you have a look?
https://patchwork.dpdk.org/project/dpdk/patch/20231010140029.66159-1-david.marchand@redhat.com/

Thanks.

-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v6 2/3] test: add dispatcher test suite
  2023-10-10 11:56                                               ` David Marchand
@ 2023-10-11  6:28                                                 ` Mattias Rönnblom
  2023-10-11  7:26                                                   ` David Marchand
  0 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-11  6:28 UTC (permalink / raw)
  To: David Marchand, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-10-10 13:56, David Marchand wrote:
> On Mon, Oct 9, 2023 at 8:22 PM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>> +static int
>> +test_dispatcher(void)
>> +{
>> +       return unit_test_suite_runner(&test_suite);
>> +}
>> +
>> +REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);
> 
> Since this test expects some lcores, wdyt of adding:
> 
> @@ -1044,6 +1044,12 @@ static struct unit_test_suite test_suite = {
>   static int
>   test_dispatcher(void)
>   {
> +       if (rte_lcore_count() < NUM_SERVICE_CORES + 1) {
> +               printf("Not enough cores for dispatcher_autotest,
> expecting at least %u\n",
> +                       NUM_SERVICE_CORES + 1);
> +               return TEST_SKIPPED;
> +       }
> +
>          return unit_test_suite_runner(&test_suite);
>   }
> 
> This should avoid the failures we get with some CI env.
> (additionnally, I tested this on my laptop and the test runs fine)
> 
> 

Indeed, this is a much better way than to fail the test case.

I'm thinking this is best done in test_setup(), since it's related to 
the setup. In case other test cases are added that required a different 
setup, there may be no minimum lcore requirement.

You will get multiple (four, for the moment) print-outs though, in case 
you run with fewer than 4 lcores.

I'll also make sure I skip (and not fail) the tests in case the DSW 
event device is not included in the build.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v6 3/3] doc: add dispatcher programming guide
  2023-10-10 13:31                                               ` David Marchand
@ 2023-10-11  6:38                                                 ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-11  6:38 UTC (permalink / raw)
  To: David Marchand, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-10-10 15:31, David Marchand wrote:
> On Mon, Oct 9, 2023 at 8:23 PM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
> 
> [snip]
> 
>> +A module may use more than one event handler, for convenience or to
>> +further decouple sub-modules. However, the dispatcher may impose an
>> +upper limit of the number handlers. In addition, installing a large
>> +number of handlers increase dispatcher overhead, although this does
>> +not nessarily translate to a system-level performance degradation. See
> 
> necessarily*
> 

Will fix.

> [snip]
> 
>> +Event Clustering
>> +^^^^^^^^^^^^^^^^
>> +
>> +The dispatcher maintains the order of events destined for the same
>> +handler.
>> +
>> +*Order* here refers to the order in which the events were delivered
>> +from the event device to the dispatcher (i.e., in the event array
>> +populated by ``rte_event_dequeue_burst()``), in relation to the order
>> +in which the dispatcher deliveres these events to the application.
>> +
>> +The dispatcher *does not* guarantee to maintain the order of events
>> +delivered to *different* handlers.
>> +
>> +For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
>> +and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
>> +consider a scenario where the following events are dequeued from the
>> +event device (qid is short for event queue id).
>> +
>> +.. code-block::
> 
> Surprisingly, Ubuntu in GHA sphinx complains about this code-block
> directive while generating on my Fedora runs fine...
> 
> FAILED: doc/guides/html
> /usr/bin/python3 ../buildtools/call-sphinx-build.py
> /usr/bin/sphinx-build 23.11.0-rc0
> /home/runner/work/dpdk/dpdk/doc/guides
> /home/runner/work/dpdk/dpdk/build/doc/guides -a -W
> 
> Warning, treated as error:
> /home/runner/work/dpdk/dpdk/doc/guides/prog_guide/dispatcher_lib.rst:253:Error
> in "code-block" directive:
> 1 argument(s) required, 0 supplied.
> 
> .. code-block::
> 
>      [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
> 
> Looking at https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#directive-code-block,
> I suspect there is probably a difference in the default settings of
> sphinx in those Ubuntu containers.
> 
> This is pseudo-code / close to C, so we could probably mark this block
> as "C", but "none" works fine too.
> WDYT?
> 
> 

I'm also running Ubuntu, and thus didn't experience this issue.

"none" seems better to me, to avoid potential future failures of syntax 
highlighting.

Thanks!

^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v6 2/3] test: add dispatcher test suite
  2023-10-10 14:02                                               ` David Marchand
@ 2023-10-11  6:45                                                 ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-11  6:45 UTC (permalink / raw)
  To: David Marchand, Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On 2023-10-10 16:02, David Marchand wrote:
> On Mon, Oct 9, 2023 at 8:22 PM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>>
>> Add unit tests for the dispatcher.
> 
> Fyi, this patch is the first external user of rte_event_maintain and
> it revealed an issue:
> http://mails.dpdk.org/archives/test-report/2023-October/475671.html
> 

"External" as in calling it over a shared object boundary.

> I sent a fix, can you have a look?

Will do.

> https://patchwork.dpdk.org/project/dpdk/patch/20231010140029.66159-1-david.marchand@redhat.com/
> 
> Thanks.
> 

^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v7 0/3] Add dispatcher library
  2023-10-09 18:17                                             ` [PATCH v6 1/3] lib: introduce " Mattias Rönnblom
@ 2023-10-11  7:16                                               ` Mattias Rönnblom
  2023-10-11  7:16                                                 ` [PATCH v7 1/3] lib: introduce " Mattias Rönnblom
                                                                   ` (2 more replies)
  0 siblings, 3 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-11  7:16 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to decouple different parts
of an eventdev-based application (e.g., processing pipeline stages),
sharing the same underlying event device.

The dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the dispatcher relies on application-supplied matching callback
functions to decide where to deliver events.

A dispatcher is configured to dequeue events from a specific event
device, and ties into the service core framework, to do its (and the
application's) work.

The dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the dispatcher adds some overhead, experience suggests that
the net effect on the application (both synthetic benchmarks and more
real-world applications) may well be positive. This is primarily due
to clustering (see programming guide) reducing cache misses.

Benchmarking indicates that the overhead is ~10 cc/event (on a
large core), with a handful of often-used handlers.

The dispatcher does not support run-time reconfiguration.

The use of the dispatcher library is optional, and an eventdev-based
application may still opt to access the event device using direct
eventdev API calls, or by some other means.

Mattias Rönnblom (3):
  lib: introduce dispatcher library
  test: add dispatcher test suite
  doc: add dispatcher programming guide

 MAINTAINERS                              |    6 +
 app/test/meson.build                     |    1 +
 app/test/test_dispatcher.c               | 1074 ++++++++++++++++++++++
 doc/api/doxy-api-index.md                |    1 +
 doc/api/doxy-api.conf.in                 |    1 +
 doc/guides/prog_guide/dispatcher_lib.rst |  433 +++++++++
 doc/guides/prog_guide/index.rst          |    1 +
 doc/guides/rel_notes/release_23_11.rst   |    5 +
 lib/dispatcher/meson.build               |   13 +
 lib/dispatcher/rte_dispatcher.c          |  691 ++++++++++++++
 lib/dispatcher/rte_dispatcher.h          |  466 ++++++++++
 lib/dispatcher/version.map               |   20 +
 lib/meson.build                          |    2 +
 13 files changed, 2714 insertions(+)
 create mode 100644 app/test/test_dispatcher.c
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v7 1/3] lib: introduce dispatcher library
  2023-10-11  7:16                                               ` [PATCH v7 0/3] Add " Mattias Rönnblom
@ 2023-10-11  7:16                                                 ` Mattias Rönnblom
  2023-10-12  8:50                                                   ` [PATCH v8 0/3] Add " Mattias Rönnblom
  2023-10-11  7:16                                                 ` [PATCH v7 2/3] test: add dispatcher test suite Mattias Rönnblom
  2023-10-11  7:17                                                 ` [PATCH v7 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2 siblings, 1 reply; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-11  7:16 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the dispatcher also provides a convenient and flexible
way for the application to use service cores for application-level
processing.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
Reviewed-by: Heng Wang <heng.wang@ericsson.com>

--

PATCH v6:
 o Use single tab as indentation for continuation lines in multiple-line
   function prototypes. (David Marchand)
 o Add dispatcher library release note. (David Marchand)
 o Various indentation and spelling improvements. (David Marchand)
 o Add direct <stdint.h>, <stdbool.h> and <rte_compat.h> includes,
   instead of relying on <rte_eventdev.h>. (David Marchand)
 o Avoid Doxygen post annotations for struct fields. (David Marchand)

PATCH v5:
 o Move from using an integer id to a pointer to reference a dispatcher
   instance, to simplify the API.
 o Fix bug where dispatcher stats retrieval function erroneously depended
   on the user-supplied stats buffer being all-zero.

PATCH v4:
 o Fix bugs in handler and finalizer unregistration. (Naga Harish)
 o Return -EINVAL in cases where NULL pointers were provided in
   calls requiring non-NULL pointers. (Naga Harish)
 o Add experimental warning for the whole API. (Jerin Jacob)

PATCH v3:
 o To underline its optional character and since it does not provide
   hardware abstraction, the event dispatcher is now a separate
   library.
 o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
   shorter and to avoid the rte_event_* namespace.

PATCH v2:
 o Add dequeue batch count statistic.
 o Add statistics reset function to API.
 o Clarify MT safety guarantees (or lack thereof) in the API documentation.
 o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
   to be consistent with similar loops elsewhere in the dispatcher.
 o Fix variable names in finalizer unregister function.

PATCH:
 o Change prefix from RED to EVD, to avoid confusion with random
   early detection.

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and cause
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 MAINTAINERS                            |   4 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/rel_notes/release_23_11.rst |   5 +
 lib/dispatcher/meson.build             |  13 +
 lib/dispatcher/rte_dispatcher.c        | 691 +++++++++++++++++++++++++
 lib/dispatcher/rte_dispatcher.h        | 466 +++++++++++++++++
 lib/dispatcher/version.map             |  20 +
 lib/meson.build                        |   2 +
 9 files changed, 1203 insertions(+)
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 00f5a5f9e6..a4372701c4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1733,6 +1733,10 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
 M: Pavan Nikhilesh <pbhagavatula@marvell.com>
 F: lib/node/
 
+Dispatcher - EXPERIMENTAL
+M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
+F: lib/dispatcher/
+
 
 Test Applications
 -----------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index fdeda13932..7d0cad9fed 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -155,6 +155,7 @@ The public API headers are grouped by topics:
 
 - **classification**
   [reorder](@ref rte_reorder.h),
+  [dispatcher](@ref rte_dispatcher.h),
   [distributor](@ref rte_distributor.h),
   [EFD](@ref rte_efd.h),
   [ACL](@ref rte_acl.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index df801d32f9..93709e1d2c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dispatcher \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 9319c86cd8..b5c5073018 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -80,6 +80,11 @@ New Features
   device is different from the Tx Ethernet device with respective driver
   callback functions in ``rte_eth_recycle_mbufs``.
 
+* **Added dispatcher library.**
+
+  Added dispatcher library, whose purpose is to help decouple different
+  parts (modules) of an eventdev-based application.
+
 * **Updated Solarflare net driver.**
 
   * Added support for transfer flow action ``INDIRECT`` with subtype ``VXLAN_ENCAP``.
diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
new file mode 100644
index 0000000000..ffaef26a6d
--- /dev/null
+++ b/lib/dispatcher/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023 Ericsson AB
+
+if is_windows
+    build = false
+    reason = 'not supported on Windows'
+    subdir_done()
+endif
+
+sources = files('rte_dispatcher.c')
+headers = files('rte_dispatcher.h')
+
+deps += ['eventdev']
diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
new file mode 100644
index 0000000000..83e80ede96
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.c
@@ -0,0 +1,691 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_dispatcher.h>
+
+#define EVD_MAX_PORTS_PER_LCORE 4
+#define EVD_MAX_HANDLERS 32
+#define EVD_MAX_FINALIZERS 16
+#define EVD_AVG_PRIO_INTERVAL 2000
+#define EVD_SERVICE_NAME "dispatcher"
+
+struct rte_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_dispatcher_handler {
+	int id;
+	rte_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_dispatcher_finalizer {
+	int id;
+	rte_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
+	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
+	struct rte_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_dispatcher {
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
+};
+
+static int
+evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
+	const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_idx)
+{
+	struct rte_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		evd_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follow some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
+			EVD_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
+static inline void
+evd_dispatch_events(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore,
+	struct rte_dispatcher_lcore_port *port,
+	struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = evd_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		evd_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_batch_count++;
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+evd_port_dequeue(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore,
+	struct rte_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		evd_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+evd_lcore_process(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += evd_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
+static int32_t
+evd_process(void *userdata)
+{
+	struct rte_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = evd_lcore_process(dispatcher, lcore);
+
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+evd_service_register(struct rte_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = evd_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, sizeof(service.name), EVD_SERVICE_NAME);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+	if (rc != 0)
+		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+evd_service_unregister(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+	if (rc != 0)
+		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+struct rte_dispatcher *
+rte_dispatcher_create(uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	*dispatcher = (struct rte_dispatcher) {
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = evd_service_register(dispatcher);
+	if (rc < 0) {
+		rte_free(dispatcher);
+		rte_errno = -rc;
+		return NULL;
+	}
+
+	return dispatcher;
+}
+
+int
+rte_dispatcher_free(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	if (dispatcher == NULL)
+		return 0;
+
+	rc = evd_service_unregister(dispatcher);
+	if (rc != 0)
+		return rc;
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+uint32_t
+rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher)
+{
+	return dispatcher->service_id;
+}
+
+static int
+lcore_port_index(struct rte_dispatcher_lcore *lcore,
+	uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, uint16_t batch_size, uint64_t timeout,
+	unsigned int lcore_id)
+{
+	struct rte_dispatcher_lcore *lcore;
+	struct rte_dispatcher_lcore_port *port;
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, unsigned int lcore_id)
+{
+	struct rte_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_dispatcher_lcore_port *port;
+	struct rte_dispatcher_lcore_port *last;
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_dispatcher_handler *
+evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore, int handler_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+	struct rte_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
+	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
+		return -1;
+
+	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
+	const struct rte_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+evd_install_handler(struct rte_dispatcher *dispatcher,
+	const struct rte_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		evd_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_dispatcher_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_match_t match_fun, void *match_data,
+	rte_dispatcher_process_t process_fun, void *process_data)
+{
+	struct rte_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	handler.id = evd_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	evd_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_id)
+{
+	struct rte_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid handler id %d\n", handler_id);
+		return -EINVAL;
+	}
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+evd_uninstall_handler(struct rte_dispatcher *dispatcher, int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = evd_lcore_uninstall_handler(lcore, handler_id);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id)
+{
+	return evd_uninstall_handler(dispatcher, handler_id);
+}
+
+static struct rte_dispatcher_finalizer *
+evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
+		       int handler_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == handler_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_dispatcher_finalizer *
+evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = evd_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_finalize_t finalize_fun, void *finalize_data)
+{
+	struct rte_dispatcher_finalizer *finalizer;
+
+	finalizer = evd_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
+	int finalizer_id)
+{
+	struct rte_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, finalizer_id);
+
+	if (unreg_finalizer == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid finalizer id %d\n", finalizer_id);
+		return -EINVAL;
+	}
+
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static int
+evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
+{
+	int rc;
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_start(struct rte_dispatcher *dispatcher)
+{
+	return evd_set_service_runstate(dispatcher, 1);
+}
+
+int
+rte_dispatcher_stop(struct rte_dispatcher *dispatcher)
+{
+	return evd_set_service_runstate(dispatcher, 0);
+}
+
+static void
+evd_aggregate_stats(struct rte_dispatcher_stats *result,
+	const struct rte_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_batch_count += part->ev_batch_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+void
+rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_stats *stats)
+{
+	unsigned int lcore_id;
+
+	*stats = (struct rte_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		const struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		evd_aggregate_stats(stats, &lcore->stats);
+	}
+}
+
+void
+rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		lcore->stats = (struct rte_dispatcher_stats) {};
+	}
+}
diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
new file mode 100644
index 0000000000..1f2faaaee8
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_DISPATCHER_H__
+#define __RTE_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Dispatcher
+ *
+ * @warning
+ * @b EXPERIMENTAL:
+ * All functions in this file may be changed or removed without prior notice.
+ *
+ * The purpose of the dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <rte_compat.h>
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ *
+ * @return
+ *   Returns true in case this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ */
+
+typedef void
+(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+	struct rte_event *events, uint16_t num, void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the dispatcher to notify the
+ * application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_finalize_register().
+ */
+
+typedef void
+(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+	void *cb_data);
+
+/**
+ * Dispatcher statistics
+ */
+struct rte_dispatcher_stats {
+	/** Number of event dequeue calls made toward the event device. */
+	uint64_t poll_count;
+	/** Number of non-empty event batches dequeued from event device.*/
+	uint64_t ev_batch_count;
+	/** Number of events dispatched to a handler.*/
+	uint64_t ev_dispatch_count;
+	/** Number of events dropped because no handler was found. */
+	uint64_t ev_drop_count;
+};
+
+/**
+ * Create a dispatcher for the specified event device.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   A pointer to a new dispatcher instance, or NULL on failure, in which
+ *   case rte_errno is set.
+ */
+__rte_experimental
+struct rte_dispatcher *
+rte_dispatcher_create(uint8_t event_dev_id);
+
+/**
+ * Free a dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_free(struct rte_dispatcher *dispatcher);
+
+/**
+ * Retrieve the service identifier of a dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  The dispatcher service's id.
+ */
+__rte_experimental
+uint32_t
+rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the dispatcher service is mapped (with rte_service_map_lcore_set())
+ * to a lcore to which no ports are bound, the service function will be a
+ * no-operation.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on lcore
+ * specified by @c lcore_id.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, uint16_t batch_size, uint64_t timeout,
+	unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * lcore specified by @c lcore_id.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of events to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events where no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order of which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different deliver callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] are dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * matching function for registration B for ev1. In that scenario, the
+ * dispatcher may choose to deliver first [ev0, ev2] using A's deliver
+ * function, and then [ev1] to B - or vice versa.
+ *
+ * rte_dispatcher_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the event
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_match_t match_fun, void *match_cb_data,
+	rte_dispatcher_process_t process_fun, void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_dispatcher_finalize_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_finalize_t finalize_fun, void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher, int reg_id);
+
+/**
+ * Start a dispatcher instance.
+ *
+ * Enables the dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_dispatcher_start().
+ *
+ * For the dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. The dispatcher's
+ * service id is retrieved using rte_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
+ * starting the dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_start(struct rte_dispatcher *dispatcher);
+
+/**
+ * Stop a running dispatcher instance.
+ *
+ * Disables the dispatcher service.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_dispatcher_stop(struct rte_dispatcher *dispatcher);
+
+/**
+ * Retrieve statistics for a dispatcher instance.
+ *
+ * This function is MT safe and may be called by any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ */
+__rte_experimental
+void
+rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_stats *stats);
+
+/**
+ * Reset statistics for a dispatcher instance.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but may not produce the correct result if the
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ */
+__rte_experimental
+void
+rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DISPATCHER__ */
diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
new file mode 100644
index 0000000000..44585e4f15
--- /dev/null
+++ b/lib/dispatcher/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	# added in 23.11
+	rte_dispatcher_bind_port_to_lcore;
+	rte_dispatcher_create;
+	rte_dispatcher_finalize_register;
+	rte_dispatcher_finalize_unregister;
+	rte_dispatcher_free;
+	rte_dispatcher_register;
+	rte_dispatcher_service_id_get;
+	rte_dispatcher_start;
+	rte_dispatcher_stats_get;
+	rte_dispatcher_stats_reset;
+	rte_dispatcher_stop;
+	rte_dispatcher_unbind_port_from_lcore;
+	rte_dispatcher_unregister;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index cf4aa63630..59d381bf7a 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -35,6 +35,7 @@ libraries = [
         'distributor',
         'efd',
         'eventdev',
+        'dispatcher', # dispatcher depends on eventdev
         'gpudev',
         'gro',
         'gso',
@@ -81,6 +82,7 @@ optional_libs = [
         'cfgfile',
         'compressdev',
         'cryptodev',
+        'dispatcher',
         'distributor',
         'dmadev',
         'efd',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v7 2/3] test: add dispatcher test suite
  2023-10-11  7:16                                               ` [PATCH v7 0/3] Add " Mattias Rönnblom
  2023-10-11  7:16                                                 ` [PATCH v7 1/3] lib: introduce " Mattias Rönnblom
@ 2023-10-11  7:16                                                 ` Mattias Rönnblom
  2023-10-11  7:17                                                 ` [PATCH v7 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-11  7:16 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Add unit tests for the dispatcher.

--

PATCH v7:
 o Skip (not fail) tests in case too few lcores are available or if
   the DSW event device is not available. (David Marchand)
 o Properly clean up resources in the above-mentioned scenarios.

PATCH v6:
 o Register test as "fast". (David Marchand)
 o Use single tab as indentation for continuation lines in multiple-line
   function prototypes. (David Marchand)
 o Add Signed-off-by line. (David Marchand)
 o Use DPDK atomics wrapper API instead of C11 atomics.

PATCH v5:
 o Update test suite to use pointer and not integer id when calling
   dispatcher functions.

PATCH v3:
 o Adapt the test suite to dispatcher API name changes.

PATCH v2:
 o Test finalize callback functionality.
 o Test handler and finalizer count upper limits.
 o Add statistics reset test.
 o Make sure dispatcher supply the proper event dev id and port id back
   to the application.

PATCH:
 o Extend test to cover often-used handler optimization feature.

RFC v4:
 o Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 MAINTAINERS                |    1 +
 app/test/meson.build       |    1 +
 app/test/test_dispatcher.c | 1074 ++++++++++++++++++++++++++++++++++++
 3 files changed, 1076 insertions(+)
 create mode 100644 app/test/test_dispatcher.c

diff --git a/MAINTAINERS b/MAINTAINERS
index a4372701c4..262401d43d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1736,6 +1736,7 @@ F: lib/node/
 Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
+F: app/test/test_dispatcher.c
 
 
 Test Applications
diff --git a/app/test/meson.build b/app/test/meson.build
index bf9fc90612..ace10327f8 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -59,6 +59,7 @@ source_file_deps = {
     'test_cycles.c': [],
     'test_debug.c': [],
     'test_devargs.c': ['kvargs'],
+    'test_dispatcher.c': ['dispatcher'],
     'test_distributor.c': ['distributor'],
     'test_distributor_perf.c': ['distributor'],
     'test_dmadev.c': ['dmadev', 'bus_vdev'],
diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c
new file mode 100644
index 0000000000..f9dc097f32
--- /dev/null
+++ b/app/test/test_dispatcher.c
@@ -0,0 +1,1074 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+#include <rte_stdatomic.h>
+
+#include "test.h"
+
+#define NUM_WORKERS 3
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+#define MIN_LCORES (NUM_SERVICE_CORES + 1)
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct cb_count {
+	uint8_t expected_event_dev_id;
+	uint8_t expected_event_port_id[RTE_MAX_LCORE];
+	RTE_ATOMIC(int) count;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	struct rte_dispatcher *dispatcher;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	int never_match_reg_id;
+	uint64_t never_match_count;
+	struct cb_count never_process_count;
+
+	struct app_queue queues[NUM_QUEUES];
+
+	int finalize_reg_id;
+	struct cb_count finalize_count;
+
+	bool running;
+
+	RTE_ATOMIC(int) completed_events;
+	RTE_ATOMIC(int) errors;
+};
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher = rte_dispatcher_create(app->event_dev_id);
+
+	TEST_ASSERT(app->dispatcher != NULL, "Unable to create event "
+		    "dispatcher");
+
+	app->dispatcher_service_id =
+		rte_dispatcher_service_id_get(app->dispatcher);
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to set dispatcher service runstate");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rc = rte_dispatcher_free(app->dispatcher);
+	TEST_ASSERT_SUCCESS(rc, "Error freeing dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	app->never_process_count.expected_event_dev_id =
+		app->event_dev_id;
+	app->finalize_count.expected_event_dev_id =
+		app->event_dev_id;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		uint8_t port_id = WORKER_PORT_ID(i);
+
+		int rc = rte_dispatcher_bind_port_to_lcore(
+			app->dispatcher, port_id, DEQUEUE_BURST_SIZE, 0,
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", port_id, lcore_id);
+
+		app->never_process_count.expected_event_port_id[lcore_id] =
+			port_id;
+		app->finalize_count.expected_event_port_id[lcore_id] = port_id;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_dispatcher_unbind_port_from_lcore(
+			app->dispatcher,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	rte_atomic_fetch_add_explicit(&app->errors, 1, rte_memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+	struct rte_event *in_events, uint16_t num,
+	void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
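+	/* Recover the enclosing test_app from the per-queue callback data. */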
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
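+		/* Retry until the event device accepts all forwarded events. */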
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		rte_atomic_fetch_add_explicit(&app->completed_events, num,
+					      rte_memory_order_relaxed);
+}
+
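+/*
+ * A handler which never matches any event; used to verify that the
+ * dispatcher invokes seldom-matching match functions relatively
+ * infrequently.
+ */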
+static bool
+never_match(const struct rte_event *event __rte_unused, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count)++;
+
+	return false;
+}
+
+static void
+test_app_never_process(uint8_t event_dev_id, uint8_t event_port_id,
+	struct rte_event *in_events __rte_unused, uint16_t num, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		rte_atomic_fetch_add_explicit(&count->count, num,
+					      rte_memory_order_relaxed);
+}
+
+static void
+finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		rte_atomic_fetch_add_explicit(&count->count, 1,
+					      rte_memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	app->never_match_reg_id =
+		rte_dispatcher_register(app->dispatcher, never_match,
+					&app->never_match_count,
+					test_app_never_process,
+					&app->never_process_count);
+
+	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
+		    "never-match handler");
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_dispatcher_register(app->dispatcher,
+						 match_queue, (void *)queue_id,
+						 test_app_process_queue,
+						 app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	app->finalize_reg_id =
+		rte_dispatcher_finalize_register(app->dispatcher,
+						 finalize,
+						 &app->finalize_count);
+	TEST_ASSERT(app->finalize_reg_id >= 0, "Error registering "
+		    "finalize callback");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+	int rc;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	rc = rte_dispatcher_unregister(app->dispatcher, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+	int rc;
+
+	if (app->never_match_reg_id >= 0) {
+		rc = rte_dispatcher_unregister(app->dispatcher,
+						     app->never_match_reg_id);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
+				    "handler");
+		app->never_match_reg_id = -1;
+	}
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		rc = test_app_unregister_callback(app, i);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	if (app->finalize_reg_id >= 0) {
+		rc = rte_dispatcher_finalize_unregister(
+			app->dispatcher, app->finalize_reg_id
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalize "
+				    "callback");
+		app->finalize_reg_id = -1;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_start(app->dispatcher);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to start the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_dispatcher_stop(app->dispatcher);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to stop the event dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_reset_dispatcher_stats(struct test_app *app)
+{
+	struct rte_dispatcher_stats stats;
+
+	rte_dispatcher_stats_reset(app->dispatcher);
+
+	memset(&stats, 0xff, sizeof(stats));
+
+	rte_dispatcher_stats_get(app->dispatcher, &stats);
+
+	TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
+			  "not zero");
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_start_service_cores(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_start_dispatcher(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	app->running = true;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_stop_dispatcher(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_stop_service_cores(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	if (rte_lcore_count() < MIN_LCORES) {
+		printf("Not enough cores for dispatcher_autotest; expecting at "
+		       "least %d.\n", MIN_LCORES);
+		return TEST_SKIPPED;
+	}
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_free_app;
+
+	rc = test_app_create_dispatcher(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_teardown_event_dev;
+
+	rc = test_app_setup_service_cores(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_free_dispatcher;
+
+	rc = test_app_register_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_teardown_service_cores;
+
+	rc = test_app_bind_ports(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_unregister_callbacks;
+
+	return TEST_SUCCESS;
+
+err_unregister_callbacks:
+	test_app_unregister_callbacks(test_app);
+err_teardown_service_cores:
+	test_app_teardown_service_cores(test_app);
+err_free_dispatcher:
+	test_app_free_dispatcher(test_app);
+err_teardown_event_dev:
+	test_app_teardown_event_dev(test_app);
+err_free_app:
+	test_app_free(test_app);
+
+	test_app = NULL;
+
+	return rc;
+}
+
+static void test_teardown(void)
+{
+	if (test_app == NULL)
+		return;
+
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return rte_atomic_load_explicit(&app->completed_events,
+					rte_memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return rte_atomic_load_explicit(&app->errors, rte_memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
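+		/* Retry until the event device accepts the whole batch. */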
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_dispatcher_stats stats;
+	rte_dispatcher_stats_get(test_app->dispatcher, &stats);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
+			  "Never-match handler's process function has "
+			  "been called");
+
+	int finalize_count =
+		rte_atomic_load_explicit(&test_app->finalize_count.count,
+					 rte_memory_order_relaxed);
+
+	TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
+	TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
+		    "Finalize count larger than event count");
+
+	TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
+			  "%"PRIu64" batches dequeued, but finalize called %d "
+			  "times", stats.ev_batch_count, finalize_count);
+
+	/*
+	 * The event dispatcher should call often-matching match functions
+	 * more often, and thus this never-matching match function should
+	 * be called relatively infrequently.
+	 */
+	TEST_ASSERT(test_app->never_match_count <
+		    (stats.ev_dispatch_count / 4),
+		    "Never-matching match function called suspiciously often");
+
+	rc = test_app_reset_dispatcher_stats(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue;
+	struct rte_dispatcher_stats stats;
+
+	unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
+
+	rc = test_app_start(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rte_dispatcher_stats_get(test_app->dispatcher, &stats);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+#define MORE_THAN_MAX_HANDLERS 1000
+#define MIN_HANDLERS 32
+
+static int
+test_many_handler_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_HANDLERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
+		reg_id = rte_dispatcher_register(test_app->dispatcher,
+						 never_match, NULL,
+						 test_app_never_process, NULL);
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
+		    "after %d handler registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_unregister(test_app->dispatcher,
+					       reg_ids[i]);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+dummy_finalize(uint8_t event_dev_id __rte_unused,
+	       uint8_t event_port_id __rte_unused,
+	       void *cb_data __rte_unused)
+{
+}
+
+#define MORE_THAN_MAX_FINALIZERS 1000
+#define MIN_FINALIZERS 16
+
+static int
+test_many_finalize_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_FINALIZERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
+		reg_id = rte_dispatcher_finalize_register(
+			test_app->dispatcher, dummy_finalize, NULL
+		);
+
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
+		    "already after %d registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_finalize_unregister(
+			test_app->dispatcher, reg_ids[i]
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_handler_registrations),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_finalize_registrations),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v7 3/3] doc: add dispatcher programming guide
  2023-10-11  7:16                                               ` [PATCH v7 0/3] Add " Mattias Rönnblom
  2023-10-11  7:16                                                 ` [PATCH v7 1/3] lib: introduce " Mattias Rönnblom
  2023-10-11  7:16                                                 ` [PATCH v7 2/3] test: add dispatcher test suite Mattias Rönnblom
@ 2023-10-11  7:17                                                 ` Mattias Rönnblom
  2 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-11  7:17 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Provide programming guide for the dispatcher library.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

PATCH v7:
 o Mark pseudo code blocks as being type "none", to avoid Sphinx failures
   on non-Ubuntu systems. (David Marchand)
 o "Necessarily" necessarily needs to be spelled just so. (David Marchand)

PATCH v6:
 o Eliminate unneeded white space in code blocks. (David Marchand)

PATCH v5:
 o Update guide to match API changes related to dispatcher ids.

PATCH v3:
 o Adapt guide to the dispatcher API name changes.

PATCH:
 o Improve grammar and spelling.

RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 MAINTAINERS                              |   1 +
 doc/guides/prog_guide/dispatcher_lib.rst | 433 +++++++++++++++++++++++
 doc/guides/prog_guide/index.rst          |   1 +
 3 files changed, 435 insertions(+)
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst

diff --git a/MAINTAINERS b/MAINTAINERS
index 262401d43d..748c15cfe9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1737,6 +1737,7 @@ Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
 F: app/test/test_dispatcher.c
+F: doc/guides/prog_guide/dispatcher_lib.rst
 
 
 Test Applications
diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst
new file mode 100644
index 0000000000..6de1ea78b0
--- /dev/null
+++ b/doc/guides/prog_guide/dispatcher_lib.rst
@@ -0,0 +1,433 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Dispatcher
+==========
+
+Overview
+--------
+
+The purpose of the dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The dispatcher replaces the conditional logic that follows an event
+device dequeue operation, where events are dispatched to different
+parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int n;
+
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+                                        MAX_BURST, 0);
+
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example,
+``module_a_process()`` is broken into two processing stages — a
+module-internal affair — the shared conditional code must be updated
+to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive the event, using
+  a set of application-provided, per-handler event matching callback
+  functions.
+* Deliver the events matching a particular handler to that handler,
+  using its process callback.
+
+Had the above application made use of the dispatcher, the code
+relevant for its module A might have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+           return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            const struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_register(dispatcher, module_a_match, NULL,
+                            module_a_process_events, module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or
+phrased differently, what replaces the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is
+run as a DPDK :doc:`Service <service_cores>`.
+
+The dispatcher is a layer between the application and the event device
+in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
+
+Dispatcher Creation
+-------------------
+
+A dispatcher is created using ``rte_dispatcher_create()``.
+
+The event device must be configured before the dispatcher is created.
+
+Usually, only one dispatcher is needed per event device. A dispatcher
+handles exactly one event device.
+
+A dispatcher is freed using the ``rte_dispatcher_free()``
+function. The dispatcher's service functions must not be running on
+any lcore at the point of this call.
+
+Event Port Binding
+------------------
+
+To be able to dequeue events, the dispatcher must know which event
+ports are to be used, on all the lcores it uses. The application
+provides this information using
+``rte_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
+The ``rte_dispatcher_unbind_port_from_lcore()`` is used to undo
+this operation.
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the dispatcher's
+service function is running on any lcore.
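+
+Below is a minimal sketch of what such deployment-time binding may
+look like. The one-event-port-per-service-lcore scheme, as well as the
+``num_service_lcores``, ``event_ports`` and ``service_lcores``
+variables, are assumptions made for the sake of the example.
+
+.. code-block:: c
+
+    unsigned int i;
+
+    /* Bind the i:th event port to the i:th service lcore. */
+    for (i = 0; i < num_service_lcores; i++)
+            rte_dispatcher_bind_port_to_lcore(dispatcher,
+                                              event_ports[i], MAX_BURST,
+                                              0, service_lcores[i]);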
+
+Event Handlers
+--------------
+
+The dispatcher handler is an interface between the dispatcher and an
+application module, used to route events to the appropriate part of
+the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_dispatcher_match_t`` callback, whose job is to
+  decide if an event is to be the property of this handler.
+* The ``rte_dispatcher_process_t``, which is used by the
+  dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the dispatcher may impose an
+upper limit on the number of handlers. In addition, installing a large
+number of handlers increases dispatcher overhead, although this does
+not necessarily translate to a system-level performance degradation. See
+the section on :ref:`Event Clustering` for more information.
+
+Handler registration and unregistration cannot safely be done while
+the dispatcher's service function is running on any lcore.
+
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides if an event should be
+delivered to this handler, or not.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
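+
+Matching need not be based on the event queue id. Below is a sketch of
+a match callback which instead considers the event's
+``sub_event_type`` field. The ``MODULE_A_SUB_EVENT_TYPE`` constant is
+an assumption made for the sake of the example.
+
+.. code-block:: c
+
+    static bool
+    module_a_match_sub_event(const struct rte_event *event, void *cb_data)
+    {
+            return event->sub_event_type == MODULE_A_SUB_EVENT_TYPE;
+    }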
+
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the dispatcher's service
+function, upon the arrival of events on the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls
+being made. For example, if the dispatcher dequeues two events from
+the event device, it may choose to find out the destination for the
+first event, and deliver it, and then continue to find out the
+destination for the second, and then deliver that event as well. The
+dispatcher may also choose a strategy where no event is delivered
+until the destination handlers for both events have been determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The dispatcher maintains the order of events destined for the same
+handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the dispatcher delivers these events to the application.
+
+The dispatcher *does not* guarantee to maintain the order of events
+delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block:: none
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The dispatcher may deliver the events in the following manner:
+
+.. code-block:: none
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The dispatcher may also choose to cluster (group) all events destined
+for ``module_b_stage_0_process()`` into one array:
+
+.. code-block:: none
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the dispatcher may not do is to reorder event
+``e1`` so that it precedes ``e0`` in the array passed to module
+B's stage 0 process callback.
+
+Although clustering requires some extra work for the dispatcher, it
+leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The dispatcher may be configured to notify one or more parts of the
+application when the matching and processing of a batch of events has
+completed.
+
+The ``rte_dispatcher_finalize_register()`` call is used to
+register a finalize callback. The function
+``rte_dispatcher_finalize_unregister()`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+modules, or a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_finalize_register(dispatcher, finalize_batch,
+                                     shared_event_buffer);
+
+The dispatcher does not track any relationship between a handler and a
+finalize callback, and all finalize callbacks will be called if (and
+only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the dispatcher's service function is running on any lcore.
+
+Service
+-------
+
+The dispatcher is a DPDK service, and is managed in a manner similar
+to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-configured dispatcher to that
+lcore.
+
+.. code-block:: c
+
+    static void
+    launch_dispatcher_core(struct rte_dispatcher *dispatcher,
+                           unsigned lcore_id)
+    {
+            uint32_t service_id;
+
+            rte_service_lcore_add(lcore_id);
+
+            service_id = rte_dispatcher_service_id_get(dispatcher);
+
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+
+            rte_service_lcore_start(lcore_id);
+
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the dispatcher must be started.
+
+.. code-block:: c
+
+    rte_dispatcher_start(dispatcher);
+
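+At shutdown, the steps are reversed. Below is a sketch, reusing the
+``service_id`` and ``lcore_id`` from the example above, and with error
+handling left out:
+
+.. code-block:: c
+
+    rte_dispatcher_stop(dispatcher);
+
+    rte_service_runstate_set(service_id, 0);
+    rte_service_map_lcore_set(service_id, lcore_id, 0);
+    rte_service_lcore_stop(lcore_id);
+
+    rte_dispatcher_free(dispatcher);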
+
+Multi Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In light of this, it makes sense to have the dispatcher service be
+the only DPDK service on all lcores used for packet processing — at
+least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the dispatcher into the
+application, were performed on the lcore even when no events were
+received, are prime targets for being converted into such auxiliary
+services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
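+
+Below is a sketch of how such a timer management task may be packaged
+as a DPDK service and registered with the service cores framework. The
+service name and the ``register_timer_service()`` helper are
+assumptions made for the sake of the example, and error handling is
+left out:
+
+.. code-block:: c
+
+    #include <rte_service_component.h>
+    #include <rte_timer.h>
+
+    static int32_t
+    timer_service_run(void *args __rte_unused)
+    {
+            rte_timer_manage();
+
+            return 0;
+    }
+
+    static uint32_t
+    register_timer_service(void)
+    {
+            struct rte_service_spec spec = {
+                    .name = "timer_manage",
+                    .callback = timer_service_run
+            };
+            uint32_t service_id;
+
+            rte_service_component_register(&spec, &service_id);
+
+            rte_service_component_runstate_set(service_id, 1);
+
+            return service_id;
+    }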
+
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to assure resource reclamation occurs even though some
+lcores currently do not process any events.
+
+If more services than the dispatcher service are mapped to a service
+lcore, it's important that the other services are well-behaved and
+don't interfere with event processing to the extent the system's
+throughput and/or latency requirements are at risk of not being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 52a6d9e7aa..ab05bd6074 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -60,6 +60,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    dispatcher_lib
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v6 2/3] test: add dispatcher test suite
  2023-10-11  6:28                                                 ` Mattias Rönnblom
@ 2023-10-11  7:26                                                   ` David Marchand
  0 siblings, 0 replies; 102+ messages in thread
From: David Marchand @ 2023-10-11  7:26 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On Wed, Oct 11, 2023 at 8:28 AM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>
> On 2023-10-10 13:56, David Marchand wrote:
> > On Mon, Oct 9, 2023 at 8:22 PM Mattias Rönnblom
> > <mattias.ronnblom@ericsson.com> wrote:
> >> +static int
> >> +test_dispatcher(void)
> >> +{
> >> +       return unit_test_suite_runner(&test_suite);
> >> +}
> >> +
> >> +REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);
> >
> > Since this test expects some lcores, wdyt of adding:
> >
> > @@ -1044,6 +1044,12 @@ static struct unit_test_suite test_suite = {
> >   static int
> >   test_dispatcher(void)
> >   {
> > +       if (rte_lcore_count() < NUM_SERVICE_CORES + 1) {
> > +               printf("Not enough cores for dispatcher_autotest,
> > expecting at least %u\n",
> > +                       NUM_SERVICE_CORES + 1);
> > +               return TEST_SKIPPED;
> > +       }
> > +
> >          return unit_test_suite_runner(&test_suite);
> >   }
> >
> > This should avoid the failures we get with some CI env.
> > (additionnally, I tested this on my laptop and the test runs fine)
> >
> >
>
> Indeed, this is a much better way than to fail the test case.
>
> I'm thinking this is best done in test_setup(), since it's related to
> the setup. In case other test cases are added that required a different
> setup, there may be no minimum lcore requirement.

This is what I had tried as a first attempt but as I hit some crashes
in the teardown step, I went with the easiest fix.

>
> You will get multiple (four, for the moment) print-outs though, in case
> you run with fewer than 4 lcores.
>
> I'll also make sure I skip (and not fail) the tests in case the DSW
> event device is not included in the build.
>

Yep, it is better like this.
Thanks for v7, I'll have a look today.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 1/3] lib: introduce dispatcher library
  2023-10-09 16:49                                                 ` Mattias Rönnblom
@ 2023-10-11 14:57                                                   ` David Marchand
  2023-10-11 20:51                                                     ` Mattias Rönnblom
  0 siblings, 1 reply; 102+ messages in thread
From: David Marchand @ 2023-10-11 14:57 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On Mon, Oct 9, 2023 at 6:50 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:

[snip]

> >>>> +static int
> >>>> +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
> >>>> +{
> >>>> +       int rc;
> >>>> +
> >>>> +       rc = rte_service_component_runstate_set(dispatcher->service_id,
> >>>> +                                               state);
> >>>> +
> >>>> +       if (rc != 0) {
> >>>> +               RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
> >>>> +                                "service component run state to %d\n", rc,
> >>>> +                                state);
> >>>> +               RTE_ASSERT(0);
> >>>
> >>> Why not propagating the error to callers?
> >>>
> >>>
> >>
> >> The root cause would be a programming error, hence an assertion is more
> >> appropriate way to deal with the situation.
> >
> > Without building RTE_ENABLE_ASSERT (disabled by default), the code
> > later in this function will still be executed.
> >
>
> If RTE_ASSERT() is not the way to assure a consistent internal library
> state, what is? RTE_VERIFY()?

The usual way in DPDK is to use RTE_VERIFY or rte_panic with the error message.
There is also libc assert().

RTE_ASSERT is more of a debug macro since it is under a build option.


But by making the library "panic" on some assertion, I have followup comments:
- what is the point of returning an int for rte_dispatcher_start() /
rte_dispatcher_stop()?
- rte_dispatcher_start() and rte_dispatcher_stop() (doxygen)
documentation needs updating, as they can't return anything but 0.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 102+ messages in thread

* Re: [PATCH v5 1/3] lib: introduce dispatcher library
  2023-10-11 14:57                                                   ` David Marchand
@ 2023-10-11 20:51                                                     ` Mattias Rönnblom
  0 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-11 20:51 UTC (permalink / raw)
  To: David Marchand
  Cc: Mattias Rönnblom, dev, Jerin Jacob, techboard,
	harry.van.haaren, Peter Nilsson, Heng Wang, Naga Harish K S V,
	Pavan Nikhilesh, Gujjar Abhinandan S, Erik Gabriel Carrillo,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Liang Ma,
	Peter Mccarthy, Zhirun Yan

On 2023-10-11 16:57, David Marchand wrote:
> On Mon, Oct 9, 2023 at 6:50 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> 
> [snip]
> 
>>>>>> +static int
>>>>>> +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
>>>>>> +{
>>>>>> +       int rc;
>>>>>> +
>>>>>> +       rc = rte_service_component_runstate_set(dispatcher->service_id,
>>>>>> +                                               state);
>>>>>> +
>>>>>> +       if (rc != 0) {
>>>>>> +               RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
>>>>>> +                                "service component run state to %d\n", rc,
>>>>>> +                                state);
>>>>>> +               RTE_ASSERT(0);
>>>>>
>>>>> Why not propagating the error to callers?
>>>>>
>>>>>
>>>>
>>>> The root cause would be a programming error, hence an assertion is
>>>> a more appropriate way to deal with the situation.
>>>
>>> Without building RTE_ENABLE_ASSERT (disabled by default), the code
>>> later in this function will still be executed.
>>>
>>
>> If RTE_ASSERT() is not the way to assure a consistent internal library
>> state, what is? RTE_VERIFY()?
> 
> The usual way in DPDK is to use RTE_VERIFY or rte_panic with the error message.
> There is also libc assert().
> 
> RTE_ASSERT is more of a debug macro since it is under a build option.
> 
> 
> But given that the library "panics" on some assertion failures, I have follow-up comments:
> - what is the point of returning an int for rte_dispatcher_start() /
> rte_dispatcher_stop()?
> - rte_dispatcher_start() and rte_dispatcher_stop() (doxygen)
> documentation needs updating, as they can't return anything but 0.
> 
> 

Those return values are purely there for reasons of symmetry, or maybe 
you could call it API consistency, with the Eventdev APIs (e.g., the 
event ethernet RX adapter). I guess that's less of an issue now that 
it's a separate library, but it's still relevant, since the programmer 
will be familiar with those APIs.

You could also argue that in the future, there may be errors which need
to be signaled to the caller. But that's a weak argument, since we don't 
know exactly what those would be (and thus they can't be documented), so 
it's still an API/ABI change, even though the function signature doesn't 
change.

Now I'm leaning toward removing the return value. Please advise. Over.

^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v8 0/3] Add dispatcher library
  2023-10-11  7:16                                                 ` [PATCH v7 1/3] lib: introduce " Mattias Rönnblom
@ 2023-10-12  8:50                                                   ` Mattias Rönnblom
  2023-10-12  8:50                                                     ` [PATCH v8 1/3] lib: introduce " Mattias Rönnblom
                                                                       ` (3 more replies)
  0 siblings, 4 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-12  8:50 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to decouple different parts
of an eventdev-based application (e.g., processing pipeline stages),
sharing the same underlying event device.

The dispatcher replaces the conditional logic (often, a switch
statement) that typically follows an event device dequeue operation,
where events are dispatched to different parts of the application
based on event meta data, such as the queue id or scheduling type.

The concept is similar to a UNIX file descriptor event loop library.
Instead of tying callback functions to fds as for example libevent
does, the dispatcher relies on application-supplied matching callback
functions to decide where to deliver events.

A dispatcher is configured to dequeue events from a specific event
device, and ties into the service core framework, to do its (and the
application's) work.

The dispatcher provides a convenient way for an eventdev-based
application to use service cores for application-level processing, and
thus for sharing those cores with other DPDK services.

Although the dispatcher adds some overhead, experience suggests that
the net effect on the application (both synthetic benchmarks and more
real-world applications) may well be positive. This is primarily due
to clustering (see programming guide) reducing cache misses.

Benchmarking indicates that the overhead is ~10 clock cycles per event
(on a large core), with a handful of often-used handlers.

The dispatcher does not support run-time reconfiguration.

The use of the dispatcher library is optional, and an eventdev-based
application may still opt to access the event device using direct
eventdev API calls, or by some other means.
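
As a rough usage sketch (illustration only, not from the patch; the
queue id, batch size, and callback names are made up, and service core
setup and error handling are elided):

    static bool
    match_rx(const struct rte_event *event, void *cb_data)
    {
            return event->queue_id == RX_QUEUE_ID;
    }

    static void
    process_rx(uint8_t event_dev_id, uint8_t event_port_id,
               struct rte_event *events, uint16_t num, void *cb_data)
    {
            /* application-level processing of the delivered events */
    }

    struct rte_dispatcher *dispatcher = rte_dispatcher_create(event_dev_id);

    rte_dispatcher_register(dispatcher, match_rx, NULL, process_rx, NULL);

    rte_dispatcher_bind_port_to_lcore(dispatcher, event_port_id,
                                      BATCH_SIZE, 0, lcore_id);

    /* Map the service retrieved with rte_dispatcher_service_id_get()
     * to one or more service lcores, then:
     */
    rte_dispatcher_start(dispatcher);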

Mattias Rönnblom (3):
  lib: introduce dispatcher library
  test: add dispatcher test suite
  doc: add dispatcher programming guide

 MAINTAINERS                              |    6 +
 app/test/meson.build                     |    1 +
 app/test/test_dispatcher.c               | 1056 ++++++++++++++++++++++
 doc/api/doxy-api-index.md                |    1 +
 doc/api/doxy-api.conf.in                 |    1 +
 doc/guides/prog_guide/dispatcher_lib.rst |  433 +++++++++
 doc/guides/prog_guide/index.rst          |    1 +
 doc/guides/rel_notes/release_23_11.rst   |    5 +
 lib/dispatcher/meson.build               |   13 +
 lib/dispatcher/rte_dispatcher.c          |  694 ++++++++++++++
 lib/dispatcher/rte_dispatcher.h          |  458 ++++++++++
 lib/dispatcher/version.map               |   20 +
 lib/meson.build                          |    2 +
 13 files changed, 2691 insertions(+)
 create mode 100644 app/test/test_dispatcher.c
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v8 1/3] lib: introduce dispatcher library
  2023-10-12  8:50                                                   ` [PATCH v8 0/3] Add " Mattias Rönnblom
@ 2023-10-12  8:50                                                     ` Mattias Rönnblom
  2023-10-12  8:50                                                     ` [PATCH v8 2/3] test: add dispatcher test suite Mattias Rönnblom
                                                                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-12  8:50 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

The purpose of the dispatcher library is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the dispatcher also provides a convenient and flexible
way for the application to use service cores for application-level
processing.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>
Reviewed-by: Heng Wang <heng.wang@ericsson.com>

--

PATCH v8:
 o Since starting and stopping a dispatcher is always successful (save
   for an inconsistent dispatcher state), have the start and stop
   calls return void.
 o Fix merge conflict in the release notes file.

PATCH v6:
 o Use single tab as indentation for continuation lines in multiple-line
   function prototypes. (David Marchand)
 o Add dispatcher library release note. (David Marchand)
 o Various indentation and spelling improvements. (David Marchand)
 o Add direct <stdint.h>, <stdbool.h> and <rte_compat.h> includes,
   instead of relying on <rte_eventdev.h>. (David Marchand)
 o Avoid Doxygen post annotations for struct fields. (David Marchand)

PATCH v5:
 o Move from using an integer id to a pointer to reference a dispatcher
   instance, to simplify the API.
 o Fix bug where dispatcher stats retrieval function erroneously depended
   on the user-supplied stats buffer being all-zero.

PATCH v4:
 o Fix bugs in handler and finalizer unregistration. (Naga Harish)
 o Return -EINVAL in cases where NULL pointers were provided in
   calls requiring non-NULL pointers. (Naga Harish)
 o Add experimental warning for the whole API. (Jerin Jacob)

PATCH v3:
 o To underline its optional character and since it does not provide
   hardware abstraction, the event dispatcher is now a separate
   library.
 o Change name from rte_event_dispatcher -> rte_dispatcher, to make it
   shorter and to avoid the rte_event_* namespace.

PATCH v2:
 o Add dequeue batch count statistic.
 o Add statistics reset function to API.
 o Clarify MT safety guarantees (or lack thereof) in the API documentation.
 o Change loop variable type in evd_lcore_get_handler_by_id() to uint16_t,
   to be consistent with similar loops elsewhere in the dispatcher.
 o Fix variable names in finalizer unregister function.

PATCH:
 o Change prefix from RED to EVD, to avoid confusion with random
   early detection.

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and cause
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 MAINTAINERS                            |   4 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/rel_notes/release_23_11.rst |   5 +
 lib/dispatcher/meson.build             |  13 +
 lib/dispatcher/rte_dispatcher.c        | 694 +++++++++++++++++++++++++
 lib/dispatcher/rte_dispatcher.h        | 458 ++++++++++++++++
 lib/dispatcher/version.map             |  20 +
 lib/meson.build                        |   2 +
 9 files changed, 1198 insertions(+)
 create mode 100644 lib/dispatcher/meson.build
 create mode 100644 lib/dispatcher/rte_dispatcher.c
 create mode 100644 lib/dispatcher/rte_dispatcher.h
 create mode 100644 lib/dispatcher/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 9af332ae6b..a7039b06dc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1734,6 +1734,10 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
 M: Pavan Nikhilesh <pbhagavatula@marvell.com>
 F: lib/node/
 
+Dispatcher - EXPERIMENTAL
+M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
+F: lib/dispatcher/
+
 
 Test Applications
 -----------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 732e2ecb28..30918995d3 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -157,6 +157,7 @@ The public API headers are grouped by topics:
 
 - **classification**
   [reorder](@ref rte_reorder.h),
+  [dispatcher](@ref rte_dispatcher.h),
   [distributor](@ref rte_distributor.h),
   [EFD](@ref rte_efd.h),
   [ACL](@ref rte_acl.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index df801d32f9..93709e1d2c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dispatcher \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 34442e9c6b..00260455b2 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -122,6 +122,11 @@ New Features
   a group's miss actions, which are the actions to be performed on packets
   that didn't match any of the flow rules in the group.
 
+* **Added dispatcher library.**
+
+  Added dispatcher library, whose purpose is to help decouple different
+  parts (modules) of an eventdev-based application.
+
 * **Updated Solarflare net driver.**
 
   * Added support for transfer flow action ``INDIRECT`` with subtype ``VXLAN_ENCAP``.
diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build
new file mode 100644
index 0000000000..ffaef26a6d
--- /dev/null
+++ b/lib/dispatcher/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023 Ericsson AB
+
+if is_windows
+    build = false
+    reason = 'not supported on Windows'
+    subdir_done()
+endif
+
+sources = files('rte_dispatcher.c')
+headers = files('rte_dispatcher.h')
+
+deps += ['eventdev']
diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c
new file mode 100644
index 0000000000..10d02edde9
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.c
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_dispatcher.h>
+
+#define EVD_MAX_PORTS_PER_LCORE 4
+#define EVD_MAX_HANDLERS 32
+#define EVD_MAX_FINALIZERS 16
+#define EVD_AVG_PRIO_INTERVAL 2000
+#define EVD_SERVICE_NAME "dispatcher"
+
+struct rte_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_dispatcher_handler {
+	int id;
+	rte_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_dispatcher_finalizer {
+	int id;
+	rte_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE];
+	struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS];
+	struct rte_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_dispatcher {
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS];
+};
+
+static int
+evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore,
+	const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+evd_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_idx)
+{
+	struct rte_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		evd_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follows some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(EVD_AVG_PRIO_INTERVAL) +
+			EVD_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
+static inline void
+evd_dispatch_events(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore,
+	struct rte_dispatcher_lcore_port *port,
+	struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[EVD_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
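+	/*
+	 * Sort the dequeued events into per-handler bursts, so that
+	 * each handler's events can be delivered back-to-back.
+	 */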
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = evd_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		evd_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_batch_count++;
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+evd_port_dequeue(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore,
+	struct rte_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		evd_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+evd_lcore_process(struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += evd_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
+static int32_t
+evd_process(void *userdata)
+{
+	struct rte_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = evd_lcore_process(dispatcher, lcore);
+
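+	/*
+	 * A return value of -EAGAIN signals to the service core
+	 * framework that no useful work was performed.
+	 */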
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+evd_service_register(struct rte_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = evd_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, sizeof(service.name), EVD_SERVICE_NAME);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+	if (rc != 0)
+		RTE_EDEV_LOG_ERR("Registration of dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+evd_service_unregister(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+	if (rc != 0)
+		RTE_EDEV_LOG_ERR("Unregistration of dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+struct rte_dispatcher *
+rte_dispatcher_create(uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_dispatcher *dispatcher;
+	int rc;
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	*dispatcher = (struct rte_dispatcher) {
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = evd_service_register(dispatcher);
+	if (rc < 0) {
+		rte_free(dispatcher);
+		rte_errno = -rc;
+		return NULL;
+	}
+
+	return dispatcher;
+}
+
+int
+rte_dispatcher_free(struct rte_dispatcher *dispatcher)
+{
+	int rc;
+
+	if (dispatcher == NULL)
+		return 0;
+
+	rc = evd_service_unregister(dispatcher);
+	if (rc != 0)
+		return rc;
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+uint32_t
+rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher)
+{
+	return dispatcher->service_id;
+}
+
+static int
+lcore_port_index(struct rte_dispatcher_lcore *lcore,
+	uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, uint16_t batch_size, uint64_t timeout,
+	unsigned int lcore_id)
+{
+	struct rte_dispatcher_lcore *lcore;
+	struct rte_dispatcher_lcore_port *port;
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, unsigned int lcore_id)
+{
+	struct rte_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_dispatcher_lcore_port *port;
+	struct rte_dispatcher_lcore_port *last;
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_dispatcher_handler *
+evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore, int handler_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_handler_id(struct rte_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+	struct rte_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
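+	/*
+	 * The same set of handlers is installed on all lcores (although
+	 * potentially in a different order), so any lcore may serve as
+	 * reference; lcore 0 is used here.
+	 */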
+	if (reference_lcore->num_handlers == EVD_MAX_HANDLERS)
+		return -1;
+
+	while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore,
+	const struct rte_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+evd_install_handler(struct rte_dispatcher *dispatcher,
+	const struct rte_dispatcher_handler *handler)
+{
+	int i;
+
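+	/*
+	 * The handler is replicated into each lcore's own list, allowing
+	 * every lcore to reorder its list without synchronization.
+	 */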
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		evd_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_dispatcher_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_match_t match_fun, void *match_data,
+	rte_dispatcher_process_t process_fun, void *process_data)
+{
+	struct rte_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	handler.id = evd_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	evd_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore,
+	int handler_id)
+{
+	struct rte_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid handler id %d\n", handler_id);
+		return -EINVAL;
+	}
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+evd_uninstall_handler(struct rte_dispatcher *dispatcher, int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = evd_lcore_uninstall_handler(lcore, handler_id);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id)
+{
+	return evd_uninstall_handler(dispatcher, handler_id);
+}
+
+static struct rte_dispatcher_finalizer *
+evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher,
+		       int handler_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == handler_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_dispatcher_finalizer *
+evd_alloc_finalizer(struct rte_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = evd_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_finalize_t finalize_fun, void *finalize_data)
+{
+	struct rte_dispatcher_finalizer *finalizer;
+
+	finalizer = evd_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher,
+	int finalizer_id)
+{
+	struct rte_dispatcher_finalizer *unreg_finalizer;
+	int finalizer_idx;
+	uint16_t last_idx;
+
+	unreg_finalizer = evd_get_finalizer_by_id(dispatcher, finalizer_id);
+
+	if (unreg_finalizer == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid finalizer id %d\n", finalizer_id);
+		return -EINVAL;
+	}
+
+	finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (finalizer_idx != last_idx) {
+		/* move all finalizers to maintain order */
+		int n = last_idx - finalizer_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static void
+evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state)
+{
+	int rc;
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+	/*
+	 * The only cause of a runstate_set() failure is an invalid
+	 * service id, which in turn means the dispatcher instance's
+	 * state is invalid.
+	 */
+	if (rc != 0)
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+
+	RTE_VERIFY(rc == 0);
+}
+
+void
+rte_dispatcher_start(struct rte_dispatcher *dispatcher)
+{
+	evd_set_service_runstate(dispatcher, 1);
+}
+
+void
+rte_dispatcher_stop(struct rte_dispatcher *dispatcher)
+{
+	evd_set_service_runstate(dispatcher, 0);
+}
+
+static void
+evd_aggregate_stats(struct rte_dispatcher_stats *result,
+	const struct rte_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_batch_count += part->ev_batch_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+void
+rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_stats *stats)
+{
+	unsigned int lcore_id;
+
+	*stats = (struct rte_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		const struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		evd_aggregate_stats(stats, &lcore->stats);
+	}
+}
+
+void
+rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		lcore->stats = (struct rte_dispatcher_stats) {};
+	}
+}
diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h
new file mode 100644
index 0000000000..0ad039d6d5
--- /dev/null
+++ b/lib/dispatcher/rte_dispatcher.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_DISPATCHER_H__
+#define __RTE_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Dispatcher
+ *
+ * @warning
+ * @b EXPERIMENTAL:
+ * All functions in this file may be changed or removed without prior notice.
+ *
+ * The purpose of the dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <rte_compat.h>
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ *
+ * @return
+ *   Returns true in case this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_register().
+ */
+typedef void
+(*rte_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+	struct rte_event *events, uint16_t num, void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the dispatcher to notify the
+ * application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_dispatcher_finalize_register().
+ */
+typedef void
+(*rte_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+	void *cb_data);
+
+/**
+ * Dispatcher statistics
+ */
+struct rte_dispatcher_stats {
+	/** Number of event dequeue calls made toward the event device. */
+	uint64_t poll_count;
+	/** Number of non-empty event batches dequeued from the event device. */
+	uint64_t ev_batch_count;
+	/** Number of events dispatched to a handler. */
+	uint64_t ev_dispatch_count;
+	/** Number of events dropped because no handler was found. */
+	uint64_t ev_drop_count;
+};
+
+/**
+ * Create a dispatcher for the specified event device.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   A pointer to a new dispatcher instance, or NULL on failure, in which
+ *   case rte_errno is set.
+ */
+__rte_experimental
+struct rte_dispatcher *
+rte_dispatcher_create(uint8_t event_dev_id);
+
+/**
+ * Free a dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_dispatcher_free(struct rte_dispatcher *dispatcher);
+
+/**
+ * Retrieve the service identifier of a dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @return
+ *  The dispatcher service's id.
+ */
+__rte_experimental
+uint32_t
+rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the dispatcher service is mapped (with rte_service_map_lcore_set())
+ * to an lcore to which no ports are bound, the service function will be a
+ * no-operation.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the lcore
+ * specified by @c lcore_id.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, uint16_t batch_size, uint64_t timeout,
+	unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on the
+ * lcore specified by @c lcore_id.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher,
+	uint8_t event_port_id, unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn will cause
+ * processing of a particular kind of events to happen in a
+ * back-to-back manner, improving cache locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events where no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order of which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different deliver callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] is dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the dispatcher may choose to deliver first [ev0, ev2]
+ * using A's deliver function, and then [ev1] to B - or vice versa.
+ *
+ * rte_dispatcher_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the event
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_match_t match_fun, void *match_cb_data,
+	rte_dispatcher_process_t process_fun, void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or has been dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_dispatcher_finalize_register() may be called by any thread
+ * (including unregistered non-EAL threads), but not while the
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher,
+	rte_dispatcher_finalize_t finalize_fun, void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but not while the dispatcher is running on
+ * any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher, int reg_id);
+
+/**
+ * Start a dispatcher instance.
+ *
+ * Enables the dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_dispatcher_start().
+ *
+ * For the dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. A dispatcher's
+ * service id is retrieved using rte_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to
+ * starting the dispatcher.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ */
+__rte_experimental
+void
+rte_dispatcher_start(struct rte_dispatcher *dispatcher);
+
+/**
+ * Stop a running dispatcher instance.
+ *
+ * Disables the dispatcher service.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ */
+__rte_experimental
+void
+rte_dispatcher_stop(struct rte_dispatcher *dispatcher);
+
+/**
+ * Retrieve statistics for a dispatcher instance.
+ *
+ * This function is MT safe and may be called by any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ */
+__rte_experimental
+void
+rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher,
+	struct rte_dispatcher_stats *stats);
+
+/**
+ * Reset statistics for a dispatcher instance.
+ *
+ * This function may be called by any thread (including unregistered
+ * non-EAL threads), but may not produce the correct result if the
+ * dispatcher is running on any service lcore.
+ *
+ * @param dispatcher
+ *  The dispatcher instance.
+ */
+__rte_experimental
+void
+rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DISPATCHER_H__ */
diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map
new file mode 100644
index 0000000000..44585e4f15
--- /dev/null
+++ b/lib/dispatcher/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	# added in 23.11
+	rte_dispatcher_bind_port_to_lcore;
+	rte_dispatcher_create;
+	rte_dispatcher_finalize_register;
+	rte_dispatcher_finalize_unregister;
+	rte_dispatcher_free;
+	rte_dispatcher_register;
+	rte_dispatcher_service_id_get;
+	rte_dispatcher_start;
+	rte_dispatcher_stats_get;
+	rte_dispatcher_stats_reset;
+	rte_dispatcher_stop;
+	rte_dispatcher_unbind_port_from_lcore;
+	rte_dispatcher_unregister;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index cf4aa63630..59d381bf7a 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -35,6 +35,7 @@ libraries = [
         'distributor',
         'efd',
         'eventdev',
+        'dispatcher', # dispatcher depends on eventdev
         'gpudev',
         'gro',
         'gso',
@@ -81,6 +82,7 @@ optional_libs = [
         'cfgfile',
         'compressdev',
         'cryptodev',
+        'dispatcher',
         'distributor',
         'dmadev',
         'efd',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 102+ messages in thread

* [PATCH v8 2/3] test: add dispatcher test suite
  2023-10-12  8:50                                                   ` [PATCH v8 0/3] Add " Mattias Rönnblom
  2023-10-12  8:50                                                     ` [PATCH v8 1/3] lib: introduce " Mattias Rönnblom
@ 2023-10-12  8:50                                                     ` Mattias Rönnblom
  2023-10-12  8:50                                                     ` [PATCH v8 3/3] doc: add dispatcher programming guide Mattias Rönnblom
  2023-10-12 12:48                                                     ` [PATCH v8 0/3] Add dispatcher library David Marchand
  3 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-12  8:50 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Add unit tests for the dispatcher.

--

PATCH v8:
 o Adjust test code to match the fact that the dispatcher start and
   stop functions no longer return a value.

PATCH v7:
 o Skip (not fail) tests in case too few lcores are available or if
   the DSW event device is not available. (David Marchand)
 o Properly clean up resources in the above-mentioned scenarios.

PATCH v6:
 o Register test as "fast". (David Marchand)
 o Use single tab as indentation for continuation lines in multiple-line
   function prototypes. (David Marchand)
 o Add Signed-off-by line. (David Marchand)
 o Use DPDK atomics wrapper API instead of C11 atomics.

PATCH v5:
 o Update test suite to use pointer and not integer id when calling
   dispatcher functions.

PATCH v3:
 o Adapt the test suite to dispatcher API name changes.

PATCH v2:
 o Test finalize callback functionality.
 o Test handler and finalizer count upper limits.
 o Add statistics reset test.
 o Make sure dispatcher supply the proper event dev id and port id back
   to the application.

PATCH:
 o Extend test to cover often-used handler optimization feature.

RFC v4:
 o Adapt to non-const events in process function prototype.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 MAINTAINERS                |    1 +
 app/test/meson.build       |    1 +
 app/test/test_dispatcher.c | 1056 ++++++++++++++++++++++++++++++++++++
 3 files changed, 1058 insertions(+)
 create mode 100644 app/test/test_dispatcher.c

diff --git a/MAINTAINERS b/MAINTAINERS
index a7039b06dc..0e24da11fe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1737,6 +1737,7 @@ F: lib/node/
 Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
+F: app/test/test_dispatcher.c
 
 
 Test Applications
diff --git a/app/test/meson.build b/app/test/meson.build
index 20a9333c72..c238f4b21c 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -59,6 +59,7 @@ source_file_deps = {
     'test_cycles.c': [],
     'test_debug.c': [],
     'test_devargs.c': ['kvargs'],
+    'test_dispatcher.c': ['dispatcher'],
     'test_distributor.c': ['distributor'],
     'test_distributor_perf.c': ['distributor'],
     'test_dmadev.c': ['dmadev', 'bus_vdev'],
diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c
new file mode 100644
index 0000000000..6eb3f572cf
--- /dev/null
+++ b/app/test/test_dispatcher.c
@@ -0,0 +1,1056 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_dispatcher.h>
+#include <rte_eventdev.h>
+#include <rte_random.h>
+#include <rte_service.h>
+#include <rte_stdatomic.h>
+
+#include "test.h"
+
+#define NUM_WORKERS 3
+#define NUM_PORTS (NUM_WORKERS + 1)
+#define WORKER_PORT_ID(worker_idx) (worker_idx)
+#define DRIVER_PORT_ID (NUM_PORTS - 1)
+
+#define NUM_SERVICE_CORES NUM_WORKERS
+#define MIN_LCORES (NUM_SERVICE_CORES + 1)
+
+/* Eventdev */
+#define NUM_QUEUES 8
+#define LAST_QUEUE_ID (NUM_QUEUES - 1)
+#define MAX_EVENTS 4096
+#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
+#define DEQUEUE_BURST_SIZE 32
+#define ENQUEUE_BURST_SIZE 32
+
+#define NUM_EVENTS 10000000
+#define NUM_FLOWS 16
+
+#define DSW_VDEV "event_dsw0"
+
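+/* Per-queue test state. sn[] holds the next expected sequence number
+ * for each flow, and is used to verify that per-flow event ordering is
+ * maintained.
+ */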
+struct app_queue {
+	uint8_t queue_id;
+	uint64_t sn[NUM_FLOWS];
+	int dispatcher_reg_id;
+};
+
+struct cb_count {
+	uint8_t expected_event_dev_id;
+	uint8_t expected_event_port_id[RTE_MAX_LCORE];
+	RTE_ATOMIC(int) count;
+};
+
+struct test_app {
+	uint8_t event_dev_id;
+	struct rte_dispatcher *dispatcher;
+	uint32_t dispatcher_service_id;
+
+	unsigned int service_lcores[NUM_SERVICE_CORES];
+
+	int never_match_reg_id;
+	uint64_t never_match_count;
+	struct cb_count never_process_count;
+
+	struct app_queue queues[NUM_QUEUES];
+
+	int finalize_reg_id;
+	struct cb_count finalize_count;
+
+	bool running;
+
+	RTE_ATOMIC(int) completed_events;
+	RTE_ATOMIC(int) errors;
+};
+
+static struct test_app *
+test_app_create(void)
+{
+	int i;
+	struct test_app *app;
+
+	app = calloc(1, sizeof(struct test_app));
+
+	if (app == NULL)
+		return NULL;
+
+	for (i = 0; i < NUM_QUEUES; i++)
+		app->queues[i].queue_id = i;
+
+	return app;
+}
+
+static void
+test_app_free(struct test_app *app)
+{
+	free(app);
+}
+
+static int
+test_app_create_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_vdev_init(DSW_VDEV, NULL);
+	if (rc < 0)
+		return TEST_SKIPPED;
+
+	rc = rte_event_dev_get_dev_id(DSW_VDEV);
+
+	TEST_ASSERT(rc >= 0, "Unable to get event device id for " DSW_VDEV);
+
+	app->event_dev_id = (uint8_t)rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_destroy_vdev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_close(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");
+
+	rc = rte_vdev_uninit(DSW_VDEV);
+	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_event_dev(struct test_app *app)
+{
+	int rc;
+	int i;
+
+	rc = test_app_create_vdev(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_event_dev_config config = {
+		.nb_event_queues = NUM_QUEUES,
+		.nb_event_ports = NUM_PORTS,
+		.nb_events_limit = MAX_EVENTS,
+		.nb_event_queue_flows = 64,
+		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
+		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	rc = rte_event_dev_configure(app->event_dev_id, &config);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");
+
+	struct rte_event_queue_conf queue_config = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 64
+	};
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		uint8_t queue_id = i;
+
+		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
+					   &queue_config);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
+	}
+
+	struct rte_event_port_conf port_config = {
+		.new_event_threshold = NEW_EVENT_THRESHOLD,
+		.dequeue_depth = DEQUEUE_BURST_SIZE,
+		.enqueue_depth = ENQUEUE_BURST_SIZE
+	};
+
+	for (i = 0; i < NUM_PORTS; i++) {
+		uint8_t event_port_id = i;
+
+		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
+					  &port_config);
+		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
+				    event_port_id);
+
+		if (event_port_id == DRIVER_PORT_ID)
+			continue;
+
+		rc = rte_event_port_link(app->event_dev_id, event_port_id,
+					 NULL, NULL, 0);
+
+		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
+				  event_port_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_event_dev(struct test_app *app)
+{
+	return test_app_destroy_vdev(app);
+}
+
+static int
+test_app_start_event_dev(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_event_dev_start(app->event_dev_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_stop_event_dev(struct test_app *app)
+{
+	rte_event_dev_stop(app->event_dev_id);
+}
+
+static int
+test_app_create_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	app->dispatcher = rte_dispatcher_create(app->event_dev_id);
+
+	TEST_ASSERT(app->dispatcher != NULL, "Unable to create event "
+		    "dispatcher");
+
+	app->dispatcher_service_id =
+		rte_dispatcher_service_id_get(app->dispatcher);
+
+	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
+			    "stats");
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to set dispatcher service runstate");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_free_dispatcher(struct test_app *app)
+{
+	int rc;
+
+	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");
+
+	rc = rte_dispatcher_free(app->dispatcher);
+	TEST_ASSERT_SUCCESS(rc, "Error freeing dispatcher");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_bind_ports(struct test_app *app)
+{
+	int i;
+
+	app->never_process_count.expected_event_dev_id =
+		app->event_dev_id;
+	app->finalize_count.expected_event_dev_id =
+		app->event_dev_id;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		uint8_t port_id = WORKER_PORT_ID(i);
+
+		int rc = rte_dispatcher_bind_port_to_lcore(
+			app->dispatcher, port_id, DEQUEUE_BURST_SIZE, 0,
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
+				    "to lcore %d", port_id, lcore_id);
+
+		app->never_process_count.expected_event_port_id[lcore_id] =
+			port_id;
+		app->finalize_count.expected_event_port_id[lcore_id] = port_id;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unbind_ports(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_WORKERS; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+
+		int rc = rte_dispatcher_unbind_port_from_lcore(
+			app->dispatcher,
+			WORKER_PORT_ID(i),
+			lcore_id
+		);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
+				    "from lcore %d", WORKER_PORT_ID(i),
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static bool
+match_queue(const struct rte_event *event, void *cb_data)
+{
+	uintptr_t queue_id = (uintptr_t)cb_data;
+
+	return event->queue_id == queue_id;
+}
+
+static int
+test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++)
+		if (app->service_lcores[i] == lcore_id)
+			return i;
+
+	return -1;
+}
+
+static int
+test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
+{
+	int worker;
+
+	worker = test_app_get_worker_index(app, lcore_id);
+
+	if (worker < 0)
+		return -1;
+
+	return WORKER_PORT_ID(worker);
+}
+
+static void
+test_app_queue_note_error(struct test_app *app)
+{
+	rte_atomic_fetch_add_explicit(&app->errors, 1, rte_memory_order_relaxed);
+}
+
+static void
+test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
+	struct rte_event *in_events, uint16_t num,
+	void *cb_data)
+{
+	struct app_queue *app_queue = cb_data;
+	struct test_app *app = container_of(app_queue, struct test_app,
+					    queues[app_queue->queue_id]);
+	unsigned int lcore_id = rte_lcore_id();
+	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
+	int event_port_id;
+	uint16_t i;
+	struct rte_event out_events[num];
+
+	event_port_id = test_app_get_worker_port(app, lcore_id);
+
+	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
+	    p_event_port_id != event_port_id) {
+		test_app_queue_note_error(app);
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct rte_event *in_event = &in_events[i];
+		struct rte_event *out_event = &out_events[i];
+		uint64_t sn = in_event->u64;
+		uint64_t expected_sn;
+
+		if (in_event->queue_id != app_queue->queue_id) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		expected_sn = app_queue->sn[in_event->flow_id]++;
+
+		if (expected_sn != sn) {
+			test_app_queue_note_error(app);
+			return;
+		}
+
+		if (intermediate_queue)
+			*out_event = (struct rte_event) {
+				.queue_id = in_event->queue_id + 1,
+				.flow_id = in_event->flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_FORWARD,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+	}
+
+	if (intermediate_queue) {
+		uint16_t n = 0;
+
+		do {
+			n += rte_event_enqueue_forward_burst(p_event_dev_id,
+							     p_event_port_id,
+							     out_events + n,
+							     num - n);
+		} while (n != num);
+	} else
+		rte_atomic_fetch_add_explicit(&app->completed_events, num,
+					      rte_memory_order_relaxed);
+}
+
+static bool
+never_match(const struct rte_event *event __rte_unused, void *cb_data)
+{
+	uint64_t *count = cb_data;
+
+	(*count)++;
+
+	return false;
+}
+
+static void
+test_app_never_process(uint8_t event_dev_id, uint8_t event_port_id,
+	struct rte_event *in_events __rte_unused, uint16_t num, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		rte_atomic_fetch_add_explicit(&count->count, num,
+					      rte_memory_order_relaxed);
+}
+
+static void
+finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
+{
+	struct cb_count *count = cb_data;
+	unsigned int lcore_id = rte_lcore_id();
+
+	if (event_dev_id == count->expected_event_dev_id &&
+	    event_port_id == count->expected_event_port_id[lcore_id])
+		rte_atomic_fetch_add_explicit(&count->count, 1,
+					      rte_memory_order_relaxed);
+}
+
+static int
+test_app_register_callbacks(struct test_app *app)
+{
+	int i;
+
+	app->never_match_reg_id =
+		rte_dispatcher_register(app->dispatcher, never_match,
+					&app->never_match_count,
+					test_app_never_process,
+					&app->never_process_count);
+
+	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
+		    "never-match handler");
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		struct app_queue *app_queue = &app->queues[i];
+		uintptr_t queue_id = app_queue->queue_id;
+		int reg_id;
+
+		reg_id = rte_dispatcher_register(app->dispatcher,
+						 match_queue, (void *)queue_id,
+						 test_app_process_queue,
+						 app_queue);
+
+		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
+			    "callback for queue %d", i);
+
+		app_queue->dispatcher_reg_id = reg_id;
+	}
+
+	app->finalize_reg_id =
+		rte_dispatcher_finalize_register(app->dispatcher,
+						 finalize,
+						 &app->finalize_count);
+	TEST_ASSERT(app->finalize_reg_id >= 0, "Error registering "
+		    "finalize callback");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
+{
+	int reg_id = app->queues[queue_id].dispatcher_reg_id;
+	int rc;
+
+	if (reg_id < 0) /* unregistered already */
+		return 0;
+
+	rc = rte_dispatcher_unregister(app->dispatcher, reg_id);
+
+	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
+			    "callback for queue %d", queue_id);
+
+	app->queues[queue_id].dispatcher_reg_id = -1;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_unregister_callbacks(struct test_app *app)
+{
+	int i;
+	int rc;
+
+	if (app->never_match_reg_id >= 0) {
+		rc = rte_dispatcher_unregister(app->dispatcher,
+						     app->never_match_reg_id);
+
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
+				    "handler");
+		app->never_match_reg_id = -1;
+	}
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		rc = test_app_unregister_callback(app, i);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	if (app->finalize_reg_id >= 0) {
+		rc = rte_dispatcher_finalize_unregister(
+			app->dispatcher, app->finalize_reg_id
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalize "
+				    "callback");
+		app->finalize_reg_id = -1;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_app_start_dispatcher(struct test_app *app)
+{
+	rte_dispatcher_start(app->dispatcher);
+}
+
+static void
+test_app_stop_dispatcher(struct test_app *app)
+{
+	rte_dispatcher_stop(app->dispatcher);
+}
+
+static int
+test_app_reset_dispatcher_stats(struct test_app *app)
+{
+	struct rte_dispatcher_stats stats;
+
+	rte_dispatcher_stats_reset(app->dispatcher);
+
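+	/* Poison the stats, to detect fields left unset by stats_get(). */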
+	memset(&stats, 0xff, sizeof(stats));
+
+	rte_dispatcher_stats_get(app->dispatcher, &stats);
+
+	TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
+			  "not zero");
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_lcore_add(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
+			    "service core", lcore_id);
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
+	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_setup_service_cores(struct test_app *app)
+{
+	int i;
+	int lcore_id = -1;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+
+		app->service_lcores[i] = lcore_id;
+	}
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		int rc;
+
+		rc = test_app_setup_service_core(app, app->service_lcores[i]);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
+{
+	int rc;
+
+	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
+	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");
+
+	rc = rte_service_lcore_del(lcore_id);
+	TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d",
+			    lcore_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_teardown_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = test_app_teardown_service_core(app, lcore_id);
+		if (rc != TEST_SUCCESS)
+			return rc;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_start(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop_service_cores(struct test_app *app)
+{
+	int i;
+
+	for (i = 0; i < NUM_SERVICE_CORES; i++) {
+		unsigned int lcore_id = app->service_lcores[i];
+		int rc;
+
+		rc = rte_service_lcore_stop(lcore_id);
+		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
+				    lcore_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_start(struct test_app *app)
+{
+	int rc;
+
+	rc = test_app_start_event_dev(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_start_service_cores(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	test_app_start_dispatcher(app);
+
+	app->running = true;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_app_stop(struct test_app *app)
+{
+	int rc;
+
+	test_app_stop_dispatcher(app);
+
+	rc = test_app_stop_service_cores(app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	test_app_stop_event_dev(app);
+
+	app->running = false;
+
+	return TEST_SUCCESS;
+}
+
+struct test_app *test_app;
+
+static int
+test_setup(void)
+{
+	int rc;
+
+	if (rte_lcore_count() < MIN_LCORES) {
+		printf("Not enough cores for dispatcher_autotest; expecting at "
+		       "least %d.\n", MIN_LCORES);
+		return TEST_SKIPPED;
+	}
+
+	test_app = test_app_create();
+	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");
+
+	rc = test_app_setup_event_dev(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_free_app;
+
+	rc = test_app_create_dispatcher(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_teardown_event_dev;
+
+	rc = test_app_setup_service_cores(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_free_dispatcher;
+
+	rc = test_app_register_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_teardown_service_cores;
+
+	rc = test_app_bind_ports(test_app);
+	if (rc != TEST_SUCCESS)
+		goto err_unregister_callbacks;
+
+	return TEST_SUCCESS;
+
+err_unregister_callbacks:
+	test_app_unregister_callbacks(test_app);
+err_teardown_service_cores:
+	test_app_teardown_service_cores(test_app);
+err_free_dispatcher:
+	test_app_free_dispatcher(test_app);
+err_teardown_event_dev:
+	test_app_teardown_event_dev(test_app);
+err_free_app:
+	test_app_free(test_app);
+
+	test_app = NULL;
+
+	return rc;
+}
+
+static void
+test_teardown(void)
+{
+	if (test_app == NULL)
+		return;
+
+	if (test_app->running)
+		test_app_stop(test_app);
+
+	test_app_teardown_service_cores(test_app);
+
+	test_app_unregister_callbacks(test_app);
+
+	test_app_unbind_ports(test_app);
+
+	test_app_free_dispatcher(test_app);
+
+	test_app_teardown_event_dev(test_app);
+
+	test_app_free(test_app);
+
+	test_app = NULL;
+}
+
+static int
+test_app_get_completed_events(struct test_app *app)
+{
+	return rte_atomic_load_explicit(&app->completed_events,
+					rte_memory_order_relaxed);
+}
+
+static int
+test_app_get_errors(struct test_app *app)
+{
+	return rte_atomic_load_explicit(&app->errors, rte_memory_order_relaxed);
+}
+
+static int
+test_basic(void)
+{
+	int rc;
+	int i;
+
+	rc = test_app_start(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	uint64_t sns[NUM_FLOWS] = { 0 };
+
+	for (i = 0; i < NUM_EVENTS;) {
+		struct rte_event events[ENQUEUE_BURST_SIZE];
+		int left;
+		int batch_size;
+		int j;
+		uint16_t n = 0;
+
+		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
+		left = NUM_EVENTS - i;
+
+		batch_size = RTE_MIN(left, batch_size);
+
+		for (j = 0; j < batch_size; j++) {
+			struct rte_event *event = &events[j];
+			uint64_t sn;
+			uint32_t flow_id;
+
+			flow_id = rte_rand_max(NUM_FLOWS);
+
+			sn = sns[flow_id]++;
+
+			*event = (struct rte_event) {
+				.queue_id = 0,
+				.flow_id = flow_id,
+				.sched_type = RTE_SCHED_TYPE_ATOMIC,
+				.op = RTE_EVENT_OP_NEW,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.u64 = sn
+			};
+		}
+
+		while (n < batch_size)
+			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
+							 DRIVER_PORT_ID,
+							 events + n,
+							 batch_size - n);
+
+		i += batch_size;
+	}
+
+	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+
+	rc = test_app_get_errors(test_app);
+	TEST_ASSERT(rc == 0, "%d errors occurred", rc);
+
+	rc = test_app_stop(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_dispatcher_stats stats;
+	rte_dispatcher_stats_get(test_app->dispatcher, &stats);
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
+			  "Invalid dispatch count");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
+			  "Never-match handler's process function has "
+			  "been called");
+
+	int finalize_count =
+		rte_atomic_load_explicit(&test_app->finalize_count.count,
+					 rte_memory_order_relaxed);
+
+	TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
+	TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
+		    "Finalize count larger than event count");
+
+	TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
+			  "%"PRIu64" batches dequeued, but finalize called %d "
+			  "times", stats.ev_batch_count, finalize_count);
+
+	/*
+	 * The event dispatcher should call often-matching match functions
+	 * more often, and thus this never-matching match function should
+	 * be called relatively infrequently.
+	 */
+	TEST_ASSERT(test_app->never_match_count <
+		    (stats.ev_dispatch_count / 4),
+		    "Never-matching match function called suspiciously often");
+
+	rc = test_app_reset_dispatcher_stats(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_drop(void)
+{
+	int rc;
+	uint8_t unhandled_queue;
+	struct rte_dispatcher_stats stats;
+
+	unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);
+
+	rc = test_app_start(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	rc = test_app_unregister_callback(test_app, unhandled_queue);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	struct rte_event event = {
+	    .queue_id = unhandled_queue,
+	    .flow_id = 0,
+	    .sched_type = RTE_SCHED_TYPE_ATOMIC,
+	    .op = RTE_EVENT_OP_NEW,
+	    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	    .u64 = 0
+	};
+
+	do {
+		rc = rte_event_enqueue_burst(test_app->event_dev_id,
+					     DRIVER_PORT_ID, &event, 1);
+	} while (rc == 0);
+
+	do {
+		rte_dispatcher_stats_get(test_app->dispatcher, &stats);
+
+		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
+	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);
+
+	rc = test_app_stop(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
+	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
+			  "Dispatch count is not zero");
+	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");
+
+	return TEST_SUCCESS;
+}
+
+#define MORE_THAN_MAX_HANDLERS 1000
+#define MIN_HANDLERS 32
+
+static int
+test_many_handler_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_HANDLERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
+		reg_id = rte_dispatcher_register(test_app->dispatcher,
+						 never_match, NULL,
+						 test_app_never_process, NULL);
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
+		    "after %d handler registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_unregister(test_app->dispatcher,
+					       reg_ids[i]);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+dummy_finalize(uint8_t event_dev_id __rte_unused,
+	       uint8_t event_port_id __rte_unused,
+	       void *cb_data __rte_unused)
+{
+}
+
+#define MORE_THAN_MAX_FINALIZERS 1000
+#define MIN_FINALIZERS 16
+
+static int
+test_many_finalize_registrations(void)
+{
+	int rc;
+	int num_regs = 0;
+	int reg_ids[MORE_THAN_MAX_FINALIZERS];
+	int reg_id;
+	int i;
+
+	rc = test_app_unregister_callbacks(test_app);
+	if (rc != TEST_SUCCESS)
+		return rc;
+
+	for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
+		reg_id = rte_dispatcher_finalize_register(
+			test_app->dispatcher, dummy_finalize, NULL
+		);
+
+		if (reg_id < 0)
+			break;
+
+		reg_ids[num_regs++] = reg_id;
+	}
+
+	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
+			  "%d but was %d", -ENOMEM, reg_id);
+	TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
+		    "already after %d registrations.", num_regs);
+
+	for (i = 0; i < num_regs; i++) {
+		rc = rte_dispatcher_finalize_unregister(
+			test_app->dispatcher, reg_ids[i]
+		);
+		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
+				    reg_ids[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite test_suite = {
+	.suite_name = "Event dispatcher test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(test_setup, test_teardown, test_basic),
+		TEST_CASE_ST(test_setup, test_teardown, test_drop),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_handler_registrations),
+		TEST_CASE_ST(test_setup, test_teardown,
+			     test_many_finalize_registrations),
+		TEST_CASES_END()
+	}
+};
+
+static int
+test_dispatcher(void)
+{
+	return unit_test_suite_runner(&test_suite);
+}
+
+REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);
-- 
2.34.1



* [PATCH v8 3/3] doc: add dispatcher programming guide
  2023-10-12  8:50                                                   ` [PATCH v8 0/3] Add " Mattias Rönnblom
  2023-10-12  8:50                                                     ` [PATCH v8 1/3] lib: introduce " Mattias Rönnblom
  2023-10-12  8:50                                                     ` [PATCH v8 2/3] test: add dispatcher test suite Mattias Rönnblom
@ 2023-10-12  8:50                                                     ` Mattias Rönnblom
  2023-10-12 12:48                                                     ` [PATCH v8 0/3] Add dispatcher library David Marchand
  3 siblings, 0 replies; 102+ messages in thread
From: Mattias Rönnblom @ 2023-10-12  8:50 UTC (permalink / raw)
  To: dev, david.marchand
  Cc: Jerin Jacob, techboard, harry.van.haaren, hofors, Peter Nilsson,
	Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan, Mattias Rönnblom

Provide programming guide for the dispatcher library.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

PATCH v7:
 o Mark pseudo code blocks as being type "none", to avoid Sphinx failures
   on non-Ubuntu systems. (David Marchand)
 o "Necessarily" necessarily needs to be spelled just so. (David Marchand)

PATCH v6:
 o Eliminate unneeded white space in code blocks. (David Marchand)

PATCH v5:
 o Update guide to match API changes related to dispatcher ids.

PATCH v3:
 o Adapt guide to the dispatcher API name changes.

PATCH:
 o Improve grammar and spelling.

RFC v4:
 o Extend event matching section of the programming guide.
 o Improve grammar and spelling.
---
 MAINTAINERS                              |   1 +
 doc/guides/prog_guide/dispatcher_lib.rst | 433 +++++++++++++++++++++++
 doc/guides/prog_guide/index.rst          |   1 +
 3 files changed, 435 insertions(+)
 create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst

diff --git a/MAINTAINERS b/MAINTAINERS
index 0e24da11fe..affb4b9410 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1738,6 +1738,7 @@ Dispatcher - EXPERIMENTAL
 M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
 F: lib/dispatcher/
 F: app/test/test_dispatcher.c
+F: doc/guides/prog_guide/dispatcher_lib.rst
 
 
 Test Applications
diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst
new file mode 100644
index 0000000000..6de1ea78b0
--- /dev/null
+++ b/doc/guides/prog_guide/dispatcher_lib.rst
@@ -0,0 +1,433 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Ericsson AB.
+
+Dispatcher
+==========
+
+Overview
+--------
+
+The purpose of the dispatcher is to help reduce coupling in an
+:doc:`Eventdev <eventdev>`-based DPDK application.
+
+In particular, the dispatcher addresses a scenario where an
+application's modules share the same event device and event device
+ports, and perform work on the same lcore threads.
+
+The dispatcher replaces the conditional logic that follows an event
+device dequeue operation, where events are dispatched to different
+parts of the application, typically based on fields in the
+``rte_event``, such as the ``queue_id``, ``sub_event_type``, or
+``sched_type``.
+
+Below is an excerpt from a fictitious application consisting of two
+modules: A and B. In this example, event-to-module routing is based
+purely on queue id, where module A expects all events on a certain
+queue id, and module B on two other queue ids. [#Mapping]_
+
+.. code-block:: c
+
+    for (;;) {
+            struct rte_event events[MAX_BURST];
+            unsigned int n;
+
+            n = rte_event_dequeue_burst(dev_id, port_id, events,
+                                        MAX_BURST, 0);
+
+            for (i = 0; i < n; i++) {
+                    const struct rte_event *event = &events[i];
+
+                    switch (event->queue_id) {
+                    case MODULE_A_QUEUE_ID:
+                            module_a_process(event);
+                            break;
+                    case MODULE_B_STAGE_0_QUEUE_ID:
+                            module_b_process_stage_0(event);
+                            break;
+                    case MODULE_B_STAGE_1_QUEUE_ID:
+                            module_b_process_stage_1(event);
+                            break;
+                    }
+            }
+    }
+
+The issue this example attempts to illustrate is that the centralized
+conditional logic has knowledge of things that should be private to
+the modules. In other words, this pattern leads to a violation of
+module encapsulation.
+
+The shared conditional logic contains explicit knowledge about what
+events should go where. If, for example, ``module_a_process()`` is
+broken into two processing stages (a module-internal affair), the
+shared conditional code must be updated to reflect this change.
+
+The centralized event routing code becomes an issue in larger
+applications, where modules are developed by different organizations.
+This pattern also makes module reuse across different applications more
+difficult. The part of the conditional logic relevant for a particular
+application may need to be duplicated across many module
+instantiations (e.g., applications and test setups).
+
+The dispatcher separates the mechanism (routing events to their
+receiver) from the policy (which events should go where).
+
+The basic operation of the dispatcher is as follows:
+
+* Dequeue a batch of events from the event device.
+* For each event, determine which handler should receive it, using
+  a set of application-provided, per-handler event matching callback
+  functions.
+* Deliver the events matching a particular handler to that handler,
+  using its process callback.
+
+Had the above application made use of the dispatcher, the code
+relevant for its module A might have looked something like this:
+
+.. code-block:: c
+
+    static bool
+    module_a_match(const struct rte_event *event, void *cb_data)
+    {
+            return event->queue_id == MODULE_A_QUEUE_ID;
+    }
+
+    static void
+    module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id,
+                            const struct rte_event *events,
+                            uint16_t num, void *cb_data)
+    {
+            uint16_t i;
+
+            for (i = 0; i < num; i++)
+                    module_a_process_event(&events[i]);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_register(dispatcher, module_a_match, NULL,
+                            module_a_process_events, module_a_data);
+
+(Error handling is left out of this and future example code in this
+chapter.)
+
+When the shared conditional logic is removed, a new question arises:
+which part of the system actually runs the dispatching mechanism? Or
+phrased differently, what replaces the function hosting the shared
+conditional logic (typically launched on all lcores using
+``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is
+run as a DPDK :doc:`Service <service_cores>`.
+
+The dispatcher is a layer between the application and the event device
+in the receive direction. In the transmit (i.e., item of work
+submission) direction, the application directly accesses the Eventdev
+core API (e.g., ``rte_event_enqueue_burst()``) to submit new or
+forwarded events to the event device.
+
+Dispatcher Creation
+-------------------
+
+A dispatcher is created using ``rte_dispatcher_create()``.
+
+The event device must be configured before the dispatcher is created.
+
+Usually, only one dispatcher is needed per event device. A dispatcher
+handles exactly one event device.
+
+A dispatcher is freed using the ``rte_dispatcher_free()``
+function. The dispatcher's service functions must not be running on
+any lcore at the point of this call.
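+
+Below is a brief sketch of creation and teardown, assuming an
+already-configured event device identified by ``event_dev_id``:
+
+.. code-block:: c
+
+    struct rte_dispatcher *dispatcher;
+
+    dispatcher = rte_dispatcher_create(event_dev_id);
+
+    /* ... bind event ports, register handlers, run the application ... */
+
+    /* At shutdown, with the dispatcher's service no longer running. */
+    rte_dispatcher_free(dispatcher);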
+
+Event Port Binding
+------------------
+
+To be able to dequeue events, the dispatcher must know which event
+ports are to be used, on all the lcores it uses. The application
+provides this information using
+``rte_dispatcher_bind_port_to_lcore()``.
+
+This call is typically made from the part of the application that
+deals with deployment issues (e.g., iterating lcores and determining
+which lcore does what), at the time of application initialization.
+
+The ``rte_dispatcher_unbind_port_from_lcore()`` function is used to
+undo this operation.
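+
+Below is a sketch of how the bind calls may be issued during
+initialization, with each service lcore given its own event port.
+``FIRST_PORT_ID``, ``BURST_SIZE``, ``service_lcores`` and
+``num_service_lcores`` are application-defined placeholders:
+
+.. code-block:: c
+
+    unsigned int i;
+
+    for (i = 0; i < num_service_lcores; i++)
+            rte_dispatcher_bind_port_to_lcore(dispatcher,
+                                              FIRST_PORT_ID + i,
+                                              BURST_SIZE, 0,
+                                              service_lcores[i]);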
+
+Multiple lcore threads may not safely use the same event
+port. [#Port-MT-Safety]_
+
+Event ports cannot safely be bound or unbound while the dispatcher's
+service function is running on any lcore.
+
+Event Handlers
+--------------
+
+The dispatcher handler is an interface between the dispatcher and an
+application module, used to route events to the appropriate part of
+the application.
+
+Handler Registration
+^^^^^^^^^^^^^^^^^^^^
+
+The event handler interface consists of two function pointers:
+
+* The ``rte_dispatcher_match_t`` callback, whose job is to
+  decide if the event is to be the property of this handler.
+* The ``rte_dispatcher_process_t`` callback, which is used by the
+  dispatcher to deliver matched events.
+
+An event handler registration is valid on all lcores.
+
+The functions pointed to by the match and process callbacks reside in
+the application's domain logic, with one or more handlers per
+application module.
+
+A module may use more than one event handler, for convenience or to
+further decouple sub-modules. However, the dispatcher may impose an
+upper limit on the number of handlers. In addition, installing a large
+number of handlers increases dispatcher overhead, although this does
+not necessarily translate to a system-level performance degradation. See
+the section on :ref:`Event Clustering` for more information.
+
+Handler registration and unregistration cannot safely be done while
+the dispatcher's service function is running on any lcore.
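+
+Should the handler set need to be modified after the application has
+started, the dispatcher must first be quiesced. A sketch, where
+``reg_id`` is the id returned at registration, and assuming the
+application has made sure no lcore is still executing the dispatcher's
+service function:
+
+.. code-block:: c
+
+    rte_dispatcher_stop(dispatcher);
+
+    /* ... wait until no lcore is in the service function ... */
+
+    rte_dispatcher_unregister(dispatcher, reg_id);
+
+    rte_dispatcher_start(dispatcher);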
+
+Event Matching
+^^^^^^^^^^^^^^
+
+A handler's match callback function decides whether or not an event
+should be delivered to this handler.
+
+An event is routed to no more than one handler. Thus, if a match
+function returns true, no further match functions will be invoked for
+that event.
+
+Match functions must not depend on being invoked in any particular
+order (e.g., in the handler registration order).
+
+Events failing to match any handler are dropped, and the
+``ev_drop_count`` counter is updated accordingly.
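+
+An application may detect the occurrence of unmatched events by means
+of the dispatcher statistics. A brief sketch, where
+``handle_unmatched()`` is a hypothetical application function:
+
+.. code-block:: c
+
+    struct rte_dispatcher_stats stats;
+
+    rte_dispatcher_stats_get(dispatcher, &stats);
+
+    if (stats.ev_drop_count > 0)
+            handle_unmatched(stats.ev_drop_count);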
+
+Event Delivery
+^^^^^^^^^^^^^^
+
+The handler callbacks are invoked by the dispatcher's service
+function, upon the arrival of events at the event ports bound to the
+running service lcore.
+
+A particular event is delivered to at most one handler.
+
+The application must not depend on all match callback invocations for
+a particular event batch being made prior to any process calls being
+made. For example, if the dispatcher dequeues two events from the
+event device, it may choose to find out the destination for the first
+event, and deliver it, and then continue to find out the destination
+for the second, and then deliver that event as well. The dispatcher
+may also choose a strategy where no event is delivered until the
+destination handlers for both events have been determined.
+
+The events provided in a single process call always belong to the same
+event port dequeue burst.
+
+.. _Event Clustering:
+
+Event Clustering
+^^^^^^^^^^^^^^^^
+
+The dispatcher maintains the order of events destined for the same
+handler.
+
+*Order* here refers to the order in which the events were delivered
+from the event device to the dispatcher (i.e., in the event array
+populated by ``rte_event_dequeue_burst()``), in relation to the order
+in which the dispatcher delivers these events to the application.
+
+The dispatcher *does not* guarantee to maintain the order of events
+delivered to *different* handlers.
+
+For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0,
+and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then
+consider a scenario where the following events are dequeued from the
+event device (qid is short for event queue id).
+
+.. code-block:: none
+
+    [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1]
+
+The dispatcher may deliver the events in the following manner:
+
+.. code-block:: none
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1])
+   module_a_process([e2: qid=0])
+   module_b_stage_0_process([e3: qid=1])
+
+The dispatcher may also choose to cluster (group) all events destined
+for ``module_b_stage_0_process()`` into one array:
+
+.. code-block:: none
+
+   module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1])
+   module_a_process([e2: qid=0])
+
+Here, the event ``e2`` is reordered and placed behind ``e3``, from a
+delivery order point of view. This kind of reshuffling is allowed,
+since the events are destined for different handlers.
+
+The dispatcher may also deliver ``e2`` before the three events
+destined for module B.
+
+An example of what the dispatcher may not do is to reorder event
+``e1`` so that it precedes ``e0`` in the array passed to module B's
+stage 0 process callback.
+
+Although clustering requires some extra work for the dispatcher, it
+leads to fewer process function calls. In addition, and likely more
+importantly, it improves temporal locality of memory accesses to
+handler-specific data structures in the application, which in turn may
+lead to fewer cache misses and improved overall performance.
+
+Finalize
+--------
+
+The dispatcher may be configured to notify one or more parts of the
+application when the matching and processing of a batch of events has
+completed.
+
+The ``rte_dispatcher_finalize_register()`` call is used to
+register a finalize callback. The function
+``rte_dispatcher_finalize_unregister()`` is used to remove a
+callback.
+
+The finalize hook may be used by a set of event handlers (in the same
+module, or in a set of cooperating modules) sharing an event output
+buffer, since it allows for flushing of the buffers at the last
+possible moment. In particular, it allows for buffering of
+``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next
+``rte_event_dequeue_burst()`` call is made (assuming implicit release
+is employed).
+
+The following is an example with an application-defined event output
+buffer (the ``event_buffer``):
+
+.. code-block:: c
+
+    static void
+    finalize_batch(uint8_t event_dev_id, uint8_t event_port_id,
+                   void *cb_data)
+    {
+            struct event_buffer *buffer = cb_data;
+            unsigned lcore_id = rte_lcore_id();
+            struct event_buffer_lcore *lcore_buffer =
+                    &buffer->lcore_buffer[lcore_id];
+
+            event_buffer_lcore_flush(lcore_buffer);
+    }
+
+    /* In the module's initialization code */
+    rte_dispatcher_finalize_register(dispatcher, finalize_batch,
+                                     shared_event_buffer);
+
+The dispatcher does not track any relationship between a handler and a
+finalize callback, and all finalize callbacks will be called if (and
+only if) at least one event was dequeued from the event device.
+
+Finalize callback registration and unregistration cannot safely be
+done while the dispatcher's service function is running on any lcore.
+
+Service
+-------
+
+The dispatcher is a DPDK service, and is managed in a manner similar
+to other DPDK services (e.g., an Event Timer Adapter).
+
+Below is an example of how to configure a particular lcore to serve as
+a service lcore, and to map an already-created dispatcher to that
+lcore.
+
+.. code-block:: c
+
+    static void
+    launch_dispatcher_core(struct rte_dispatcher *dispatcher,
+                           unsigned lcore_id)
+    {
+            uint32_t service_id;
+
+            rte_service_lcore_add(lcore_id);
+
+            rte_dispatcher_service_id_get(dispatcher, &service_id);
+
+            rte_service_map_lcore_set(service_id, lcore_id, 1);
+
+            rte_service_lcore_start(lcore_id);
+
+            rte_service_runstate_set(service_id, 1);
+    }
+
+As the final step, the dispatcher must be started.
+
+.. code-block:: c
+
+    rte_dispatcher_start(dispatcher);
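+
+At shutdown, the operations are performed in roughly the reverse
+order. A sketch, reusing the names from the example above and with
+error handling left out:
+
+.. code-block:: c
+
+    rte_dispatcher_stop(dispatcher);
+
+    rte_service_lcore_stop(lcore_id);
+
+    rte_service_map_lcore_set(service_id, lcore_id, 0);
+
+    rte_service_lcore_del(lcore_id);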
+
+
+Multi Service Dispatcher Lcores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an Eventdev application, most (or all) compute-intensive and
+performance-sensitive processing is done in an event-driven manner,
+where CPU cycles spent on application domain logic are the direct
+result of items of work (i.e., ``rte_event`` events) dequeued from an
+event device.
+
+In light of this, it makes sense to have the dispatcher service be
+the only DPDK service on all lcores used for packet processing — at
+least in principle.
+
+However, there is nothing in DPDK that prevents colocating other
+services with the dispatcher service on the same lcore.
+
+Tasks that, prior to the introduction of the dispatcher into the
+application, were performed on the lcore even when no events were
+received are prime targets for being converted into such auxiliary
+services, running on the dispatcher core set.
+
+An example of such a task would be the management of a per-lcore timer
+wheel (i.e., calling ``rte_timer_manage()``).
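+
+Below is a sketch of how such a task may be packaged as a service of
+its own. The service name is an arbitrary example, and error handling
+is left out:
+
+.. code-block:: c
+
+    static int32_t
+    timer_service_run(void *args __rte_unused)
+    {
+            /* Process expired timers on this lcore's timer wheel. */
+            rte_timer_manage();
+
+            return 0;
+    }
+
+    /* In the application's initialization code */
+    struct rte_service_spec spec = {
+            .name = "timer_mgmt",
+            .callback = timer_service_run,
+    };
+    uint32_t timer_service_id;
+
+    rte_service_component_register(&spec, &timer_service_id);
+    rte_service_component_runstate_set(timer_service_id, 1);
+    rte_service_map_lcore_set(timer_service_id, lcore_id, 1);
+    rte_service_runstate_set(timer_service_id, 1);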
+
+Applications employing :doc:`Read-Copy-Update (RCU) <rcu_lib>` (or a
+similar technique) may opt to have quiescent state signaling (e.g.,
+calling ``rte_rcu_qsbr_quiescent()``) factored out into a separate
+service, to ensure resource reclamation occurs even though some
+lcores currently do not process any events.
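+
+A sketch of such a quiescent state service, assuming each lcore has
+been registered with the QSBR variable using its lcore id as the
+thread id:
+
+.. code-block:: c
+
+    static int32_t
+    rcu_service_run(void *args)
+    {
+            struct rte_rcu_qsbr *qsbr = args;
+
+            /* Report this lcore as quiescent. */
+            rte_rcu_qsbr_quiescent(qsbr, rte_lcore_id());
+
+            return 0;
+    }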
+
+If services other than the dispatcher service are mapped to a service
+lcore, it's important that the other services are well-behaved and
+don't interfere with event processing to the extent that the system's
+throughput and/or latency requirements are at risk of not being met.
+
+In particular, to avoid jitter, they should have a small upper bound
+for the maximum amount of time spent in a single service function
+call.
+
+An example of a scenario with a more CPU-heavy colocated service is a
+low-lcore count deployment, where the event device lacks the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus
+requires software to feed incoming packets into the event device). In
+this case, the best performance may be achieved if the Event Ethernet
+RX and/or TX Adapters are mapped to lcores also used for event
+dispatching, since otherwise the adapter lcores would have a lot of
+idle CPU cycles.
+
+.. rubric:: Footnotes
+
+.. [#Mapping]
+   Event routing may reasonably be done based on other ``rte_event``
+   fields (or even event user data). Indeed, that's the very reason to
+   have match callback functions, instead of a simple queue
+   id-to-handler mapping scheme. Queue id-based routing serves well in
+   a simple example.
+
+.. [#Port-MT-Safety]
+   This property (which is a feature, not a bug) is inherited from the
+   core Eventdev APIs.
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 52a6d9e7aa..ab05bd6074 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -60,6 +60,7 @@ Programmer's Guide
     event_ethernet_tx_adapter
     event_timer_adapter
     event_crypto_adapter
+    dispatcher_lib
     qos_framework
     power_man
     packet_classif_access_ctrl
-- 
2.34.1



* Re: [PATCH v8 0/3] Add dispatcher library
  2023-10-12  8:50                                                   ` [PATCH v8 0/3] Add " Mattias Rönnblom
                                                                       ` (2 preceding siblings ...)
  2023-10-12  8:50                                                     ` [PATCH v8 3/3] doc: add dispatcher programming guide Mattias Rönnblom
@ 2023-10-12 12:48                                                     ` David Marchand
  3 siblings, 0 replies; 102+ messages in thread
From: David Marchand @ 2023-10-12 12:48 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: dev, Jerin Jacob, techboard, harry.van.haaren, hofors,
	Peter Nilsson, Heng Wang, Naga Harish K S V, Pavan Nikhilesh,
	Gujjar Abhinandan S, Erik Gabriel Carrillo, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Liang Ma, Peter Mccarthy,
	Zhirun Yan

On Thu, Oct 12, 2023 at 10:55 AM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> The purpose of the dispatcher library is to decouple different parts
> of an eventdev-based application (e.g., processing pipeline stages),
> sharing the same underlying event device.
>
> The dispatcher replaces the conditional logic (often, a switch
> statement) that typically follows an event device dequeue operation,
> where events are dispatched to different parts of the application
> based on event meta data, such as the queue id or scheduling type.
>
> The concept is similar to a UNIX file descriptor event loop library.
> Instead of tying callback functions to fds as for example libevent
> does, the dispatcher relies on application-supplied matching callback
> functions to decide where to deliver events.
>
> A dispatcher is configured to dequeue events from a specific event
> device, and ties into the service core framework, to do its (and the
> application's) work.
>
> The dispatcher provides a convenient way for an eventdev-based
> application to use service cores for application-level processing, and
> thus for sharing those cores with other DPDK services.
>
> Although the dispatcher adds some overhead, experience suggests that
> the net effect on the application (both synthetic benchmarks and more
> real-world applications) may well be positive. This is primarily due
> to clustering (see programming guide) reducing cache misses.
>
> Benchmarking indicates that the overhead is ~10 cc/event (on a
> large core), with a handful of often-used handlers.
>
> The dispatcher does not support run-time reconfiguration.
>
> The use of the dispatcher library is optional, and an eventdev-based
> application may still opt to access the event device using direct
> eventdev API calls, or by some other means.
>
> Mattias Rönnblom (3):
>   lib: introduce dispatcher library
>   test: add dispatcher test suite
>   doc: add dispatcher programming guide
>
>  MAINTAINERS                              |    6 +
>  app/test/meson.build                     |    1 +
>  app/test/test_dispatcher.c               | 1056 ++++++++++++++++++++++
>  doc/api/doxy-api-index.md                |    1 +
>  doc/api/doxy-api.conf.in                 |    1 +
>  doc/guides/prog_guide/dispatcher_lib.rst |  433 +++++++++
>  doc/guides/prog_guide/index.rst          |    1 +
>  doc/guides/rel_notes/release_23_11.rst   |    5 +
>  lib/dispatcher/meson.build               |   13 +
>  lib/dispatcher/rte_dispatcher.c          |  694 ++++++++++++++
>  lib/dispatcher/rte_dispatcher.h          |  458 ++++++++++
>  lib/dispatcher/version.map               |   20 +
>  lib/meson.build                          |    2 +
>  13 files changed, 2691 insertions(+)
>  create mode 100644 app/test/test_dispatcher.c
>  create mode 100644 doc/guides/prog_guide/dispatcher_lib.rst
>  create mode 100644 lib/dispatcher/meson.build
>  create mode 100644 lib/dispatcher/rte_dispatcher.c
>  create mode 100644 lib/dispatcher/rte_dispatcher.h
>  create mode 100644 lib/dispatcher/version.map
>

Thanks for this latest revision, it lgtm.
I fixed a few grammar issues in the documentation and used simple ..
note:: blocks to be consistent with the rest of our docs.

Applied, thanks Mattias.


-- 
David Marchand



end of thread, other threads:[~2023-10-12 12:48 UTC | newest]

Thread overview: 102+ messages
2021-02-18 18:30 [dpdk-dev] [RFC] eventdev: introduce event dispatcher Mattias Rönnblom
2021-02-22 15:28 ` Luca Boccassi
2021-02-26  7:48   ` Mattias Rönnblom
2021-02-25 12:32 ` Jerin Jacob
2021-02-26  8:01   ` Mattias Rönnblom
2021-03-07 13:04     ` Jerin Jacob
2021-03-15 14:44       ` Mattias Rönnblom
2021-03-15 15:00         ` Van Haaren, Harry
2021-03-22  9:50           ` Mattias Rönnblom
2021-04-09 11:32             ` [dpdk-dev] [RFC v2] " Mattias Rönnblom
2023-05-22  9:16               ` [RFC v3 0/3] Add " Mattias Rönnblom
2023-05-22  9:16                 ` [RFC v3 1/3] eventdev: introduce " Mattias Rönnblom
2023-06-09  7:08                   ` [RFC v4 0/3] Add " Mattias Rönnblom
2023-06-09  7:08                     ` [RFC v4 1/3] eventdev: introduce " Mattias Rönnblom
2023-06-09 14:34                       ` Stephen Hemminger
2023-06-09 17:51                         ` Mattias Rönnblom
2023-06-14 17:25                       ` [PATCH 0/3] Add " Mattias Rönnblom
2023-06-14 17:25                         ` [PATCH 1/3] eventdev: introduce " Mattias Rönnblom
2023-06-14 18:13                           ` Stephen Hemminger
2023-06-15  6:07                             ` Mattias Rönnblom
2023-06-16  7:40                           ` [PATCH v2 0/3] Add " Mattias Rönnblom
2023-06-16  7:40                             ` [PATCH v2 1/3] eventdev: introduce " Mattias Rönnblom
2023-08-18  6:09                               ` Jerin Jacob
2023-08-22  8:42                                 ` Mattias Rönnblom
2023-08-22 12:32                                   ` Jerin Jacob
2023-08-24 11:17                                     ` Mattias Rönnblom
2023-08-25  7:27                                       ` Jerin Jacob
2023-09-01 10:53                                 ` Mattias Rönnblom
2023-09-01 10:56                                   ` Jerin Jacob
2023-09-04 13:03                               ` [PATCH v3 0/3] Add dispatcher library Mattias Rönnblom
2023-09-04 13:03                                 ` [PATCH v3 1/3] lib: introduce " Mattias Rönnblom
2023-09-17 16:46                                   ` Naga Harish K, S V
2023-09-19  9:20                                     ` Mattias Rönnblom
2023-09-20  9:11                                       ` Naga Harish K, S V
2023-09-20  9:32                                     ` Jerin Jacob
2023-09-21  5:59                                       ` Naga Harish K, S V
2023-09-21  7:23                                         ` Jerin Jacob
2023-09-19 10:58                                   ` Jerin Jacob
2023-09-21 16:47                                     ` Mattias Rönnblom
2023-09-21 17:47                                       ` Jerin Jacob
2023-09-21 18:36                                   ` Jerin Jacob
2023-09-22  6:32                                     ` Mattias Rönnblom
2023-09-22  7:38                                   ` [PATCH v4 0/3] Add " Mattias Rönnblom
2023-09-22  7:38                                     ` [PATCH v4 1/3] lib: introduce " Mattias Rönnblom
2023-09-25  7:11                                       ` Mattias Rönnblom
2023-09-25  7:59                                         ` Bruce Richardson
2023-09-26 18:28                                         ` Jerin Jacob
2023-09-27  8:13                                           ` Bruce Richardson
2023-09-28  7:44                                             ` Mattias Rönnblom
2023-10-03 17:31                                             ` Jerin Jacob
2023-09-28  7:30                                       ` [PATCH v5 0/3] Add " Mattias Rönnblom
2023-09-28  7:30                                         ` [PATCH v5 1/3] lib: introduce " Mattias Rönnblom
2023-10-05  8:36                                           ` David Marchand
2023-10-05 10:08                                             ` Mattias Rönnblom
2023-10-06  8:46                                               ` David Marchand
2023-10-06  9:03                                                 ` Thomas Monjalon
2023-10-09 17:40                                                   ` Mattias Rönnblom
2023-10-09 16:49                                                 ` Mattias Rönnblom
2023-10-11 14:57                                                   ` David Marchand
2023-10-11 20:51                                                     ` Mattias Rönnblom
2023-10-09 18:17                                           ` [PATCH v6 0/3] Add " Mattias Rönnblom
2023-10-09 18:17                                             ` [PATCH v6 1/3] lib: introduce " Mattias Rönnblom
2023-10-11  7:16                                               ` [PATCH v7 0/3] Add " Mattias Rönnblom
2023-10-11  7:16                                                 ` [PATCH v7 1/3] lib: introduce " Mattias Rönnblom
2023-10-12  8:50                                                   ` [PATCH v8 0/3] Add " Mattias Rönnblom
2023-10-12  8:50                                                     ` [PATCH v8 1/3] lib: introduce " Mattias Rönnblom
2023-10-12  8:50                                                     ` [PATCH v8 2/3] test: add dispatcher test suite Mattias Rönnblom
2023-10-12  8:50                                                     ` [PATCH v8 3/3] doc: add dispatcher programming guide Mattias Rönnblom
2023-10-12 12:48                                                     ` [PATCH v8 0/3] Add dispatcher library David Marchand
2023-10-11  7:16                                                 ` [PATCH v7 2/3] test: add dispatcher test suite Mattias Rönnblom
2023-10-11  7:17                                                 ` [PATCH v7 3/3] doc: add dispatcher programming guide Mattias Rönnblom
2023-10-09 18:17                                             ` [PATCH v6 2/3] test: add dispatcher test suite Mattias Rönnblom
2023-10-10 11:56                                               ` David Marchand
2023-10-11  6:28                                                 ` Mattias Rönnblom
2023-10-11  7:26                                                   ` David Marchand
2023-10-10 14:02                                               ` David Marchand
2023-10-11  6:45                                                 ` Mattias Rönnblom
2023-10-09 18:17                                             ` [PATCH v6 3/3] doc: add dispatcher programming guide Mattias Rönnblom
2023-10-10 13:31                                               ` David Marchand
2023-10-11  6:38                                                 ` Mattias Rönnblom
2023-09-28  7:30                                         ` [PATCH v5 2/3] test: add dispatcher test suite Mattias Rönnblom
2023-10-05  8:36                                           ` David Marchand
2023-10-05 11:25                                             ` Mattias Rönnblom
2023-10-06  8:52                                               ` David Marchand
2023-10-09 17:16                                                 ` Mattias Rönnblom
2023-09-28  7:30                                         ` [PATCH v5 3/3] doc: add dispatcher programming guide Mattias Rönnblom
2023-10-05  8:36                                           ` David Marchand
2023-10-05 11:33                                             ` Mattias Rönnblom
2023-09-22  7:38                                     ` [PATCH v4 2/3] test: add dispatcher test suite Mattias Rönnblom
2023-09-22  7:38                                     ` [PATCH v4 3/3] doc: add dispatcher programming guide Mattias Rönnblom
2023-09-04 13:03                                 ` [PATCH v3 2/3] test: add dispatcher test suite Mattias Rönnblom
2023-09-04 13:03                                 ` [PATCH v3 3/3] doc: add dispatcher programming guide Mattias Rönnblom
2023-09-06 19:32                                 ` [PATCH v3 0/3] Add dispatcher library Stephen Hemminger
2023-09-06 20:28                                   ` Mattias Rönnblom
2023-06-16  7:40                             ` [PATCH v2 2/3] test: add event dispatcher test suite Mattias Rönnblom
2023-06-16  7:40                             ` [PATCH v2 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
2023-06-14 17:25                         ` [PATCH 2/3] test: add event dispatcher test suite Mattias Rönnblom
2023-06-14 17:25                         ` [PATCH 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
2023-06-09  7:08                     ` [RFC v4 2/3] test: add event dispatcher test suite Mattias Rönnblom
2023-06-09  7:08                     ` [RFC v4 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
2023-05-22  9:16                 ` [RFC v3 2/3] test: add event dispatcher test suite Mattias Rönnblom
2023-05-22  9:16                 ` [RFC v3 3/3] doc: add event dispatcher programming guide Mattias Rönnblom
