DPDK patches and discussions
* [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal
@ 2021-08-23 19:40 pbhagavatula
  2021-08-23 19:40 ` [dpdk-dev] [RFC 02/15] eventdev: separate internal structures pbhagavatula
                   ` (16 more replies)
  0 siblings, 17 replies; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Mattias Rönnblom, Liang Ma,
	Peter Mccarthy, Harry van Haaren, Abhinandan Gujjar,
	Ray Kinsella
  Cc: konstantin.ananyev, dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark all driver-specific functions as internal and drop the `rte`
prefix from `struct rte_eventdev_ops`, renaming it `struct eventdev_ops`.
Remove the experimental tag from internal functions.
Remove the `eventdev_pmd.h` include from non-internal header files.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c        |  2 +-
 drivers/event/cnxk/cn9k_eventdev.c         |  2 +-
 drivers/event/dlb2/dlb2.c                  |  2 +-
 drivers/event/dpaa/dpaa_eventdev.c         |  2 +-
 drivers/event/dpaa2/dpaa2_eventdev.c       |  2 +-
 drivers/event/dsw/dsw_evdev.c              |  2 +-
 drivers/event/octeontx/ssovf_evdev.c       |  2 +-
 drivers/event/octeontx2/otx2_evdev.c       |  2 +-
 drivers/event/opdl/opdl_evdev.c            |  2 +-
 drivers/event/skeleton/skeleton_eventdev.c |  2 +-
 drivers/event/sw/sw_evdev.c                |  2 +-
 lib/eventdev/eventdev_pmd.h                |  6 +++++-
 lib/eventdev/eventdev_pmd_pci.h            |  4 +++-
 lib/eventdev/eventdev_pmd_vdev.h           |  2 ++
 lib/eventdev/meson.build                   |  6 ++++++
 lib/eventdev/rte_event_crypto_adapter.h    |  1 -
 lib/eventdev/rte_eventdev.h                |  4 ++--
 lib/eventdev/version.map                   | 17 +++++++++--------
 18 files changed, 38 insertions(+), 24 deletions(-)
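
To make the rename and the new `__rte_internal` tags concrete, below is a
minimal sketch of a virtual event device probe as a driver would write it
after this patch. It assumes DPDK's usual internal-API mechanism (in-tree
drivers are built with ALLOW_INTERNAL_API defined, so functions tagged
`__rte_internal` stay callable); the `my_*` names, the `event_my` device
name and the private-data struct are hypothetical.

#include <errno.h>
#include <stdint.h>

#include <rte_bus_vdev.h>
#include <rte_lcore.h>

#include <eventdev_pmd_vdev.h>

struct my_evdev_priv {		/* hypothetical per-device private data */
	uint32_t flags;
};

/* The ops table now uses the un-prefixed struct eventdev_ops. */
static struct eventdev_ops my_evdev_ops = {0};

static int
my_evdev_probe(struct rte_vdev_device *vdev)
{
	struct rte_eventdev *dev;

	/* rte_event_pmd_vdev_init() is tagged __rte_internal after this
	 * patch, so only code built with ALLOW_INTERNAL_API can call it.
	 */
	dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
				      sizeof(struct my_evdev_priv),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	dev->dev_ops = &my_evdev_ops;
	return 0;
}

static int
my_evdev_remove(struct rte_vdev_device *vdev)
{
	return rte_event_pmd_vdev_uninit(rte_vdev_device_name(vdev));
}

static struct rte_vdev_driver my_evdev_drv = {
	.probe = my_evdev_probe,
	.remove = my_evdev_remove,
};

RTE_PMD_REGISTER_VDEV(event_my, my_evdev_drv);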

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 6f37c5bd23..697b134041 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -821,7 +821,7 @@ cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
 	return cn10k_sso_updt_tx_adptr_data(event_dev);
 }
 
-static struct rte_eventdev_ops cn10k_sso_dev_ops = {
+static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index a69edff195..9b439947e5 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1069,7 +1069,7 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
 	return cn9k_sso_updt_tx_adptr_data(event_dev);
 }
 
-static struct rte_eventdev_ops cn9k_sso_dev_ops = {
+static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 252bbd8d5e..c8742ddb2c 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -4384,7 +4384,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
 	struct dlb2_eventdev *dlb2;
 
 	/* Expose PMD's eventdev interface */
-	static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
+	static struct eventdev_ops dlb2_eventdev_entry_ops = {
 		.dev_infos_get    = dlb2_eventdev_info_get,
 		.dev_configure    = dlb2_eventdev_configure,
 		.dev_start        = dlb2_eventdev_start,
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index ec74160325..9f14390d28 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -925,7 +925,7 @@ dpaa_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }
 
-static struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct eventdev_ops dpaa_eventdev_ops = {
 	.dev_infos_get    = dpaa_event_dev_info_get,
 	.dev_configure    = dpaa_event_dev_configure,
 	.dev_start        = dpaa_event_dev_start,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 5ccf22f77f..d577f64824 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1015,7 +1015,7 @@ dpaa2_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }
 
-static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static struct eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
 	.dev_configure    = dpaa2_eventdev_configure,
 	.dev_start        = dpaa2_eventdev_start,
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 2301a4b7a0..01f060fff3 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -398,7 +398,7 @@ dsw_crypto_adapter_caps_get(const struct rte_eventdev *dev  __rte_unused,
 	return 0;
 }
 
-static struct rte_eventdev_ops dsw_evdev_ops = {
+static struct eventdev_ops dsw_evdev_ops = {
 	.port_setup = dsw_port_setup,
 	.port_def_conf = dsw_port_def_conf,
 	.port_release = dsw_port_release,
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index b93f6ec8c6..4a8c6a13a5 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -790,7 +790,7 @@ ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev,
 }
 
 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops ssovf_ops = {
+static struct eventdev_ops ssovf_ops = {
 	.dev_infos_get    = ssovf_info_get,
 	.dev_configure    = ssovf_configure,
 	.queue_def_conf   = ssovf_queue_def_conf,
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 38a6b651d9..00902ebf53 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -1596,7 +1596,7 @@ otx2_sso_close(struct rte_eventdev *event_dev)
 }
 
 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops otx2_sso_ops = {
+static struct eventdev_ops otx2_sso_ops = {
 	.dev_infos_get    = otx2_sso_info_get,
 	.dev_configure    = otx2_sso_configure,
 	.queue_def_conf   = otx2_sso_queue_def_conf,
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index cfa9733b64..739dc64c82 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -609,7 +609,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
 static int
 opdl_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_opdl_ops = {
+	static struct eventdev_ops evdev_opdl_ops = {
 		.dev_configure = opdl_dev_configure,
 		.dev_infos_get = opdl_info_get,
 		.dev_close = opdl_close,
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 6fd1102596..c9e17e7cb1 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -320,7 +320,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
 
 
 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops skeleton_eventdev_ops = {
+static struct eventdev_ops skeleton_eventdev_ops = {
 	.dev_infos_get    = skeleton_eventdev_info_get,
 	.dev_configure    = skeleton_eventdev_configure,
 	.dev_start        = skeleton_eventdev_start,
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a5e6ca22e8..9b72073322 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -945,7 +945,7 @@ static int32_t sw_sched_service_func(void *args)
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_sw_ops = {
+	static struct eventdev_ops evdev_sw_ops = {
 			.dev_configure = sw_dev_configure,
 			.dev_infos_get = sw_info_get,
 			.dev_close = sw_close,
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 0f724ac85d..5dab9e2f70 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -103,6 +103,7 @@ extern struct rte_eventdev *rte_eventdevs;
  * @return
  *   - The rte_eventdev structure pointer for the given device ID.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_get_named_dev(const char *name)
 {
@@ -131,6 +132,7 @@ rte_event_pmd_get_named_dev(const char *name)
  * @return
  *   - If the device index is valid (1) or not (0).
  */
+__rte_internal
 static inline unsigned
 rte_event_pmd_is_valid_dev(uint8_t dev_id)
 {
@@ -1060,7 +1062,7 @@ typedef int (*eventdev_eth_tx_adapter_stats_reset_t)(uint8_t id,
 					const struct rte_eventdev *dev);
 
 /** Event device operations function pointer table */
-struct rte_eventdev_ops {
+struct eventdev_ops {
 	eventdev_info_get_t dev_infos_get;	/**< Get device info. */
 	eventdev_configure_t dev_configure;	/**< Configure device. */
 	eventdev_start_t dev_start;		/**< Start device. */
@@ -1178,6 +1180,7 @@ struct rte_eventdev_ops {
  * @return
  *   - Slot in the rte_dev_devices array for a new device;
  */
+__rte_internal
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id);
 
@@ -1189,6 +1192,7 @@ rte_event_pmd_allocate(const char *name, int socket_id);
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);
 
diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index d14ea634b8..02c6d42a80 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -36,7 +36,7 @@ typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
  * interface.  Same as rte_event_pmd_pci_probe, except caller can specify
  * the name.
  */
-__rte_experimental
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 			      struct rte_pci_device *pci_dev,
@@ -90,6 +90,7 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .probe function to attach to a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
 			    struct rte_pci_device *pci_dev,
@@ -113,6 +114,7 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .remove function to detach a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
 			     eventdev_pmd_pci_callback_t devuninit)
diff --git a/lib/eventdev/eventdev_pmd_vdev.h b/lib/eventdev/eventdev_pmd_vdev.h
index bc0cf44c8c..e645a21aad 100644
--- a/lib/eventdev/eventdev_pmd_vdev.h
+++ b/lib/eventdev/eventdev_pmd_vdev.h
@@ -41,6 +41,7 @@ extern "C" {
  *   - Eventdev pointer if device is successfully created.
  *   - NULL if device cannot be created.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
 		int socket_id)
@@ -78,6 +79,7 @@ rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 static inline int
 rte_event_pmd_vdev_uninit(const char *name)
 {
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 32abeba794..523ea9ccae 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,5 +27,11 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+driver_sdk_headers += files(
+        'eventdev_pmd.h',
+        'eventdev_pmd_pci.h',
+        'eventdev_pmd_vdev.h',
+)
+
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
 deps += ['telemetry']
diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index f8c6cca87c..431d05b6ed 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -171,7 +171,6 @@ extern "C" {
 #include <stdint.h>
 
 #include "rte_eventdev.h"
-#include "eventdev_pmd.h"
 
 /**
  * Crypto event adapter mode
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index a9c496fb62..6ba116002f 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,7 +1324,7 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);
 
-struct rte_eventdev_ops;
+struct eventdev_ops;
 struct rte_eventdev;
 
 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
@@ -1429,7 +1429,7 @@ struct rte_eventdev {
 	/**< Pointer to PMD eth Tx adapter enqueue function. */
 	struct rte_eventdev_data *data;
 	/**< Pointer to device data */
-	struct rte_eventdev_ops *dev_ops;
+	struct eventdev_ops *dev_ops;
 	/**< Functions exported by PMD */
 	struct rte_device *dev;
 	/**< Device info. supplied by probing */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 88625621ec..5f1fe412a4 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -55,12 +55,6 @@ DPDK_22 {
 	rte_event_eth_tx_adapter_stats_get;
 	rte_event_eth_tx_adapter_stats_reset;
 	rte_event_eth_tx_adapter_stop;
-	rte_event_pmd_allocate;
-	rte_event_pmd_pci_probe;
-	rte_event_pmd_pci_remove;
-	rte_event_pmd_release;
-	rte_event_pmd_vdev_init;
-	rte_event_pmd_vdev_uninit;
 	rte_event_port_attr_get;
 	rte_event_port_default_conf_get;
 	rte_event_port_link;
@@ -136,8 +130,6 @@ EXPERIMENTAL {
 
 	# changed in 20.11
 	__rte_eventdev_trace_port_setup;
-	# added in 20.11
-	rte_event_pmd_pci_probe_named;
 
 	#added in 21.05
 	rte_event_vector_pool_create;
@@ -150,4 +142,13 @@ INTERNAL {
 	global:
 
 	rte_event_pmd_selftest_seqn_dynfield_offset;
+	rte_event_pmd_allocate;
+	rte_event_pmd_get_named_dev;
+	rte_event_pmd_is_valid_dev;
+	rte_event_pmd_pci_probe;
+	rte_event_pmd_pci_probe_named;
+	rte_event_pmd_pci_remove;
+	rte_event_pmd_release;
+	rte_event_pmd_vdev_init;
+	rte_event_pmd_vdev_uninit;
 };
-- 
2.17.1



* [dpdk-dev] [RFC 02/15] eventdev: separate internal structures
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-10-14  9:11   ` Jerin Jacob
  2021-08-23 19:40 ` [dpdk-dev] [RFC 03/15] eventdev: move eventdevs globals to hugepage mem pbhagavatula
                   ` (15 subsequent siblings)
  16 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj; +Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Create rte_eventdev_core.h and move all the internal data structures
to this file. These structures are mostly used by drivers, but they
need to be in the public header file as they are accessed by datapath
inline functions for performance reasons.
The accessibility of these data structures is not changed.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/eventdev_pmd.h      |   3 -
 lib/eventdev/meson.build         |   3 +
 lib/eventdev/rte_eventdev.h      | 715 +++++++++++++------------------
 lib/eventdev/rte_eventdev_core.h | 144 +++++++
 4 files changed, 443 insertions(+), 422 deletions(-)
 create mode 100644 lib/eventdev/rte_eventdev_core.h
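
From an application's point of view nothing changes: rte_eventdev.h now
includes the new rte_eventdev_core.h just before the inline fast-path
wrappers, so existing code keeps compiling against the public header alone.
A minimal sketch (the helper name is made up for illustration):

#include <rte_eventdev.h>	/* still the only header an application needs */

uint16_t
send_one_event(uint8_t dev_id, uint8_t port_id, struct rte_event *ev)
{
	/* The inline wrapper still dereferences rte_eventdevs[], which is
	 * now declared in rte_eventdev_core.h rather than in this header.
	 */
	return rte_event_enqueue_burst(dev_id, port_id, ev, 1);
}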

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 5dab9e2f70..a25d3f1fb5 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -91,9 +91,6 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
-extern struct rte_eventdev *rte_eventdevs;
-/** The pool of rte_eventdev structures. */
-
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 523ea9ccae..8b51fde361 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,6 +27,9 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+indirect_headers += files(
+        'rte_eventdev_core.h',
+)
 driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 6ba116002f..1b11d4576d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,314 +1324,6 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);
 
-struct eventdev_ops;
-struct rte_eventdev;
-
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
-/**< @internal Enqueue event on port of a device */
-
-typedef uint16_t (*event_enqueue_burst_t)(void *port,
-			const struct rte_event ev[], uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-		uint64_t timeout_ticks);
-/**< @internal Dequeue event from port of a device */
-
-typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-/**< @internal Dequeue burst of events from port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
-		struct rte_event ev[], uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device supporting
- * burst having same destination Ethernet port & Tx queue.
- */
-
-typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
-/**< @internal Enqueue burst of events on crypto adapter */
-
-#define RTE_EVENTDEV_NAME_MAX_LEN	(64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void **ports;
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
-	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
-static __rte_always_inline uint16_t
-__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events,
-			const event_enqueue_burst_t fn)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
-	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
-}
-
-/**
- * Enqueue a burst of events objects or an event object supplied in *rte_event*
- * structure on an  event device designated by its *dev_id* through the event
- * port specified by *port_id*. Each event object specifies the event queue on
- * which it will be enqueued.
- *
- * The *nb_events* parameter is the number of event objects to enqueue which are
- * supplied in the *ev* array of *rte_event* structure.
- *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
- *
- * The rte_event_enqueue_burst() function returns the number of
- * events objects it actually enqueued. A return value equal to *nb_events*
- * means that all event objects have been enqueued.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- */
-static inline uint16_t
-rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
- * an event device designated by its *dev_id* through the event port specified
- * by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_NEW.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_new_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
- * on an event device designated by its *dev_id* through the event port
- * specified by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_FORWARD.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_forward_burst);
-}
-
 /**
  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
  *
@@ -1662,124 +1354,27 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 					uint64_t *timeout_ticks);
 
 /**
- * Dequeue a burst of events objects or an event object from the event port
- * designated by its *event_port_id*, on an event device designated
- * by its *dev_id*.
- *
- * rte_event_dequeue_burst() does not dictate the specifics of scheduling
- * algorithm as each eventdev driver may have different criteria to schedule
- * an event. However, in general, from an application perspective scheduler may
- * use the following scheme to dispatch an event to the port.
- *
- * 1) Selection of event queue based on
- *   a) The list of event queues are linked to the event port.
- *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
- *   queue selection from list is based on event queue priority relative to
- *   other event queue supplied as *priority* in rte_event_queue_setup()
- *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
- *   queue selection from the list is based on event priority supplied as
- *   *priority* in rte_event_enqueue_burst()
- * 2) Selection of event
- *   a) The number of flows available in selected event queue.
- *   b) Schedule type method associated with the event
- *
- * The *nb_events* parameter is the maximum number of event objects to dequeue
- * which are returned in the *ev* array of *rte_event* structure.
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated service priority
+ * supplied in *priorities* on the event device designated by its *dev_id*.
  *
- * The rte_event_dequeue_burst() function returns the number of events objects
- * it actually dequeued. A return value equal to *nb_events* means that all
- * event objects have been dequeued.
+ * The link establishment shall enable the event port *port_id* from
+ * receiving events from the specified event queue(s) supplied in *queues*
  *
- * The number of events dequeued is the number of scheduler contexts held by
- * this port. These contexts are automatically released in the next
- * rte_event_dequeue_burst() invocation if the port supports implicit
- * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
- * operation can be used to release the contexts early.
+ * An event queue may link to one or more event ports.
+ * The number of links can be established from an event queue to event port is
+ * implementation defined.
  *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
  *
  * @param dev_id
  *   The identifier of the device.
+ *
  * @param port_id
- *   The identifier of the event port.
- * @param[out] ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   for output to be populated with the dequeued event objects.
- * @param nb_events
- *   The maximum number of event objects to dequeue, typically number of
- *   rte_event_port_dequeue_depth() available for this port.
- *
- * @param timeout_ticks
- *   - 0 no-wait, returns immediately if there is no event.
- *   - >0 wait for the event, if the device is configured with
- *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- *   at least one event is available or *timeout_ticks* time.
- *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
- *   then this function will wait until the event available or
- *   *dequeue_timeout_ns* ns which was previously supplied to
- *   rte_event_dev_configure()
- *
- * @return
- * The number of event objects actually dequeued from the port. The return
- * value can be less than the value of the *nb_events* parameter when the
- * event port's queue is not full.
- *
- * @see rte_event_port_dequeue_depth()
- */
-static inline uint16_t
-rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-			uint16_t nb_events, uint64_t timeout_ticks)
-{
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->dequeue)(
-			dev->data->ports[port_id], ev, timeout_ticks);
-	else
-		return (*dev->dequeue_burst)(
-			dev->data->ports[port_id], ev, nb_events,
-				timeout_ticks);
-}
-
-/**
- * Link multiple source event queues supplied in *queues* to the destination
- * event port designated by its *port_id* with associated service priority
- * supplied in *priorities* on the event device designated by its *dev_id*.
- *
- * The link establishment shall enable the event port *port_id* from
- * receiving events from the specified event queue(s) supplied in *queues*
- *
- * An event queue may link to one or more event ports.
- * The number of links can be established from an event queue to event port is
- * implementation defined.
- *
- * Event queue(s) to event port link establishment can be changed at runtime
- * without re-configuring the device to support scaling and to reduce the
- * latency of critical work by establishing the link with more event ports
- * at runtime.
- *
- * @param dev_id
- *   The identifier of the device.
- *
- * @param port_id
- *   Event port identifier to select the destination port to link.
+ *   Event port identifier to select the destination port to link.
  *
  * @param queues
  *   Points to an array of *nb_links* event queues to be linked
@@ -2145,6 +1740,288 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
 			     int socket_id);
 
+#include <rte_eventdev_core.h>
+
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			  const struct rte_event ev[], uint16_t nb_events,
+			  const event_enqueue_burst_t fn)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+	else
+		return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
+/**
+ * Enqueue a burst of events objects or an event object supplied in *rte_event*
+ * structure on an  event device designated by its *dev_id* through the event
+ * port specified by *port_id*. Each event object specifies the event queue on
+ * which it will be enqueued.
+ *
+ * The *nb_events* parameter is the number of event objects to enqueue which are
+ * supplied in the *ev* array of *rte_event* structure.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * The rte_event_enqueue_burst() function returns the number of
+ * events objects it actually enqueued. A return value equal to *nb_events*
+ * means that all event objects have been enqueued.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ */
+static inline uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), expect that
+ * application can use this API when the all objects in the burst contains
+ * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
+ * function can provide the additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has event object of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), expect that
+ * application can use this API when the all objects in the burst contains
+ * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
+ * function can provide the additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has event object of operation type != RTE_EVENT_OP_FORWARD.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+				const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_forward_burst);
+}
+
+/**
+ * Dequeue a burst of events objects or an event object from the event port
+ * designated by its *event_port_id*, on an event device designated
+ * by its *dev_id*.
+ *
+ * rte_event_dequeue_burst() does not dictate the specifics of scheduling
+ * algorithm as each eventdev driver may have different criteria to schedule
+ * an event. However, in general, from an application perspective scheduler may
+ * use the following scheme to dispatch an event to the port.
+ *
+ * 1) Selection of event queue based on
+ *   a) The list of event queues are linked to the event port.
+ *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
+ *   queue selection from list is based on event queue priority relative to
+ *   other event queue supplied as *priority* in rte_event_queue_setup()
+ *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
+ *   queue selection from the list is based on event priority supplied as
+ *   *priority* in rte_event_enqueue_burst()
+ * 2) Selection of event
+ *   a) The number of flows available in selected event queue.
+ *   b) Schedule type method associated with the event
+ *
+ * The *nb_events* parameter is the maximum number of event objects to dequeue
+ * which are returned in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_dequeue_burst() function returns the number of events objects
+ * it actually dequeued. A return value equal to *nb_events* means that all
+ * event objects have been dequeued.
+ *
+ * The number of events dequeued is the number of scheduler contexts held by
+ * this port. These contexts are automatically released in the next
+ * rte_event_dequeue_burst() invocation if the port supports implicit
+ * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
+ * operation can be used to release the contexts early.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param[out] ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   for output to be populated with the dequeued event objects.
+ * @param nb_events
+ *   The maximum number of event objects to dequeue, typically number of
+ *   rte_event_port_dequeue_depth() available for this port.
+ *
+ * @param timeout_ticks
+ *   - 0 no-wait, returns immediately if there is no event.
+ *   - >0 wait for the event, if the device is configured with
+ *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
+ *   at least one event is available or *timeout_ticks* time.
+ *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ *   then this function will wait until the event available or
+ *   *dequeue_timeout_ns* ns which was previously supplied to
+ *   rte_event_dev_configure()
+ *
+ * @return
+ * The number of event objects actually dequeued from the port. The return
+ * value can be less than the value of the *nb_events* parameter when the
+ * event port's queue is not full.
+ *
+ * @see rte_event_port_dequeue_depth()
+ */
+static inline uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+			uint16_t nb_events, uint64_t timeout_ticks)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->dequeue)(dev->data->ports[port_id], ev,
+				       timeout_ticks);
+	else
+		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
+					     nb_events, timeout_ticks);
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
new file mode 100644
index 0000000000..97dfec1ae1
--- /dev/null
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(C) 2021 Marvell.
+ * Copyright 2016 NXP
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENTDEV_CORE_H_
+#define _RTE_EVENTDEV_CORE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
+/**< @internal Enqueue event on port of a device */
+
+typedef uint16_t (*event_enqueue_burst_t)(void *port,
+					  const struct rte_event ev[],
+					  uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
+				    uint64_t timeout_ticks);
+/**< @internal Dequeue event from port of a device */
+
+typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
+					  uint16_t nb_events,
+					  uint64_t timeout_ticks);
+/**< @internal Dequeue burst of events from port of a device */
+
+typedef uint16_t (*event_tx_adapter_enqueue)(void *port, struct rte_event ev[],
+					     uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
+						       struct rte_event ev[],
+						       uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device supporting
+ * burst having same destination Ethernet port & Tx queue.
+ */
+
+typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
+						 struct rte_event ev[],
+						 uint16_t nb_events);
+/**< @internal Enqueue burst of events on crypto adapter */
+
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void **ports;
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf *ports_cfg;
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf *queues_cfg;
+	/**< Array of queue configuration structures. */
+	uint16_t *links_map;
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_crypto_adapter_enqueue ca_enqueue;
+	/**< Pointer to PMD crypto adapter enqueue function. */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_RTE_EVENTDEV_CORE_H_*/
-- 
2.17.1



* [dpdk-dev] [RFC 03/15] eventdev: move eventdevs globals to hugepage mem
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
  2021-08-23 19:40 ` [dpdk-dev] [RFC 02/15] eventdev: separate internal structures pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-23 19:40 ` [dpdk-dev] [RFC 04/15] eventdev: move inline APIs into separate structure pbhagavatula
                   ` (14 subsequent siblings)
  16 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj; +Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the global rte_eventdevs array to hugepage memory and allocate
it on the first PMD allocation request, in both primary and secondary
processes.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_eventdev.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)
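
The allocation added below, condensed into a standalone helper to make the
pattern explicit (a sketch, not part of the patch): the table of
RTE_EVENT_MAX_DEVS entries now lives in the rte_malloc (hugepage) heap and
is created lazily on the first PMD allocation request.

#include <errno.h>

#include <rte_malloc.h>
#include <rte_eventdev.h>

/* Sketch of the lazy-allocation step performed by rte_event_pmd_allocate(). */
static int
eventdevs_table_init(void)
{
	if (rte_eventdevs != NULL)
		return 0;	/* already allocated by an earlier probe */

	rte_eventdevs = rte_zmalloc("Eventdev",
				    sizeof(struct rte_eventdev) *
					    RTE_EVENT_MAX_DEVS,
				    RTE_CACHE_LINE_SIZE);
	return rte_eventdevs == NULL ? -ENOMEM : 0;
}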

diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 594dd5e759..21c5c55086 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -38,9 +38,7 @@
 #include "eventdev_pmd.h"
 #include "rte_eventdev_trace.h"
 
-static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
-
-struct rte_eventdev *rte_eventdevs = rte_event_devices;
+struct rte_eventdev *rte_eventdevs;
 
 static struct rte_eventdev_global eventdev_globals = {
 	.nb_devs		= 0
@@ -64,13 +62,13 @@ rte_event_dev_get_dev_id(const char *name)
 		return -EINVAL;
 
 	for (i = 0; i < eventdev_globals.nb_devs; i++) {
-		cmp = (strncmp(rte_event_devices[i].data->name, name,
-				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
-			(rte_event_devices[i].dev ? (strncmp(
-				rte_event_devices[i].dev->driver->name, name,
-					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
-		if (cmp && (rte_event_devices[i].attached ==
-					RTE_EVENTDEV_ATTACHED))
+		cmp = (strncmp(rte_eventdevs[i].data->name, name,
+			       RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
+		      (rte_eventdevs[i].dev ?
+				    (strncmp(rte_eventdevs[i].dev->driver->name,
+					name, RTE_EVENTDEV_NAME_MAX_LEN) == 0) :
+				    0);
+		if (cmp && (rte_eventdevs[i].attached == RTE_EVENTDEV_ATTACHED))
 			return i;
 	}
 	return -ENODEV;
@@ -1469,6 +1467,18 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 	struct rte_eventdev *eventdev;
 	uint8_t dev_id;
 
+	if (rte_eventdevs == NULL) {
+		rte_eventdevs = rte_zmalloc("Eventdev",
+					    sizeof(struct rte_eventdev) *
+						    RTE_EVENT_MAX_DEVS,
+					    RTE_CACHE_LINE_SIZE);
+		if (rte_eventdevs == NULL) {
+			RTE_EDEV_LOG_ERR(
+				"Unable to allocate memory for event devices");
+			return NULL;
+		}
+	}
+
 	if (rte_event_pmd_get_named_dev(name) != NULL) {
 		RTE_EDEV_LOG_ERR("Event device with name %s already "
 				"allocated!", name);
-- 
2.17.1



* [dpdk-dev] [RFC 04/15] eventdev: move inline APIs into separate structure
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
  2021-08-23 19:40 ` [dpdk-dev] [RFC 02/15] eventdev: separate internal structures pbhagavatula
  2021-08-23 19:40 ` [dpdk-dev] [RFC 03/15] eventdev: move eventdevs globals to hugepage mem pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-09-08 12:03   ` Kinsella, Ray
  2021-08-23 19:40 ` [dpdk-dev] [RFC 05/15] eventdev: add helper functions for new driver API pbhagavatula
                   ` (13 subsequent siblings)
  16 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move fastpath inline function pointers from rte_eventdev into a
separate structure accessed via a flat array.
The intention is to make rte_eventdev and related structures private
to avoid future API/ABI breakages.
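
For illustration, once the flat array is in place a fastpath call is
meant to reduce to an indexed call through it; a minimal sketch
(example_enqueue is a hypothetical wrapper name, not part of this
patch):

static inline uint16_t
example_enqueue(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
{
	/* Index the flat, cache-aligned per-device table by the public
	 * dev_id instead of dereferencing struct rte_eventdev.
	 */
	return rte_eventdev_api[dev_id].enqueue(dev_id, port_id, ev);
}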

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/eventdev_pmd.h      |  10 ++++
 lib/eventdev/eventdev_private.c  | 100 +++++++++++++++++++++++++++++++
 lib/eventdev/meson.build         |   1 +
 lib/eventdev/rte_eventdev.c      |  25 +++++++-
 lib/eventdev/rte_eventdev_core.h |  44 ++++++++++++++
 lib/eventdev/version.map         |   4 ++
 6 files changed, 183 insertions(+), 1 deletion(-)
 create mode 100644 lib/eventdev/eventdev_private.c

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index a25d3f1fb5..5eaa29fe14 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -1193,6 +1193,16 @@ __rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);
 
+/**
+ * Reset eventdev fastpath APIs to dummy values.
+ *
+ * @param api
+ * The *api* pointer to reset.
+ */
+__rte_internal
+void
+rte_event_dev_api_reset(struct rte_eventdev_api *api);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
new file mode 100644
index 0000000000..c60fd2b522
--- /dev/null
+++ b/lib/eventdev/eventdev_private.c
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "eventdev_pmd.h"
+#include "rte_eventdev.h"
+
+static uint16_t
+dummy_event_enqueue(__rte_unused uint8_t dev_id, __rte_unused uint8_t port_id,
+		    __rte_unused const struct rte_event *ev)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_enqueue_burst(__rte_unused uint8_t dev_id,
+			  __rte_unused uint8_t port_id,
+			  __rte_unused const struct rte_event ev[],
+			  __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue(__rte_unused uint8_t dev_id, __rte_unused uint8_t port_id,
+		    __rte_unused struct rte_event *ev,
+		    __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue_burst(__rte_unused uint8_t dev_id,
+			  __rte_unused uint8_t port_id,
+			  __rte_unused struct rte_event ev[],
+			  __rte_unused uint16_t nb_events,
+			  __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue(__rte_unused uint8_t dev_id,
+			       __rte_unused uint8_t port_id,
+			       __rte_unused struct rte_event ev[],
+			       __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue_same_dest(__rte_unused uint8_t dev_id,
+					 __rte_unused uint8_t port_id,
+					 __rte_unused struct rte_event ev[],
+					 __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue same destination requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_crypto_adapter_enqueue(__rte_unused uint8_t dev_id,
+				   __rte_unused uint8_t port_id,
+				   __rte_unused struct rte_event ev[],
+				   __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event crypto adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+void
+rte_event_dev_api_reset(struct rte_eventdev_api *api)
+{
+	static const struct rte_eventdev_api dummy = {
+		.enqueue = dummy_event_enqueue,
+		.enqueue_burst = dummy_event_enqueue_burst,
+		.enqueue_new_burst = dummy_event_enqueue_burst,
+		.enqueue_forward_burst = dummy_event_enqueue_burst,
+		.dequeue = dummy_event_dequeue,
+		.dequeue_burst = dummy_event_dequeue_burst,
+		.txa_enqueue = dummy_event_tx_adapter_enqueue,
+		.txa_enqueue_same_dest =
+			dummy_event_tx_adapter_enqueue_same_dest,
+		.ca_enqueue = dummy_event_crypto_adapter_enqueue,
+	};
+
+	*api = dummy;
+}
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 8b51fde361..9051ff04b7 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -8,6 +8,7 @@ else
 endif
 
 sources = files(
+        'eventdev_private.c',
         'rte_eventdev.c',
         'rte_event_ring.c',
         'eventdev_trace_points.c',
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 21c5c55086..5ff8596788 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -44,6 +44,9 @@ static struct rte_eventdev_global eventdev_globals = {
 	.nb_devs		= 0
 };
 
+/* Public fastpath APIs. */
+struct rte_eventdev_api *rte_eventdev_api;
+
 /* Event dev north bound API implementation */
 
 uint8_t
@@ -394,8 +397,9 @@ int
 rte_event_dev_configure(uint8_t dev_id,
 			const struct rte_event_dev_config *dev_conf)
 {
-	struct rte_eventdev *dev;
 	struct rte_event_dev_info info;
+	struct rte_eventdev_api api;
+	struct rte_eventdev *dev;
 	int diag;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -564,10 +568,14 @@ rte_event_dev_configure(uint8_t dev_id,
 		return diag;
 	}
 
+	api = rte_eventdev_api[dev_id];
+	rte_event_dev_api_reset(&api);
+
 	/* Configure the device */
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+		rte_event_dev_api_reset(&api);
 		rte_event_dev_queue_config(dev, 0);
 		rte_event_dev_port_config(dev, 0);
 	}
@@ -1396,6 +1404,7 @@ rte_event_dev_close(uint8_t dev_id)
 		return -EBUSY;
 	}
 
+	rte_event_dev_api_reset(&rte_eventdev_api[dev_id]);
 	rte_eventdev_trace_close(dev_id);
 	return (*dev->dev_ops->dev_close)(dev);
 }
@@ -1479,6 +1488,20 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 		}
 	}
 
+	if (rte_eventdev_api == NULL) {
+		rte_eventdev_api = rte_zmalloc("Eventdev_api",
+					       sizeof(struct rte_eventdev_api) *
+						       RTE_EVENT_MAX_DEVS,
+					       RTE_CACHE_LINE_SIZE);
+		if (rte_eventdev_api == NULL) {
+			RTE_EDEV_LOG_ERR(
+				"Unable to allocate memory for fastpath eventdev API array");
+			return NULL;
+		}
+		for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++)
+			rte_event_dev_api_reset(&rte_eventdev_api[dev_id]);
+	}
+
 	if (rte_event_pmd_get_named_dev(name) != NULL) {
 		RTE_EDEV_LOG_ERR("Event device with name %s already "
 				"allocated!", name);
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 97dfec1ae1..4a7edacb0e 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -12,23 +12,39 @@
 extern "C" {
 #endif
 
+typedef uint16_t (*rte_event_enqueue_t)(uint8_t dev_id, uint8_t port_id,
+					const struct rte_event *ev);
 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
 /**< @internal Enqueue event on port of a device */
 
+typedef uint16_t (*rte_event_enqueue_burst_t)(uint8_t dev_id, uint8_t port_id,
+					      const struct rte_event ev[],
+					      uint16_t nb_events);
 typedef uint16_t (*event_enqueue_burst_t)(void *port,
 					  const struct rte_event ev[],
 					  uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */
 
+typedef uint16_t (*rte_event_dequeue_t)(uint8_t dev_id, uint8_t port_id,
+					struct rte_event *ev,
+					uint64_t timeout_ticks);
 typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
 				    uint64_t timeout_ticks);
 /**< @internal Dequeue event from port of a device */
 
+typedef uint16_t (*rte_event_dequeue_burst_t)(uint8_t dev_id, uint8_t port_id,
+					      struct rte_event ev[],
+					      uint16_t nb_events,
+					      uint64_t timeout_ticks);
 typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 					  uint16_t nb_events,
 					  uint64_t timeout_ticks);
 /**< @internal Dequeue burst of events from port of a device */
 
+typedef uint16_t (*rte_event_tx_adapter_enqueue_t)(uint8_t dev_id,
+						   uint8_t port_id,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
 typedef uint16_t (*event_tx_adapter_enqueue)(void *port, struct rte_event ev[],
 					     uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */
@@ -40,11 +56,39 @@ typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
  * burst having same destination Ethernet port & Tx queue.
  */
 
+typedef uint16_t (*rte_event_crypto_adapter_enqueue_t)(uint8_t dev_id,
+						       uint8_t port_id,
+						       struct rte_event ev[],
+						       uint16_t nb_events);
 typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
 						 struct rte_event ev[],
 						 uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */
 
+struct rte_eventdev_api {
+	rte_event_enqueue_t enqueue;
+	/**< PMD enqueue function. */
+	rte_event_enqueue_burst_t enqueue_burst;
+	/**< PMD enqueue burst function. */
+	rte_event_enqueue_burst_t enqueue_new_burst;
+	/**< PMD enqueue burst new function. */
+	rte_event_enqueue_burst_t enqueue_forward_burst;
+	/**< PMD enqueue burst fwd function. */
+	rte_event_dequeue_t dequeue;
+	/**< PMD dequeue function. */
+	rte_event_dequeue_burst_t dequeue_burst;
+	/**< PMD dequeue burst function. */
+	rte_event_tx_adapter_enqueue_t txa_enqueue;
+	/**< PMD Tx adapter enqueue function. */
+	rte_event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< PMD Tx adapter enqueue same destination function. */
+	rte_event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< PMD Crypto adapter enqueue function. */
+	uintptr_t reserved[2];
+} __rte_cache_aligned;
+
+extern struct rte_eventdev_api *rte_eventdev_api;
+
 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
 /**< @internal Max length of name of event PMD */
 
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 5f1fe412a4..bc2912dcfd 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -85,6 +85,9 @@ DPDK_22 {
 	rte_event_timer_cancel_burst;
 	rte_eventdevs;
 
+	# added in 21.11
+	rte_eventdev_api;
+
 	local: *;
 };
 
@@ -141,6 +144,7 @@ EXPERIMENTAL {
 INTERNAL {
 	global:
 
+	rte_event_dev_api_reset;
 	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 05/15] eventdev: add helper functions for new driver API
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (2 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 04/15] eventdev: move inline APIs into separate structure pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-09-08 12:04   ` Kinsella, Ray
  2021-08-23 19:40 ` [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions pbhagavatula
                   ` (12 subsequent siblings)
  16 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add helper functions and macros to help drivers transition to the new
fastpath interface.
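
For example, a PMD with an existing port-based enqueue called
my_pmd_enqueue (a hypothetical name) could generate and register a
dev_id/port_id based wrapper roughly as follows, assuming the headers
from this series are included:

/* Existing driver function, unchanged: operates on port private data. */
uint16_t my_pmd_enqueue(void *port, const struct rte_event *ev);

/* Defines _rte_event_enq_my_pmd_enqueue(), which resolves the port
 * pointer via _rte_event_dev_prolog() and calls my_pmd_enqueue().
 */
_RTE_EVENT_ENQ_DEF(my_pmd_enqueue)

static void
my_pmd_fp_fns_set(struct rte_eventdev *event_dev)
{
	/* Register the generated wrapper for this device. */
	rte_event_set_enq_fn(event_dev->data->dev_id,
			     _RTE_EVENT_ENQ_FUNC(my_pmd_enqueue));
}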

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/eventdev_pmd.h | 396 ++++++++++++++++++++++++++++++++++++
 lib/eventdev/rte_eventdev.c | 174 ++++++++++++++++
 lib/eventdev/version.map    |  18 ++
 3 files changed, 588 insertions(+)

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 5eaa29fe14..f3a221e688 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -1203,6 +1203,402 @@ __rte_internal
 void
 rte_event_dev_api_reset(struct rte_eventdev_api *api);
 
+/**
+ * @internal
+ * Helper routine for event fastpath APIs.
+ * Should be called as the first thing in the PMD's rte_event_*
+ * implementation.
+ * Performs the necessary checks and returns a pointer to the event port.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ * @param port_id
+ *  The identifier of the event port.
+ *
+ * @return
+ *  Pointer to the event port.
+ */
+__rte_internal
+static inline void *
+_rte_event_dev_prolog(uint8_t dev_id, uint8_t port_id)
+{
+	struct rte_eventdev *dev;
+
+	dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+
+	return dev->data->ports[port_id];
+}
+
+#define _RTE_EVENT_ENQ_FUNC(fn)		     _rte_event_enq_##fn
+#define _RTE_EVENT_ENQ_BURST_FUNC(fn)	     _rte_event_enq_burst_##fn
+#define _RTE_EVENT_DEQ_FUNC(fn)		     _rte_event_deq_##fn
+#define _RTE_EVENT_DEQ_BURST_FUNC(fn)	     _rte_event_deq_burst_##fn
+#define _RTE_EVENT_TXA_ENQ_BURST_FUNC(fn)    _rte_event_txa_enq_burst_##fn
+#define _RTE_EVENT_CA_ENQ_BURST_FUNC(fn)     _rte_event_ca_enq_burst_##fn
+
+/**
+ * @internal
+ * Helper macros to declare prototypes of the new API wrappers for existing
+ * PMD enqueue/dequeue functions.
+ */
+#define _RTE_EVENT_ENQ_PROTO(fn)                                               \
+	uint16_t _RTE_EVENT_ENQ_FUNC(fn)(uint8_t dev_id, uint8_t port_id,      \
+					 const struct rte_event *ev)
+
+#define _RTE_EVENT_ENQ_BURST_PROTO(fn)                                         \
+	uint16_t _RTE_EVENT_ENQ_BURST_FUNC(fn)(                                \
+		uint8_t dev_id, uint8_t port_id, const struct rte_event ev[],  \
+		uint16_t nb_events)
+
+#define _RTE_EVENT_DEQ_PROTO(fn)                                               \
+	uint16_t _RTE_EVENT_DEQ_FUNC(fn)(uint8_t dev_id, uint8_t port_id,      \
+					 struct rte_event *ev,                 \
+					 uint64_t timeout_ticks)
+
+#define _RTE_EVENT_DEQ_BURST_PROTO(fn)                                         \
+	uint16_t _RTE_EVENT_DEQ_BURST_FUNC(fn)(                                \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)
+
+#define _RTE_EVENT_TXA_ENQ_BURST_PROTO(fn)                                     \
+	uint16_t _RTE_EVENT_TXA_ENQ_BURST_FUNC(fn)(                            \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events)
+
+#define _RTE_EVENT_CA_ENQ_BURST_PROTO(fn)                                      \
+	uint16_t _RTE_EVENT_CA_ENQ_BURST_FUNC(fn)(                             \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events)
+
+/**
+ * @internal
+ * Helper macros to define the new API wrappers for existing PMD
+ * enqueue/dequeue functions.
+ */
+#define _RTE_EVENT_ENQ_DEF(fn)                                                 \
+	_RTE_EVENT_ENQ_PROTO(fn)                                               \
+	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
+		return fn(port, ev);                                           \
+	}
+
+#define _RTE_EVENT_ENQ_BURST_DEF(fn)                                           \
+	_RTE_EVENT_ENQ_BURST_PROTO(fn)                                         \
+	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
+		return fn(port, ev, nb_events);                                \
+	}
+
+#define _RTE_EVENT_DEQ_DEF(fn)                                                 \
+	_RTE_EVENT_DEQ_PROTO(fn)                                               \
+	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
+		return fn(port, ev, timeout_ticks);                            \
+	}
+
+#define _RTE_EVENT_DEQ_BURST_DEF(fn)                                           \
+	_RTE_EVENT_DEQ_BURST_PROTO(fn)                                         \
+	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
+		return fn(port, ev, nb_events, timeout_ticks);                 \
+	}
+
+#define _RTE_EVENT_TXA_ENQ_BURST_DEF(fn)                                       \
+	_RTE_EVENT_TXA_ENQ_BURST_PROTO(fn)                                     \
+	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
+		return fn(port, ev, nb_events);                                \
+	}
+
+#define _RTE_EVENT_CA_ENQ_BURST_DEF(fn)                                        \
+	_RTE_EVENT_CA_ENQ_BURST_PROTO(fn)                                      \
+	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
+		return fn(port, ev, nb_events);                                \
+	}
+
+/**
+ * @internal
+ * Helper routine to get enqueue function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_event_enqueue_t
+rte_event_get_enq_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to get enqueue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_event_enqueue_burst_t
+rte_event_get_enq_burst_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to get enqueue new events function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_event_enqueue_burst_t
+rte_event_get_enq_new_burst_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to get enqueue forward events function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_event_enqueue_burst_t
+rte_event_get_enq_fwd_burst_fn(uint8_t dev_id);
+
+
+/**
+ * @internal
+ * Helper routine to get dequeue function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_event_dequeue_t
+rte_event_get_deq_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to get dequeue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_event_dequeue_burst_t
+rte_event_get_deq_burst_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to get Tx adapter enqueue function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_event_tx_adapter_enqueue_t
+rte_event_get_tx_adapter_enq_fn(uint8_t dev_id);
+
+/**
+ *
+ * @internal
+ * Helper routine to get Tx adapter enqueue same destination function of a
+ * given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_event_tx_adapter_enqueue_t
+rte_event_get_tx_adapter_enq_same_dest_fn(uint8_t dev_id);
+
+
+/**
+ * @internal
+ * Helper routine to get crypto adapter enqueue function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  The enqueue function if valid else NULL
+ */
+__rte_internal
+rte_event_crypto_adapter_enqueue_t
+rte_event_get_crypto_adapter_enq_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to set enqueue function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_enq_fn(uint8_t dev_id, rte_event_enqueue_t fn);
+
+/**
+ * @internal
+ * Helper routine to set enqueue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_enq_burst_fn(uint8_t dev_id, rte_event_enqueue_burst_t fn);
+
+/**
+ * @internal
+ * Helper routine to set enqueue new burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_enq_new_burst_fn(uint8_t dev_id, rte_event_enqueue_burst_t fn);
+
+/**
+ * @internal
+ * Helper routine to set enqueue forward burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_enq_fwd_burst_fn(uint8_t dev_id, rte_event_enqueue_burst_t fn);
+
+/**
+ * @internal
+ * Helper routine to set dequeue function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_deq_fn(uint8_t dev_id, rte_event_dequeue_t fn);
+
+/**
+ * @internal
+ * Helper routine to set dequeue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_deq_burst_fn(uint8_t dev_id, rte_event_dequeue_burst_t fn);
+
+/**
+ * @internal
+ * Helper routine to set Tx adapter enqueue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_tx_adapter_enq_fn(uint8_t dev_id,
+				rte_event_tx_adapter_enqueue_t fn);
+
+/**
+ * @internal
+ * Helper routine to set Tx adapter enqueue same destination burst function of
+ * a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_tx_adapter_enq_same_dest_fn(uint8_t dev_id,
+					  rte_event_tx_adapter_enqueue_t fn);
+
+/**
+ * @internal
+ * Helper routine to set crypto adapter enqueue burst function of a given
+ * device.
+ *
+ * @param dev_id
+ *  The device identifier of the Event device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are invalid.
+ */
+__rte_internal
+int
+rte_event_set_crypto_adapter_enq_fn(uint8_t dev_id,
+				    rte_event_crypto_adapter_enqueue_t fn);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 5ff8596788..941e1e7c8e 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -1866,6 +1866,180 @@ handle_queue_xstats(const char *cmd __rte_unused,
 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
 }
 
+rte_event_enqueue_t
+rte_event_get_enq_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].enqueue;
+}
+
+rte_event_enqueue_burst_t
+rte_event_get_enq_burst_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].enqueue_burst;
+}
+
+rte_event_enqueue_burst_t
+rte_event_get_enq_new_burst_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].enqueue_new_burst;
+}
+
+rte_event_enqueue_burst_t
+rte_event_get_enq_fwd_burst_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].enqueue_forward_burst;
+}
+
+rte_event_dequeue_t
+rte_event_get_deq_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].dequeue;
+}
+
+rte_event_dequeue_burst_t
+rte_event_get_deq_burst_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].dequeue_burst;
+}
+
+rte_event_tx_adapter_enqueue_t
+rte_event_get_tx_adapter_enq_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].txa_enqueue;
+}
+
+rte_event_tx_adapter_enqueue_t
+rte_event_get_tx_adapter_enq_same_dest_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].txa_enqueue_same_dest;
+}
+
+rte_event_crypto_adapter_enqueue_t
+rte_event_get_crypto_adapter_enq_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eventdev_api[dev_id].ca_enqueue;
+}
+
+int
+rte_event_set_enq_fn(uint8_t dev_id, rte_event_enqueue_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].enqueue = fn;
+	return 0;
+}
+
+int
+rte_event_set_enq_burst_fn(uint8_t dev_id, rte_event_enqueue_burst_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].enqueue_burst = fn;
+	return 0;
+}
+
+int
+rte_event_set_enq_new_burst_fn(uint8_t dev_id, rte_event_enqueue_burst_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].enqueue_new_burst = fn;
+	return 0;
+}
+
+int
+rte_event_set_enq_fwd_burst_fn(uint8_t dev_id, rte_event_enqueue_burst_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].enqueue_forward_burst = fn;
+	return 0;
+}
+
+int
+rte_event_set_deq_fn(uint8_t dev_id, rte_event_dequeue_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].dequeue = fn;
+	return 0;
+}
+
+int
+rte_event_set_deq_burst_fn(uint8_t dev_id, rte_event_dequeue_burst_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].dequeue_burst = fn;
+	return 0;
+}
+
+int
+rte_event_set_tx_adapter_enq_fn(uint8_t dev_id,
+				rte_event_tx_adapter_enqueue_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].txa_enqueue = fn;
+	return 0;
+}
+
+int
+rte_event_set_tx_adapter_enq_same_dest_fn(uint8_t dev_id,
+					  rte_event_tx_adapter_enqueue_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].txa_enqueue_same_dest = fn;
+	return 0;
+}
+
+int
+rte_event_set_crypto_adapter_enq_fn(uint8_t dev_id,
+				    rte_event_crypto_adapter_enqueue_t fn)
+{
+	if (dev_id >= RTE_EVENT_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_eventdev_api[dev_id].ca_enqueue = fn;
+	return 0;
+}
+
 RTE_INIT(eventdev_init_telemetry)
 {
 	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index bc2912dcfd..d89cbc337e 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -145,6 +145,24 @@ INTERNAL {
 	global:
 
 	rte_event_dev_api_reset;
+	rte_event_get_crypto_adapter_enq_fn;
+	rte_event_get_deq_burst_fn;
+	rte_event_get_deq_fn;
+	rte_event_get_enq_burst_fn;
+	rte_event_get_enq_fn;
+	rte_event_get_enq_fwd_burst_fn;
+	rte_event_get_enq_new_burst_fn;
+	rte_event_get_tx_adapter_enq_fn;
+	rte_event_get_tx_adapter_enq_same_dest_fn;
+	rte_event_set_crypto_adapter_enq_fn;
+	rte_event_set_deq_burst_fn;
+	rte_event_set_deq_fn;
+	rte_event_set_enq_burst_fn;
+	rte_event_set_enq_fn;
+	rte_event_set_enq_fwd_burst_fn;
+	rte_event_set_enq_new_burst_fn;
+	rte_event_set_tx_adapter_enq_fn;
+	rte_event_set_tx_adapter_enq_same_dest_fn;
 	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (3 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 05/15] eventdev: add helper functions for new driver API pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-30 14:41   ` Jayatheerthan, Jay
  2021-08-30 14:46   ` David Marchand
  2021-08-23 19:40 ` [dpdk-dev] [RFC 07/15] eventdev: make drivers to use new API pbhagavatula
                   ` (11 subsequent siblings)
  16 siblings, 2 replies; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan
  Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Use the new driver interface for the fastpath enqueue/dequeue inline
functions.
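
The net effect on an inline wrapper is sketched below (condensed from
the diff; example_txa_enqueue is a hypothetical name, and the debug
checks dropped here are handled by the PMD-side prolog helper
introduced in the previous patch):

static inline uint16_t
example_txa_enqueue(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
		    uint16_t nb_events, const uint8_t flags)
{
	/* Dispatch through the per-device API table; 'flags' selects the
	 * same-destination variant.
	 */
	if (flags)
		return rte_eventdev_api[dev_id].txa_enqueue_same_dest(
			dev_id, port_id, ev, nb_events);
	return rte_eventdev_api[dev_id].txa_enqueue(dev_id, port_id, ev,
						    nb_events);
}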

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_crypto_adapter.h | 13 +-----
 lib/eventdev/rte_event_eth_tx_adapter.h | 22 ++-------
 lib/eventdev/rte_eventdev.h             | 61 +++++++------------------
 3 files changed, 22 insertions(+), 74 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index 431d05b6ed..a91585a369 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -568,20 +568,11 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 				struct rte_event ev[],
 				uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
 	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
 		nb_events);
 
-	return dev->ca_enqueue(dev->data->ports[port_id], ev, nb_events);
+	return rte_eventdev_api[dev_id].ca_enqueue(dev_id, port_id, ev,
+						   nb_events);
 }
 
 #ifdef __cplusplus
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
index 8c59547165..e3e78a5616 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/eventdev/rte_event_eth_tx_adapter.h
@@ -355,28 +355,14 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 				uint16_t nb_events,
 				const uint8_t flags)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS ||
-		!rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
 	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
 		nb_events, flags);
 	if (flags)
-		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
-						  ev, nb_events);
+		return rte_eventdev_api[dev_id].txa_enqueue_same_dest(
+			dev_id, port_id, ev, nb_events);
 	else
-		return dev->txa_enqueue(dev->data->ports[port_id], ev,
-					nb_events);
+		return rte_eventdev_api[dev_id].txa_enqueue(dev_id, port_id, ev,
+							    nb_events);
 }
 
 /**
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 1b11d4576d..7378597846 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1745,30 +1745,17 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
 static __rte_always_inline uint16_t
 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			  const struct rte_event ev[], uint16_t nb_events,
-			  const event_enqueue_burst_t fn)
+			  const rte_event_enqueue_burst_t fn)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
 	/*
 	 * Allow zero cost non burst mode routine invocation if application
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+		return rte_eventdev_api[dev_id].enqueue(dev_id, port_id, ev);
 	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
+		return fn(dev_id, port_id, ev, nb_events);
 }
 
 /**
@@ -1818,10 +1805,9 @@ static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_burst);
+	return __rte_event_enqueue_burst(
+		dev_id, port_id, ev, nb_events,
+		rte_eventdev_api[dev_id].enqueue_burst);
 }
 
 /**
@@ -1869,10 +1855,9 @@ static inline uint16_t
 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
 			    const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_new_burst);
+	return __rte_event_enqueue_burst(
+		dev_id, port_id, ev, nb_events,
+		rte_eventdev_api[dev_id].enqueue_new_burst);
 }
 
 /**
@@ -1920,10 +1905,9 @@ static inline uint16_t
 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
 				const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_forward_burst);
+	return __rte_event_enqueue_burst(
+		dev_id, port_id, ev, nb_events,
+		rte_eventdev_api[dev_id].enqueue_forward_burst);
 }
 
 /**
@@ -1996,30 +1980,17 @@ static inline uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 			uint16_t nb_events, uint64_t timeout_ticks)
 {
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
 	/*
 	 * Allow zero cost non burst mode routine invocation if application
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->dequeue)(dev->data->ports[port_id], ev,
-				       timeout_ticks);
+		return rte_eventdev_api[dev_id].dequeue(dev_id, port_id, ev,
+							timeout_ticks);
 	else
-		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
-					     nb_events, timeout_ticks);
+		return rte_eventdev_api[dev_id].dequeue_burst(
+			dev_id, port_id, ev, nb_events, timeout_ticks);
 }
 
 #ifdef __cplusplus
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 07/15] eventdev: make drivers to use new API
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (4 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-09-08  6:43   ` Hemant Agrawal
  2021-08-23 19:40 ` [dpdk-dev] [RFC 08/15] eventdev: hide event device related structures pbhagavatula
                   ` (10 subsequent siblings)
  16 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Mattias Rönnblom, Liang Ma,
	Peter Mccarthy, Harry van Haaren
  Cc: konstantin.ananyev, dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Make drivers use the new API for all enqueue and dequeue paths.
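
In condensed form, a driver now implements dev_id/port_id based
functions and publishes them into the per-device table instead of into
struct rte_eventdev; a minimal sketch with hypothetical names
(example_pmd_enq, example_pmd_fp_fns_set):

static uint16_t
example_pmd_enq(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
{
	/* Resolve the port private data; the checks that used to live in
	 * the public inline wrappers now happen in the prolog helper.
	 */
	void *port = _rte_event_dev_prolog(dev_id, port_id);

	RTE_SET_USED(ev);
	if (port == NULL)
		return 0;
	/* Hardware specific enqueue using 'port' and 'ev' would go here. */
	return 1;
}

static void
example_pmd_fp_fns_set(struct rte_eventdev *event_dev)
{
	struct rte_eventdev_api *api =
		&rte_eventdev_api[event_dev->data->dev_id];

	api->enqueue = example_pmd_enq;
}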

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c           |  63 ++++---
 drivers/event/cnxk/cn10k_worker.c             |  22 +--
 drivers/event/cnxk/cn10k_worker.h             |  49 ++---
 drivers/event/cnxk/cn10k_worker_deq.c         |   8 +-
 drivers/event/cnxk/cn10k_worker_deq_burst.c   |  14 +-
 drivers/event/cnxk/cn10k_worker_deq_tmo.c     |  21 ++-
 drivers/event/cnxk/cn10k_worker_tx_enq.c      |   4 +-
 drivers/event/cnxk/cn10k_worker_tx_enq_seg.c  |   4 +-
 drivers/event/cnxk/cn9k_eventdev.c            | 168 +++++++++---------
 drivers/event/cnxk/cn9k_worker.c              |  45 ++---
 drivers/event/cnxk/cn9k_worker.h              |  87 +++++----
 drivers/event/cnxk/cn9k_worker_deq.c          |   8 +-
 drivers/event/cnxk/cn9k_worker_deq_burst.c    |  14 +-
 drivers/event/cnxk/cn9k_worker_deq_tmo.c      |  21 ++-
 drivers/event/cnxk/cn9k_worker_dual_deq.c     |   8 +-
 .../event/cnxk/cn9k_worker_dual_deq_burst.c   |  13 +-
 drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c |  22 ++-
 drivers/event/cnxk/cn9k_worker_dual_tx_enq.c  |   4 +-
 .../event/cnxk/cn9k_worker_dual_tx_enq_seg.c  |   4 +-
 drivers/event/cnxk/cn9k_worker_tx_enq.c       |   4 +-
 drivers/event/cnxk/cn9k_worker_tx_enq_seg.c   |   4 +-
 drivers/event/dlb2/dlb2.c                     |  77 ++++++--
 drivers/event/dpaa/dpaa_eventdev.c            |  45 ++++-
 drivers/event/dpaa2/dpaa2_eventdev.c          |  47 ++++-
 drivers/event/dsw/dsw_evdev.c                 |  28 ++-
 drivers/event/octeontx/ssovf_evdev.h          |  14 +-
 drivers/event/octeontx/ssovf_worker.c         | 110 +++++++-----
 drivers/event/octeontx2/otx2_evdev.c          | 111 ++++++------
 drivers/event/octeontx2/otx2_evdev.h          | 151 ++++++++--------
 .../octeontx2/otx2_evdev_crypto_adptr_tx.h    |  10 +-
 drivers/event/octeontx2/otx2_worker.c         |  88 +++++----
 drivers/event/octeontx2/otx2_worker_dual.c    |  92 ++++++----
 drivers/event/opdl/opdl_evdev.c               |  28 ++-
 drivers/event/skeleton/skeleton_eventdev.c    |  37 +++-
 drivers/event/sw/sw_evdev.c                   |  29 ++-
 35 files changed, 885 insertions(+), 569 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 697b134041..5dfebc5e54 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -271,56 +271,61 @@ static void
 cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-	const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
+	struct rte_eventdev_api *api;
+
+	api = &rte_eventdev_api[event_dev->data->dev_id];
+	const rte_event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
+	const rte_event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_t sso_hws_tmo_deq[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_tmo_deq[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_tmo_deq_burst[2][2][2][2][2][2] = {
+	const rte_event_dequeue_burst_t
+		sso_hws_tmo_deq_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
-		NIX_RX_FASTPATH_MODES
+			NIX_RX_FASTPATH_MODES
 #undef R
-	};
+		};
 
-	const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
+	const rte_event_dequeue_burst_t
+		sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
-		NIX_RX_FASTPATH_MODES
+			NIX_RX_FASTPATH_MODES
 #undef R
-	};
+		};
 
-	const event_dequeue_t sso_hws_tmo_deq_seg[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_tmo_deq_seg[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		sso_hws_tmo_deq_seg_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
@@ -329,7 +334,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		};
 
 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
@@ -337,7 +342,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};
 
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
@@ -345,19 +350,19 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};
 
-	event_dev->enqueue = cn10k_sso_hws_enq;
-	event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
-	event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
-	event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
+	api->enqueue = cn10k_sso_hws_enq;
+	api->enqueue_burst = cn10k_sso_hws_enq_burst;
+	api->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
+	api->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
 	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-		event_dev->dequeue = sso_hws_deq_seg
+		api->dequeue = sso_hws_deq_seg
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-		event_dev->dequeue_burst = sso_hws_deq_seg_burst
+		api->dequeue_burst = sso_hws_deq_seg_burst
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -365,7 +370,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		if (dev->is_timeout_deq) {
-			event_dev->dequeue = sso_hws_tmo_deq_seg
+			api->dequeue = sso_hws_tmo_deq_seg
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -375,7 +380,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				    NIX_RX_OFFLOAD_CHECKSUM_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst = sso_hws_tmo_deq_seg_burst
+			api->dequeue_burst = sso_hws_tmo_deq_seg_burst
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -387,14 +392,14 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		}
 	} else {
-		event_dev->dequeue = sso_hws_deq
+		api->dequeue = sso_hws_deq
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-		event_dev->dequeue_burst = sso_hws_deq_burst
+		api->dequeue_burst = sso_hws_deq_burst
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -402,7 +407,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		if (dev->is_timeout_deq) {
-			event_dev->dequeue = sso_hws_tmo_deq
+			api->dequeue = sso_hws_tmo_deq
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -412,7 +417,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				    NIX_RX_OFFLOAD_CHECKSUM_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst = sso_hws_tmo_deq_burst
+			api->dequeue_burst = sso_hws_tmo_deq_burst
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -427,7 +432,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
 		/* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
-		event_dev->txa_enqueue = sso_hws_tx_adptr_enq_seg
+		api->txa_enqueue = sso_hws_tx_adptr_enq_seg
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
@@ -435,7 +440,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
 	} else {
-		event_dev->txa_enqueue = sso_hws_tx_adptr_enq
+		api->txa_enqueue = sso_hws_tx_adptr_enq
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
@@ -444,7 +449,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
 	}
 
-	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
+	api->txa_enqueue_same_dest = api->txa_enqueue;
 }
 
 static void
diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index c71aa37327..a43ca9f524 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -7,9 +7,9 @@
 #include "cnxk_worker.h"
 
 uint16_t __rte_hot
-cn10k_sso_hws_enq(void *port, const struct rte_event *ev)
+cn10k_sso_hws_enq(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
 {
-	struct cn10k_sso_hws *ws = port;
+	struct cn10k_sso_hws *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	switch (ev->op) {
 	case RTE_EVENT_OP_NEW:
@@ -29,18 +29,18 @@ cn10k_sso_hws_enq(void *port, const struct rte_event *ev)
 }
 
 uint16_t __rte_hot
-cn10k_sso_hws_enq_burst(void *port, const struct rte_event ev[],
-			uint16_t nb_events)
+cn10k_sso_hws_enq_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
 {
 	RTE_SET_USED(nb_events);
-	return cn10k_sso_hws_enq(port, ev);
+	return cn10k_sso_hws_enq(dev_id, port_id, ev);
 }
 
 uint16_t __rte_hot
-cn10k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[],
-			    uint16_t nb_events)
+cn10k_sso_hws_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
 {
-	struct cn10k_sso_hws *ws = port;
+	struct cn10k_sso_hws *ws = _rte_event_dev_prolog(dev_id, port_id);
 	uint16_t i, rc = 1;
 
 	for (i = 0; i < nb_events && rc; i++)
@@ -50,10 +50,10 @@ cn10k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[],
 }
 
 uint16_t __rte_hot
-cn10k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
-			    uint16_t nb_events)
+cn10k_sso_hws_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
 {
-	struct cn10k_sso_hws *ws = port;
+	struct cn10k_sso_hws *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	RTE_SET_USED(nb_events);
 	cn10k_sso_hws_forward_event(ws, ev);
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 9cc0992063..f3725ff48f 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -272,38 +272,43 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
 }
 
 /* CN10K Fastpath functions. */
-uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
-uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
+uint16_t __rte_hot cn10k_sso_hws_enq(uint8_t dev_id, uint8_t port_id,
+				     const struct rte_event *ev);
+uint16_t __rte_hot cn10k_sso_hws_enq_burst(uint8_t dev_id, uint8_t port_id,
 					   const struct rte_event ev[],
 					   uint16_t nb_events);
-uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
+uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(uint8_t dev_id, uint8_t port_id,
 					       const struct rte_event ev[],
 					       uint16_t nb_events);
-uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
+uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
 					       const struct rte_event ev[],
 					       uint16_t nb_events);
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name(                       \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name(                 \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name(                 \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                       \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                 \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                 \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name(                   \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks);                                       \
 	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name(             \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks);
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);
 
 NIX_RX_FASTPATH_MODES
 #undef R
@@ -453,13 +458,17 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                  \
-		void *port, struct rte_event ev[], uint16_t nb_events);        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
 	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(              \
-		void *port, struct rte_event ev[], uint16_t nb_events);        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
 	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_##name(             \
-		void *port, struct rte_event ev[], uint16_t nb_events);        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
 	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_seg_##name(         \
-		void *port, struct rte_event ev[], uint16_t nb_events);
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);
 
 NIX_TX_FASTPATH_MODES
 #undef T
diff --git a/drivers/event/cnxk/cn10k_worker_deq.c b/drivers/event/cnxk/cn10k_worker_deq.c
index 36ec454ccc..72aa97c114 100644
--- a/drivers/event/cnxk/cn10k_worker_deq.c
+++ b/drivers/event/cnxk/cn10k_worker_deq.c
@@ -8,8 +8,10 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn10k_sso_hws *ws = port;                               \
 									       \
 		RTE_SET_USED(timeout_ticks);                                   \
@@ -24,8 +26,10 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                       \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn10k_sso_hws *ws = port;                               \
 									       \
 		RTE_SET_USED(timeout_ticks);                                   \
diff --git a/drivers/event/cnxk/cn10k_worker_deq_burst.c b/drivers/event/cnxk/cn10k_worker_deq_burst.c
index 29ecc551cf..15b8a49412 100644
--- a/drivers/event/cnxk/cn10k_worker_deq_burst.c
+++ b/drivers/event/cnxk/cn10k_worker_deq_burst.c
@@ -8,21 +8,23 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn10k_sso_hws_deq_##name(port, ev, timeout_ticks);      \
+		return cn10k_sso_hws_deq_##name(dev_id, port_id, ev,           \
+						timeout_ticks);                \
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                 \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn10k_sso_hws_deq_seg_##name(port, ev, timeout_ticks);  \
+		return cn10k_sso_hws_deq_seg_##name(dev_id, port_id, ev,       \
+						    timeout_ticks);            \
 	}
 
 NIX_RX_FASTPATH_MODES
diff --git a/drivers/event/cnxk/cn10k_worker_deq_tmo.c b/drivers/event/cnxk/cn10k_worker_deq_tmo.c
index c8524a27bd..4e6c3c7cb5 100644
--- a/drivers/event/cnxk/cn10k_worker_deq_tmo.c
+++ b/drivers/event/cnxk/cn10k_worker_deq_tmo.c
@@ -8,8 +8,10 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name(                       \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn10k_sso_hws *ws = port;                               \
 		uint16_t ret = 1;                                              \
 		uint64_t iter;                                                 \
@@ -29,17 +31,20 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name(                 \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn10k_sso_hws_deq_tmo_##name(port, ev, timeout_ticks);  \
+		return cn10k_sso_hws_deq_tmo_##name(dev_id, port_id, ev,       \
+						    timeout_ticks);            \
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name(                   \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn10k_sso_hws *ws = port;                               \
 		uint16_t ret = 1;                                              \
 		uint64_t iter;                                                 \
@@ -59,12 +64,12 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name(             \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn10k_sso_hws_deq_tmo_seg_##name(port, ev,              \
+		return cn10k_sso_hws_deq_tmo_seg_##name(dev_id, port_id, ev,   \
 							timeout_ticks);        \
 	}
 
diff --git a/drivers/event/cnxk/cn10k_worker_tx_enq.c b/drivers/event/cnxk/cn10k_worker_tx_enq.c
index f9968ac0d0..bfb657c1de 100644
--- a/drivers/event/cnxk/cn10k_worker_tx_enq.c
+++ b/drivers/event/cnxk/cn10k_worker_tx_enq.c
@@ -6,8 +6,10 @@
 
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                  \
-		void *port, struct rte_event ev[], uint16_t nb_events)         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events)                                            \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn10k_sso_hws *ws = port;                               \
 		uint64_t cmd[sz];                                              \
 									       \
diff --git a/drivers/event/cnxk/cn10k_worker_tx_enq_seg.c b/drivers/event/cnxk/cn10k_worker_tx_enq_seg.c
index a24fc42e5a..6fbccd7fd4 100644
--- a/drivers/event/cnxk/cn10k_worker_tx_enq_seg.c
+++ b/drivers/event/cnxk/cn10k_worker_tx_enq_seg.c
@@ -6,8 +6,10 @@
 
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(              \
-		void *port, struct rte_event ev[], uint16_t nb_events)         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events)                                            \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2];           \
 		struct cn10k_sso_hws *ws = port;                               \
 									       \
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 9b439947e5..48c8114c6e 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -312,57 +312,62 @@ static void
 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	struct rte_eventdev_api *api;
+
+	api = &rte_eventdev_api[event_dev->data->dev_id];
 	/* Single WS modes */
-	const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
+	const rte_event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
+	const rte_event_dequeue_burst_t
+		sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
-		NIX_RX_FASTPATH_MODES
+			NIX_RX_FASTPATH_MODES
 #undef R
-	};
+		};
 
-	const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
+	const rte_event_dequeue_burst_t
+		sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
-		NIX_RX_FASTPATH_MODES
+			NIX_RX_FASTPATH_MODES
 #undef R
-	};
+		};
 
-	const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
@@ -371,28 +376,29 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		};
 
 	/* Dual WS modes */
-	const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_dual_deq_burst[2][2][2][2][2][2] = {
+	const rte_event_dequeue_burst_t
+		sso_hws_dual_deq_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
-		NIX_RX_FASTPATH_MODES
+			NIX_RX_FASTPATH_MODES
 #undef R
-	};
+		};
 
-	const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
@@ -400,14 +406,14 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef R
 		};
 
-	const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		sso_hws_dual_deq_seg_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
@@ -415,14 +421,14 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef R
 		};
 
-	const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2] = {
+	const rte_event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2] = {
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
@@ -431,7 +437,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		};
 
 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
@@ -439,7 +445,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};
 
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
@@ -447,7 +453,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};
 
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
@@ -455,7 +461,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};
 
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
@@ -463,19 +469,19 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};
 
-	event_dev->enqueue = cn9k_sso_hws_enq;
-	event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
-	event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
-	event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
+	api->enqueue = cn9k_sso_hws_enq;
+	api->enqueue_burst = cn9k_sso_hws_enq_burst;
+	api->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
+	api->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
 	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-		event_dev->dequeue = sso_hws_deq_seg
+		api->dequeue = sso_hws_deq_seg
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-		event_dev->dequeue_burst = sso_hws_deq_seg_burst
+		api->dequeue_burst = sso_hws_deq_seg_burst
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -483,7 +489,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		if (dev->is_timeout_deq) {
-			event_dev->dequeue = sso_hws_deq_tmo_seg
+			api->dequeue = sso_hws_deq_tmo_seg
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -493,7 +499,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				    NIX_RX_OFFLOAD_CHECKSUM_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst = sso_hws_deq_tmo_seg_burst
+			api->dequeue_burst = sso_hws_deq_tmo_seg_burst
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -505,14 +511,14 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		}
 	} else {
-		event_dev->dequeue = sso_hws_deq
+		api->dequeue = sso_hws_deq
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-		event_dev->dequeue_burst = sso_hws_deq_burst
+		api->dequeue_burst = sso_hws_deq_burst
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -520,7 +526,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		if (dev->is_timeout_deq) {
-			event_dev->dequeue = sso_hws_deq_tmo
+			api->dequeue = sso_hws_deq_tmo
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -530,7 +536,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				    NIX_RX_OFFLOAD_CHECKSUM_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst = sso_hws_deq_tmo_burst
+			api->dequeue_burst = sso_hws_deq_tmo_burst
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -545,7 +551,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
 		/* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
-		event_dev->txa_enqueue = sso_hws_tx_adptr_enq_seg
+		api->txa_enqueue = sso_hws_tx_adptr_enq_seg
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
@@ -553,7 +559,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
 	} else {
-		event_dev->txa_enqueue = sso_hws_tx_adptr_enq
+		api->txa_enqueue = sso_hws_tx_adptr_enq
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
@@ -563,14 +569,13 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	}
 
 	if (dev->dual_ws) {
-		event_dev->enqueue = cn9k_sso_hws_dual_enq;
-		event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
-		event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
-		event_dev->enqueue_forward_burst =
-			cn9k_sso_hws_dual_enq_fwd_burst;
+		api->enqueue = cn9k_sso_hws_dual_enq;
+		api->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
+		api->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
+		api->enqueue_forward_burst = cn9k_sso_hws_dual_enq_fwd_burst;
 
 		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-			event_dev->dequeue = sso_hws_dual_deq_seg
+			api->dequeue = sso_hws_dual_deq_seg
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -580,7 +585,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				    NIX_RX_OFFLOAD_CHECKSUM_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst = sso_hws_dual_deq_seg_burst
+			api->dequeue_burst = sso_hws_dual_deq_seg_burst
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -591,7 +596,21 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 			if (dev->is_timeout_deq) {
-				event_dev->dequeue = sso_hws_dual_deq_tmo_seg
+				api->dequeue = sso_hws_dual_deq_tmo_seg
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_RSS_F)];
+				api->dequeue_burst =
+						sso_hws_dual_deq_tmo_seg_burst
 					[!!(dev->rx_offloads &
 					    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 					[!!(dev->rx_offloads &
@@ -604,23 +623,9 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					    NIX_RX_OFFLOAD_PTYPE_F)]
 					[!!(dev->rx_offloads &
 					    NIX_RX_OFFLOAD_RSS_F)];
-				event_dev->dequeue_burst =
-					sso_hws_dual_deq_tmo_seg_burst
-						[!!(dev->rx_offloads &
-						  NIX_RX_OFFLOAD_VLAN_STRIP_F)]
-						[!!(dev->rx_offloads &
-						    NIX_RX_OFFLOAD_TSTAMP_F)]
-						[!!(dev->rx_offloads &
-						  NIX_RX_OFFLOAD_MARK_UPDATE_F)]
-						[!!(dev->rx_offloads &
-						    NIX_RX_OFFLOAD_CHECKSUM_F)]
-						[!!(dev->rx_offloads &
-						    NIX_RX_OFFLOAD_PTYPE_F)]
-						[!!(dev->rx_offloads &
-						    NIX_RX_OFFLOAD_RSS_F)];
 			}
 		} else {
-			event_dev->dequeue = sso_hws_dual_deq
+			api->dequeue = sso_hws_dual_deq
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -630,7 +635,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				    NIX_RX_OFFLOAD_CHECKSUM_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst = sso_hws_dual_deq_burst
+			api->dequeue_burst = sso_hws_dual_deq_burst
 				[!!(dev->rx_offloads &
 				    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -641,7 +646,20 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 			if (dev->is_timeout_deq) {
-				event_dev->dequeue = sso_hws_dual_deq_tmo
+				api->dequeue = sso_hws_dual_deq_tmo
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_RSS_F)];
+				api->dequeue_burst = sso_hws_dual_deq_tmo_burst
 					[!!(dev->rx_offloads &
 					    NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 					[!!(dev->rx_offloads &
@@ -654,27 +672,13 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					    NIX_RX_OFFLOAD_PTYPE_F)]
 					[!!(dev->rx_offloads &
 					    NIX_RX_OFFLOAD_RSS_F)];
-				event_dev->dequeue_burst =
-					sso_hws_dual_deq_tmo_burst
-						[!!(dev->rx_offloads &
-						  NIX_RX_OFFLOAD_VLAN_STRIP_F)]
-						[!!(dev->rx_offloads &
-						  NIX_RX_OFFLOAD_TSTAMP_F)]
-						[!!(dev->rx_offloads &
-						  NIX_RX_OFFLOAD_MARK_UPDATE_F)]
-						[!!(dev->rx_offloads &
-						  NIX_RX_OFFLOAD_CHECKSUM_F)]
-						[!!(dev->rx_offloads &
-						  NIX_RX_OFFLOAD_PTYPE_F)]
-						[!!(dev->rx_offloads &
-						  NIX_RX_OFFLOAD_RSS_F)];
 			}
 		}
 
 		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
 			/* [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM]
 			 */
-			event_dev->txa_enqueue = sso_hws_dual_tx_adptr_enq_seg
+			api->txa_enqueue = sso_hws_dual_tx_adptr_enq_seg
 				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
 				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
 				[!!(dev->tx_offloads &
@@ -686,7 +690,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 				[!!(dev->tx_offloads &
 				    NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
 		} else {
-			event_dev->txa_enqueue = sso_hws_dual_tx_adptr_enq
+			api->txa_enqueue = sso_hws_dual_tx_adptr_enq
 				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
 				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
 				[!!(dev->tx_offloads &
@@ -700,7 +704,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		}
 	}
 
-	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
+	api->txa_enqueue_same_dest = api->txa_enqueue;
 	rte_mb();
 }
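
Condensed, the cn9k_sso_fp_fns_set() rework above boils down to the pattern below (a sketch, assuming the rte_eventdev_api[] per-device fast-path table and the rte_event_dequeue_t/rte_event_dequeue_burst_t typedefs added earlier in this series): the driver still selects its template-specialised handlers exactly as before, but publishes them through the table indexed by the public dev_id instead of writing into struct rte_eventdev.

#include <rte_eventdev.h>

static void
example_fp_fns_publish(struct rte_eventdev *event_dev,
		       rte_event_dequeue_t deq,
		       rte_event_dequeue_burst_t deq_burst)
{
	struct rte_eventdev_api *api;

	/* Fast-path entry points now live in a flat per-device table that
	 * is indexed by dev_id, not in struct rte_eventdev itself.
	 */
	api = &rte_eventdev_api[event_dev->data->dev_id];
	api->dequeue = deq;
	api->dequeue_burst = deq_burst;
	api->txa_enqueue_same_dest = api->txa_enqueue;
	rte_mb(); /* publish the pointers before fast-path use */
}
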
 
diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c
index 538bc4b0b3..d0a3b684dd 100644
--- a/drivers/event/cnxk/cn9k_worker.c
+++ b/drivers/event/cnxk/cn9k_worker.c
@@ -7,9 +7,9 @@
 #include "cn9k_worker.h"
 
 uint16_t __rte_hot
-cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
+cn9k_sso_hws_enq(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
 {
-	struct cn9k_sso_hws *ws = port;
+	struct cn9k_sso_hws *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	switch (ev->op) {
 	case RTE_EVENT_OP_NEW:
@@ -28,18 +28,18 @@ cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
 }
 
 uint16_t __rte_hot
-cn9k_sso_hws_enq_burst(void *port, const struct rte_event ev[],
-		       uint16_t nb_events)
+cn9k_sso_hws_enq_burst(uint8_t dev_id, uint8_t port_id,
+		       const struct rte_event ev[], uint16_t nb_events)
 {
 	RTE_SET_USED(nb_events);
-	return cn9k_sso_hws_enq(port, ev);
+	return cn9k_sso_hws_enq(dev_id, port_id, ev);
 }
 
 uint16_t __rte_hot
-cn9k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[],
-			   uint16_t nb_events)
+cn9k_sso_hws_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+			   const struct rte_event ev[], uint16_t nb_events)
 {
-	struct cn9k_sso_hws *ws = port;
+	struct cn9k_sso_hws *ws = _rte_event_dev_prolog(dev_id, port_id);
 	uint16_t i, rc = 1;
 
 	for (i = 0; i < nb_events && rc; i++)
@@ -49,10 +49,10 @@ cn9k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[],
 }
 
 uint16_t __rte_hot
-cn9k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
-			   uint16_t nb_events)
+cn9k_sso_hws_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+			   const struct rte_event ev[], uint16_t nb_events)
 {
-	struct cn9k_sso_hws *ws = port;
+	struct cn9k_sso_hws *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	RTE_SET_USED(nb_events);
 	cn9k_sso_hws_forward_event(ws, ev);
@@ -63,9 +63,10 @@ cn9k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
 /* Dual ws ops. */
 
 uint16_t __rte_hot
-cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
+cn9k_sso_hws_dual_enq(uint8_t dev_id, uint8_t port_id,
+		      const struct rte_event *ev)
 {
-	struct cn9k_sso_hws_dual *dws = port;
+	struct cn9k_sso_hws_dual *dws = _rte_event_dev_prolog(dev_id, port_id);
 	struct cn9k_sso_hws_state *vws;
 
 	vws = &dws->ws_state[!dws->vws];
@@ -86,18 +87,18 @@ cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
 }
 
 uint16_t __rte_hot
-cn9k_sso_hws_dual_enq_burst(void *port, const struct rte_event ev[],
-			    uint16_t nb_events)
+cn9k_sso_hws_dual_enq_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
 {
 	RTE_SET_USED(nb_events);
-	return cn9k_sso_hws_dual_enq(port, ev);
+	return cn9k_sso_hws_dual_enq(dev_id, port_id, ev);
 }
 
 uint16_t __rte_hot
-cn9k_sso_hws_dual_enq_new_burst(void *port, const struct rte_event ev[],
-				uint16_t nb_events)
+cn9k_sso_hws_dual_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+				const struct rte_event ev[], uint16_t nb_events)
 {
-	struct cn9k_sso_hws_dual *dws = port;
+	struct cn9k_sso_hws_dual *dws = _rte_event_dev_prolog(dev_id, port_id);
 	uint16_t i, rc = 1;
 
 	for (i = 0; i < nb_events && rc; i++)
@@ -107,10 +108,10 @@ cn9k_sso_hws_dual_enq_new_burst(void *port, const struct rte_event ev[],
 }
 
 uint16_t __rte_hot
-cn9k_sso_hws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
-				uint16_t nb_events)
+cn9k_sso_hws_dual_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+				const struct rte_event ev[], uint16_t nb_events)
 {
-	struct cn9k_sso_hws_dual *dws = port;
+	struct cn9k_sso_hws_dual *dws = _rte_event_dev_prolog(dev_id, port_id);
 
 	RTE_SET_USED(nb_events);
 	cn9k_sso_hws_dual_forward_event(dws, &dws->ws_state[!dws->vws], ev);
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 9b2a0bf882..be9ae2a1e2 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -344,75 +344,86 @@ cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
 }
 
 /* CN9K Fastpath functions. */
-uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
-uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
+uint16_t __rte_hot cn9k_sso_hws_enq(uint8_t dev_id, uint8_t port_id,
+				    const struct rte_event *ev);
+uint16_t __rte_hot cn9k_sso_hws_enq_burst(uint8_t dev_id, uint8_t port_id,
 					  const struct rte_event ev[],
 					  uint16_t nb_events);
-uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
+uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(uint8_t dev_id, uint8_t port_id,
 					      const struct rte_event ev[],
 					      uint16_t nb_events);
-uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
+uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
 					      const struct rte_event ev[],
 					      uint16_t nb_events);
 
-uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
+uint16_t __rte_hot cn9k_sso_hws_dual_enq(uint8_t dev_id, uint8_t port_id,
 					 const struct rte_event *ev);
-uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
+uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(uint8_t dev_id, uint8_t port_id,
 					       const struct rte_event ev[],
 					       uint16_t nb_events);
-uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
+uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(uint8_t dev_id,
+						   uint8_t port_id,
 						   const struct rte_event ev[],
 						   uint16_t nb_events);
-uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
+uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(uint8_t dev_id,
+						   uint8_t port_id,
 						   const struct rte_event ev[],
 						   uint16_t nb_events);
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                        \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                    \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks);                                       \
 	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(              \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks);
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);
 
 NIX_RX_FASTPATH_MODES
 #undef R
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                   \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
-	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
 		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(               \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks);                                       \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(         \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks);
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);
 
 NIX_RX_FASTPATH_MODES
 #undef R
@@ -503,13 +514,17 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                   \
-		void *port, struct rte_event ev[], uint16_t nb_events);        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
 	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(               \
-		void *port, struct rte_event ev[], uint16_t nb_events);        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
 	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_##name(              \
-		void *port, struct rte_event ev[], uint16_t nb_events);        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
 	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_seg_##name(          \
-		void *port, struct rte_event ev[], uint16_t nb_events);
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);
 
 NIX_TX_FASTPATH_MODES
 #undef T
diff --git a/drivers/event/cnxk/cn9k_worker_deq.c b/drivers/event/cnxk/cn9k_worker_deq.c
index 51ccaf4ec4..b60740ea71 100644
--- a/drivers/event/cnxk/cn9k_worker_deq.c
+++ b/drivers/event/cnxk/cn9k_worker_deq.c
@@ -8,8 +8,10 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws *ws = port;                                \
 									       \
 		RTE_SET_USED(timeout_ticks);                                   \
@@ -24,8 +26,10 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws *ws = port;                                \
 									       \
 		RTE_SET_USED(timeout_ticks);                                   \
diff --git a/drivers/event/cnxk/cn9k_worker_deq_burst.c b/drivers/event/cnxk/cn9k_worker_deq_burst.c
index 4e2801459b..2e84683499 100644
--- a/drivers/event/cnxk/cn9k_worker_deq_burst.c
+++ b/drivers/event/cnxk/cn9k_worker_deq_burst.c
@@ -8,21 +8,23 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn9k_sso_hws_deq_##name(port, ev, timeout_ticks);       \
+		return cn9k_sso_hws_deq_##name(dev_id, port_id, ev,            \
+					       timeout_ticks);                 \
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn9k_sso_hws_deq_seg_##name(port, ev, timeout_ticks);   \
+		return cn9k_sso_hws_deq_seg_##name(dev_id, port_id, ev,        \
+						   timeout_ticks);             \
 	}
 
 NIX_RX_FASTPATH_MODES
diff --git a/drivers/event/cnxk/cn9k_worker_deq_tmo.c b/drivers/event/cnxk/cn9k_worker_deq_tmo.c
index 9713d1ef00..7c6ff30dd4 100644
--- a/drivers/event/cnxk/cn9k_worker_deq_tmo.c
+++ b/drivers/event/cnxk/cn9k_worker_deq_tmo.c
@@ -8,8 +8,10 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                        \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws *ws = port;                                \
 		uint16_t ret = 1;                                              \
 		uint64_t iter;                                                 \
@@ -29,17 +31,20 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn9k_sso_hws_deq_tmo_##name(port, ev, timeout_ticks);   \
+		return cn9k_sso_hws_deq_tmo_##name(dev_id, port_id, ev,        \
+						   timeout_ticks);             \
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                    \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws *ws = port;                                \
 		uint16_t ret = 1;                                              \
 		uint64_t iter;                                                 \
@@ -59,12 +64,12 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(              \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn9k_sso_hws_deq_tmo_seg_##name(port, ev,               \
+		return cn9k_sso_hws_deq_tmo_seg_##name(dev_id, port_id, ev,    \
 						       timeout_ticks);         \
 	}
 
diff --git a/drivers/event/cnxk/cn9k_worker_dual_deq.c b/drivers/event/cnxk/cn9k_worker_dual_deq.c
index 709fa2d9ef..14b27ea0a3 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_deq.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq.c
@@ -8,8 +8,10 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws_dual *dws = port;                          \
 		uint16_t gw;                                                   \
 									       \
@@ -29,8 +31,10 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws_dual *dws = port;                          \
 		uint16_t gw;                                                   \
 									       \
diff --git a/drivers/event/cnxk/cn9k_worker_dual_deq_burst.c b/drivers/event/cnxk/cn9k_worker_dual_deq_burst.c
index d50e1cf83f..e746deae36 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_deq_burst.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq_burst.c
@@ -8,21 +8,22 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn9k_sso_hws_dual_deq_##name(port, ev, timeout_ticks);  \
+		return cn9k_sso_hws_dual_deq_##name(dev_id, port_id, ev,       \
+						    timeout_ticks);            \
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn9k_sso_hws_dual_deq_seg_##name(port, ev,              \
+		return cn9k_sso_hws_dual_deq_seg_##name(dev_id, port_id, ev,   \
 							timeout_ticks);        \
 	}
 
diff --git a/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c b/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c
index a0508fdf0d..1db7a8dc86 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c
@@ -8,8 +8,10 @@
 
 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                   \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws_dual *dws = port;                          \
 		uint16_t ret = 1;                                              \
 		uint64_t iter;                                                 \
@@ -37,18 +39,20 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn9k_sso_hws_dual_deq_tmo_##name(port, ev,              \
+		return cn9k_sso_hws_dual_deq_tmo_##name(dev_id, port_id, ev,   \
 							timeout_ticks);        \
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(               \
-		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks)                                        \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws_dual *dws = port;                          \
 		uint16_t ret = 1;                                              \
 		uint64_t iter;                                                 \
@@ -76,13 +80,13 @@
 	}                                                                      \
 									       \
 	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(         \
-		void *port, struct rte_event ev[], uint16_t nb_events,         \
-		uint64_t timeout_ticks)                                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks)                    \
 	{                                                                      \
 		RTE_SET_USED(nb_events);                                       \
 									       \
-		return cn9k_sso_hws_dual_deq_tmo_seg_##name(port, ev,          \
-							    timeout_ticks);    \
+		return cn9k_sso_hws_dual_deq_tmo_seg_##name(                   \
+			dev_id, port_id, ev, timeout_ticks);                   \
 	}
 
 NIX_RX_FASTPATH_MODES
diff --git a/drivers/event/cnxk/cn9k_worker_dual_tx_enq.c b/drivers/event/cnxk/cn9k_worker_dual_tx_enq.c
index 92e2981f02..87cc3a40d4 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_tx_enq.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_tx_enq.c
@@ -6,8 +6,10 @@
 
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_##name(              \
-		void *port, struct rte_event ev[], uint16_t nb_events)         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events)                                            \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws_dual *ws = port;                           \
 		uint64_t cmd[sz];                                              \
 									       \
diff --git a/drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c b/drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c
index dfb574cf95..f7662431d0 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c
@@ -6,8 +6,10 @@
 
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_seg_##name(          \
-		void *port, struct rte_event ev[], uint16_t nb_events)         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events)                                            \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2];           \
 		struct cn9k_sso_hws_dual *ws = port;                           \
 									       \
diff --git a/drivers/event/cnxk/cn9k_worker_tx_enq.c b/drivers/event/cnxk/cn9k_worker_tx_enq.c
index 3df649c0c8..ca82edd3c3 100644
--- a/drivers/event/cnxk/cn9k_worker_tx_enq.c
+++ b/drivers/event/cnxk/cn9k_worker_tx_enq.c
@@ -6,8 +6,10 @@
 
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                   \
-		void *port, struct rte_event ev[], uint16_t nb_events)         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events)                                            \
 	{                                                                      \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws *ws = port;                                \
 		uint64_t cmd[sz];                                              \
 									       \
diff --git a/drivers/event/cnxk/cn9k_worker_tx_enq_seg.c b/drivers/event/cnxk/cn9k_worker_tx_enq_seg.c
index 0efe29113e..f9024ba20a 100644
--- a/drivers/event/cnxk/cn9k_worker_tx_enq_seg.c
+++ b/drivers/event/cnxk/cn9k_worker_tx_enq_seg.c
@@ -6,9 +6,11 @@
 
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(               \
-		void *port, struct rte_event ev[], uint16_t nb_events)         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events)                                            \
 	{                                                                      \
 		uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2];           \
+		void *port = _rte_event_dev_prolog(dev_id, port_id);           \
 		struct cn9k_sso_hws *ws = port;                                \
 									       \
 		RTE_SET_USED(nb_events);                                       \
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index c8742ddb2c..c69c36c5da 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1245,21 +1245,29 @@ static inline uint16_t
 dlb2_event_enqueue_delayed(void *event_port,
 			   const struct rte_event events[]);
 
+static _RTE_EVENT_ENQ_PROTO(dlb2_event_enqueue_delayed);
+
 static inline uint16_t
 dlb2_event_enqueue_burst_delayed(void *event_port,
 				 const struct rte_event events[],
 				 uint16_t num);
 
+static _RTE_EVENT_ENQ_BURST_PROTO(dlb2_event_enqueue_burst_delayed);
+
 static inline uint16_t
 dlb2_event_enqueue_new_burst_delayed(void *event_port,
 				     const struct rte_event events[],
 				     uint16_t num);
 
+static _RTE_EVENT_ENQ_BURST_PROTO(dlb2_event_enqueue_new_burst_delayed);
+
 static inline uint16_t
 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
 					 const struct rte_event events[],
 					 uint16_t num);
 
+static _RTE_EVENT_ENQ_BURST_PROTO(dlb2_event_enqueue_forward_burst_delayed);
+
 /* Generate the required bitmask for rotate-style expected QE gen bits.
  * This requires a pattern of 1's and zeros, starting with expected as
  * 1 bits, so when hardware writes 0's they're "new". This requires the
@@ -1422,13 +1430,21 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 	 * performance reasons.
 	 */
 	if (qm_port->token_pop_mode == DELAYED_POP) {
-		dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed;
-		dlb2->event_dev->enqueue_burst =
-			dlb2_event_enqueue_burst_delayed;
-		dlb2->event_dev->enqueue_new_burst =
-			dlb2_event_enqueue_new_burst_delayed;
-		dlb2->event_dev->enqueue_forward_burst =
-			dlb2_event_enqueue_forward_burst_delayed;
+		rte_event_set_enq_fn(
+			dlb2->event_dev->data->dev_id,
+			_RTE_EVENT_ENQ_FUNC(dlb2_event_enqueue_delayed));
+		rte_event_set_enq_burst_fn(
+			dlb2->event_dev->data->dev_id,
+			_RTE_EVENT_ENQ_BURST_FUNC(
+				dlb2_event_enqueue_burst_delayed));
+		rte_event_set_enq_new_burst_fn(
+			dlb2->event_dev->data->dev_id,
+			_RTE_EVENT_ENQ_BURST_FUNC(
+				dlb2_event_enqueue_new_burst_delayed));
+		rte_event_set_enq_fwd_burst_fn(
+			dlb2->event_dev->data->dev_id,
+			_RTE_EVENT_ENQ_BURST_FUNC(
+				dlb2_event_enqueue_forward_burst_delayed));
 	}
 
 	qm_port->owed_tokens = 0;
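
The delayed token-pop branch above is the DLB2 counterpart of the same change: rather than patching dlb2->event_dev->enqueue* in place, the PMD now swaps the public per-device entry points through the new setters. A reduced sketch, assuming the rte_event_set_enq*_fn() setters and the _RTE_EVENT_ENQ_FUNC()/_RTE_EVENT_ENQ_BURST_FUNC() wrapper-naming macros come from the eventdev patches earlier in this series:

static void
example_enable_delayed_pop(struct dlb2_eventdev *dlb2)
{
	uint8_t dev_id = dlb2->event_dev->data->dev_id;

	/* Swap in the delayed-pop variants; the _FUNC() macros presumably
	 * name the (dev_id, port_id) wrappers emitted by the matching
	 * _DEF() macros added further down in this file.
	 */
	rte_event_set_enq_fn(dev_id,
			     _RTE_EVENT_ENQ_FUNC(dlb2_event_enqueue_delayed));
	rte_event_set_enq_burst_fn(
		dev_id,
		_RTE_EVENT_ENQ_BURST_FUNC(dlb2_event_enqueue_burst_delayed));
}
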
@@ -2976,6 +2992,8 @@ dlb2_event_enqueue_burst(void *event_port,
 	return __dlb2_event_enqueue_burst(event_port, events, num, false);
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(dlb2_event_enqueue_burst);
+
 static uint16_t
 dlb2_event_enqueue_burst_delayed(void *event_port,
 				     const struct rte_event events[],
@@ -2984,6 +3002,8 @@ dlb2_event_enqueue_burst_delayed(void *event_port,
 	return __dlb2_event_enqueue_burst(event_port, events, num, true);
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(dlb2_event_enqueue_burst_delayed);
+
 static inline uint16_t
 dlb2_event_enqueue(void *event_port,
 		   const struct rte_event events[])
@@ -2991,6 +3011,8 @@ dlb2_event_enqueue(void *event_port,
 	return __dlb2_event_enqueue_burst(event_port, events, 1, false);
 }
 
+static _RTE_EVENT_ENQ_DEF(dlb2_event_enqueue);
+
 static inline uint16_t
 dlb2_event_enqueue_delayed(void *event_port,
 			   const struct rte_event events[])
@@ -2998,6 +3020,8 @@ dlb2_event_enqueue_delayed(void *event_port,
 	return __dlb2_event_enqueue_burst(event_port, events, 1, true);
 }
 
+static _RTE_EVENT_ENQ_DEF(dlb2_event_enqueue_delayed);
+
 static uint16_t
 dlb2_event_enqueue_new_burst(void *event_port,
 			     const struct rte_event events[],
@@ -3006,6 +3030,8 @@ dlb2_event_enqueue_new_burst(void *event_port,
 	return __dlb2_event_enqueue_burst(event_port, events, num, false);
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(dlb2_event_enqueue_new_burst);
+
 static uint16_t
 dlb2_event_enqueue_new_burst_delayed(void *event_port,
 				     const struct rte_event events[],
@@ -3014,6 +3040,8 @@ dlb2_event_enqueue_new_burst_delayed(void *event_port,
 	return __dlb2_event_enqueue_burst(event_port, events, num, true);
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(dlb2_event_enqueue_new_burst_delayed);
+
 static uint16_t
 dlb2_event_enqueue_forward_burst(void *event_port,
 				 const struct rte_event events[],
@@ -3022,6 +3050,8 @@ dlb2_event_enqueue_forward_burst(void *event_port,
 	return __dlb2_event_enqueue_burst(event_port, events, num, false);
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(dlb2_event_enqueue_forward_burst);
+
 static uint16_t
 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
 					 const struct rte_event events[],
@@ -3030,6 +3060,8 @@ dlb2_event_enqueue_forward_burst_delayed(void *event_port,
 	return __dlb2_event_enqueue_burst(event_port, events, num, true);
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(dlb2_event_enqueue_forward_burst_delayed);
+
 static void
 dlb2_event_release(struct dlb2_eventdev *dlb2,
 		   uint8_t port_id,
@@ -4062,12 +4094,16 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
 	return cnt;
 }
 
+static _RTE_EVENT_DEQ_BURST_DEF(dlb2_event_dequeue_burst);
+
 static uint16_t
 dlb2_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
 {
 	return dlb2_event_dequeue_burst(event_port, ev, 1, wait);
 }
 
+static _RTE_EVENT_DEQ_DEF(dlb2_event_dequeue);
+
 static uint16_t
 dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
 				uint16_t num, uint64_t wait)
@@ -4098,6 +4134,8 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
 	return cnt;
 }
 
+static _RTE_EVENT_DEQ_BURST_DEF(dlb2_event_dequeue_burst_sparse);
+
 static uint16_t
 dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,
 			  uint64_t wait)
@@ -4105,6 +4143,8 @@ dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,
 	return dlb2_event_dequeue_burst_sparse(event_port, ev, 1, wait);
 }
 
+static _RTE_EVENT_DEQ_DEF(dlb2_event_dequeue_sparse);
+
 static void
 dlb2_flush_port(struct rte_eventdev *dev, int port_id)
 {
@@ -4381,6 +4421,7 @@ dlb2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 static void
 dlb2_entry_points_init(struct rte_eventdev *dev)
 {
+	struct rte_eventdev_api *api;
 	struct dlb2_eventdev *dlb2;
 
 	/* Expose PMD's eventdev interface */
@@ -4409,21 +4450,27 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
 		.dev_selftest     = test_dlb2_eventdev,
 	};
 
+	api = &rte_eventdev_api[dev->data->dev_id];
 	/* Expose PMD's eventdev interface */
 
 	dev->dev_ops = &dlb2_eventdev_entry_ops;
-	dev->enqueue = dlb2_event_enqueue;
-	dev->enqueue_burst = dlb2_event_enqueue_burst;
-	dev->enqueue_new_burst = dlb2_event_enqueue_new_burst;
-	dev->enqueue_forward_burst = dlb2_event_enqueue_forward_burst;
+	api->enqueue = _RTE_EVENT_ENQ_FUNC(dlb2_event_enqueue);
+	api->enqueue_burst =
+		_RTE_EVENT_ENQ_BURST_FUNC(dlb2_event_enqueue_burst);
+	api->enqueue_new_burst =
+		_RTE_EVENT_ENQ_BURST_FUNC(dlb2_event_enqueue_new_burst);
+	api->enqueue_forward_burst =
+		_RTE_EVENT_ENQ_BURST_FUNC(dlb2_event_enqueue_forward_burst);
 
 	dlb2 = dev->data->dev_private;
 	if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE) {
-		dev->dequeue = dlb2_event_dequeue_sparse;
-		dev->dequeue_burst = dlb2_event_dequeue_burst_sparse;
+		api->dequeue = _RTE_EVENT_DEQ_FUNC(dlb2_event_dequeue_sparse);
+		api->dequeue_burst = _RTE_EVENT_DEQ_BURST_FUNC(
+			dlb2_event_dequeue_burst_sparse);
 	} else {
-		dev->dequeue = dlb2_event_dequeue;
-		dev->dequeue_burst = dlb2_event_dequeue_burst;
+		api->dequeue = _RTE_EVENT_DEQ_FUNC(dlb2_event_dequeue);
+		api->dequeue_burst =
+			_RTE_EVENT_DEQ_BURST_FUNC(dlb2_event_dequeue_burst);
 	}
 }
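The dlb2 hunks above show the pattern used throughout this series: the driver's fast-path handlers keep their original void *port signature, while the _RTE_EVENT_ENQ_PROTO/_RTE_EVENT_ENQ_BURST_PROTO and matching _DEF macros generate public-ABI wrappers that take (dev_id, port_id), and _RTE_EVENT_ENQ_FUNC()/_RTE_EVENT_ENQ_BURST_FUNC() name those wrappers when they are registered in rte_eventdev_api[] or through the rte_event_set_*_fn() helpers. The macros themselves are defined in the lib/eventdev part of the series; the sketch below is only an assumption of what one generated wrapper looks like, with an illustrative wrapper name rather than the real macro expansion.

/* Assumed shape of the wrapper emitted by
 * _RTE_EVENT_ENQ_BURST_DEF(dlb2_event_enqueue_burst): translate the public
 * (dev_id, port_id) pair into the driver's port pointer and call the
 * unchanged void *port handler. Illustrative sketch only.
 */
static uint16_t
dlb2_event_enqueue_burst_wrap(uint8_t dev_id, uint8_t port_id,
			      const struct rte_event ev[], uint16_t nb_events)
{
	void *port = _rte_event_dev_prolog(dev_id, port_id);

	return dlb2_event_enqueue_burst(port, ev, nb_events);
}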
 
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 9f14390d28..08e7f59db4 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -111,12 +111,16 @@ dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
 	return nb_events;
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(dpaa_event_enqueue_burst);
+
 static uint16_t
 dpaa_event_enqueue(void *port, const struct rte_event *ev)
 {
 	return dpaa_event_enqueue_burst(port, ev, 1);
 }
 
+static _RTE_EVENT_ENQ_DEF(dpaa_event_enqueue);
+
 static void drain_4_bytes(int fd, fd_set *fdset)
 {
 	if (FD_ISSET(fd, fdset)) {
@@ -231,12 +235,16 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	return num_frames;
 }
 
+static _RTE_EVENT_DEQ_BURST_DEF(dpaa_event_dequeue_burst);
+
 static uint16_t
 dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
 {
 	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
 }
 
+static _RTE_EVENT_DEQ_DEF(dpaa_event_dequeue);
+
 static uint16_t
 dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
 			      uint16_t nb_events, uint64_t timeout_ticks)
@@ -309,6 +317,8 @@ dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
 	return num_frames;
 }
 
+static _RTE_EVENT_DEQ_BURST_DEF(dpaa_event_dequeue_burst_intr);
+
 static uint16_t
 dpaa_event_dequeue_intr(void *port,
 			struct rte_event *ev,
@@ -317,6 +327,8 @@ dpaa_event_dequeue_intr(void *port,
 	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
 }
 
+static _RTE_EVENT_DEQ_DEF(dpaa_event_dequeue_intr);
+
 static void
 dpaa_event_dev_info_get(struct rte_eventdev *dev,
 			struct rte_event_dev_info *dev_info)
@@ -907,6 +919,8 @@ dpaa_eventdev_txa_enqueue_same_dest(void *port,
 	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
 }
 
+static _RTE_EVENT_TXA_ENQ_BURST_DEF(dpaa_eventdev_txa_enqueue_same_dest);
+
 static uint16_t
 dpaa_eventdev_txa_enqueue(void *port,
 			   struct rte_event ev[],
@@ -925,6 +939,8 @@ dpaa_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }
 
+static _RTE_EVENT_TXA_ENQ_BURST_DEF(dpaa_eventdev_txa_enqueue);
+
 static struct eventdev_ops dpaa_eventdev_ops = {
 	.dev_infos_get    = dpaa_event_dev_info_get,
 	.dev_configure    = dpaa_event_dev_configure,
@@ -995,6 +1011,7 @@ dpaa_event_dev_create(const char *name, const char *params)
 {
 	struct rte_eventdev *eventdev;
 	struct dpaa_eventdev *priv;
+	uint8_t dev_id;
 
 	eventdev = rte_event_pmd_vdev_init(name,
 					   sizeof(struct dpaa_eventdev),
@@ -1004,23 +1021,35 @@ dpaa_event_dev_create(const char *name, const char *params)
 		goto fail;
 	}
 	priv = eventdev->data->dev_private;
+	dev_id = eventdev->data->dev_id;
 
 	eventdev->dev_ops       = &dpaa_eventdev_ops;
-	eventdev->enqueue       = dpaa_event_enqueue;
-	eventdev->enqueue_burst = dpaa_event_enqueue_burst;
+	rte_event_set_enq_fn(dev_id, _RTE_EVENT_ENQ_FUNC(dpaa_event_enqueue));
+	rte_event_set_enq_burst_fn(
+		dev_id, _RTE_EVENT_ENQ_BURST_FUNC(dpaa_event_enqueue_burst));
 
 	if (dpaa_event_check_flags(params)) {
-		eventdev->dequeue	= dpaa_event_dequeue;
-		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
+		rte_event_set_deq_fn(dev_id,
+				     _RTE_EVENT_DEQ_FUNC(dpaa_event_dequeue));
+		rte_event_set_deq_burst_fn(
+			dev_id,
+			_RTE_EVENT_DEQ_BURST_FUNC(dpaa_event_dequeue_burst));
 	} else {
 		priv->intr_mode = 1;
 		eventdev->dev_ops->timeout_ticks =
 				dpaa_event_dequeue_timeout_ticks_intr;
-		eventdev->dequeue	= dpaa_event_dequeue_intr;
-		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
+		rte_event_set_deq_fn(
+			dev_id, _RTE_EVENT_DEQ_FUNC(dpaa_event_dequeue_intr));
+		rte_event_set_deq_burst_fn(
+			dev_id, _RTE_EVENT_DEQ_BURST_FUNC(
+					dpaa_event_dequeue_burst_intr));
 	}
-	eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
-	eventdev->txa_enqueue_same_dest	= dpaa_eventdev_txa_enqueue_same_dest;
+	rte_event_set_tx_adapter_enq_fn(
+		dev_id,
+		_RTE_EVENT_TXA_ENQ_BURST_FUNC(dpaa_eventdev_txa_enqueue));
+	rte_event_set_tx_adapter_enq_same_dest_fn(
+		dev_id, _RTE_EVENT_TXA_ENQ_BURST_FUNC(
+				dpaa_eventdev_txa_enqueue_same_dest));
 
 	RTE_LOG(INFO, PMD, "%s eventdev added", name);
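The dpaa changes use the rte_event_set_*_fn() helpers rather than writing into rte_eventdev_api[] directly, as dlb2 and the octeontx PMDs do below; both forms populate the same per-device fast-path table indexed by dev_id. A rough sketch of what one of these setters is assumed to reduce to follows (the real helpers live in lib/eventdev in this series and presumably also validate dev_id; the body is illustrative, not the actual implementation).

/* Illustrative sketch of rte_event_set_deq_burst_fn(): store the burst
 * dequeue handler in the per-device API table slot for dev_id.
 */
static inline void
set_deq_burst_fn_sketch(uint8_t dev_id, rte_event_dequeue_burst_t fn)
{
	rte_eventdev_api[dev_id].dequeue_burst = fn;
}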
 
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index d577f64824..1060a9dfcf 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -201,12 +201,16 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(dpaa2_eventdev_enqueue_burst);
+
 static uint16_t
 dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
 {
 	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
 }
 
+static _RTE_EVENT_ENQ_DEF(dpaa2_eventdev_enqueue);
+
 static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
 {
 	struct epoll_event epoll_ev;
@@ -362,6 +366,8 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 	return 0;
 }
 
+static _RTE_EVENT_DEQ_BURST_DEF(dpaa2_eventdev_dequeue_burst);
+
 static uint16_t
 dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
 		       uint64_t timeout_ticks)
@@ -369,6 +375,8 @@ dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
 	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
 }
 
+static _RTE_EVENT_DEQ_DEF(dpaa2_eventdev_dequeue);
+
 static void
 dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 			struct rte_event_dev_info *dev_info)
@@ -997,6 +1005,8 @@ dpaa2_eventdev_txa_enqueue_same_dest(void *port,
 	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
 }
 
+static _RTE_EVENT_TXA_ENQ_BURST_DEF(dpaa2_eventdev_txa_enqueue_same_dest);
+
 static uint16_t
 dpaa2_eventdev_txa_enqueue(void *port,
 			   struct rte_event ev[],
@@ -1015,6 +1025,8 @@ dpaa2_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }
 
+static _RTE_EVENT_TXA_ENQ_BURST_DEF(dpaa2_eventdev_txa_enqueue);
+
 static struct eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
 	.dev_configure    = dpaa2_eventdev_configure,
@@ -1088,6 +1100,7 @@ dpaa2_eventdev_create(const char *name)
 	struct dpaa2_eventdev *priv;
 	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
 	struct dpaa2_dpci_dev *dpci_dev = NULL;
+	uint8_t dev_id;
 	int ret;
 
 	eventdev = rte_event_pmd_vdev_init(name,
@@ -1099,14 +1112,32 @@ dpaa2_eventdev_create(const char *name)
 	}
 
 	eventdev->dev_ops       = &dpaa2_eventdev_ops;
-	eventdev->enqueue       = dpaa2_eventdev_enqueue;
-	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
-	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
-	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
-	eventdev->dequeue       = dpaa2_eventdev_dequeue;
-	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
-	eventdev->txa_enqueue	= dpaa2_eventdev_txa_enqueue;
-	eventdev->txa_enqueue_same_dest	= dpaa2_eventdev_txa_enqueue_same_dest;
+	dev_id = eventdev->data->dev_id;
+
+	rte_event_set_enq_fn(dev_id,
+			     _RTE_EVENT_ENQ_FUNC(dpaa2_eventdev_enqueue));
+	rte_event_set_enq_burst_fn(
+		dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(dpaa2_eventdev_enqueue_burst));
+	rte_event_set_enq_new_burst_fn(
+		dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(dpaa2_eventdev_enqueue_burst));
+	rte_event_set_enq_fwd_burst_fn(
+		dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(dpaa2_eventdev_enqueue_burst));
+
+	rte_event_set_deq_fn(dev_id,
+			     _RTE_EVENT_DEQ_FUNC(dpaa2_eventdev_dequeue));
+	rte_event_set_deq_burst_fn(
+		dev_id,
+		_RTE_EVENT_DEQ_BURST_FUNC(dpaa2_eventdev_dequeue_burst));
+
+	rte_event_set_tx_adapter_enq_fn(
+		dev_id,
+		_RTE_EVENT_TXA_ENQ_BURST_FUNC(dpaa2_eventdev_txa_enqueue));
+	rte_event_set_tx_adapter_enq_same_dest_fn(
+		dev_id, _RTE_EVENT_TXA_ENQ_BURST_FUNC(
+				dpaa2_eventdev_txa_enqueue_same_dest));
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 01f060fff3..8e9e29e363 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -420,12 +420,20 @@ static struct eventdev_ops dsw_evdev_ops = {
 	.xstats_get_by_name = dsw_xstats_get_by_name
 };
 
+static _RTE_EVENT_ENQ_DEF(dsw_event_enqueue);
+static _RTE_EVENT_ENQ_BURST_DEF(dsw_event_enqueue_burst);
+static _RTE_EVENT_ENQ_BURST_DEF(dsw_event_enqueue_new_burst);
+static _RTE_EVENT_ENQ_BURST_DEF(dsw_event_enqueue_forward_burst);
+static _RTE_EVENT_DEQ_DEF(dsw_event_dequeue);
+static _RTE_EVENT_DEQ_BURST_DEF(dsw_event_dequeue_burst);
+
 static int
 dsw_probe(struct rte_vdev_device *vdev)
 {
 	const char *name;
 	struct rte_eventdev *dev;
 	struct dsw_evdev *dsw;
+	uint8_t dev_id;
 
 	name = rte_vdev_device_name(vdev);
 
@@ -435,12 +443,20 @@ dsw_probe(struct rte_vdev_device *vdev)
 		return -EFAULT;
 
 	dev->dev_ops = &dsw_evdev_ops;
-	dev->enqueue = dsw_event_enqueue;
-	dev->enqueue_burst = dsw_event_enqueue_burst;
-	dev->enqueue_new_burst = dsw_event_enqueue_new_burst;
-	dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst;
-	dev->dequeue = dsw_event_dequeue;
-	dev->dequeue_burst = dsw_event_dequeue_burst;
+	dev_id = dev->data->dev_id;
+
+	rte_event_set_enq_fn(dev_id, _RTE_EVENT_ENQ_FUNC(dsw_event_enqueue));
+	rte_event_set_enq_burst_fn(
+		dev_id, _RTE_EVENT_ENQ_BURST_FUNC(dsw_event_enqueue_burst));
+	rte_event_set_enq_new_burst_fn(
+		dev_id, _RTE_EVENT_ENQ_BURST_FUNC(dsw_event_enqueue_new_burst));
+	rte_event_set_enq_fwd_burst_fn(
+		dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(dsw_event_enqueue_forward_burst));
+
+	rte_event_set_deq_fn(dev_id, _RTE_EVENT_DEQ_FUNC(dsw_event_dequeue));
+	rte_event_set_deq_burst_fn(
+		dev_id, _RTE_EVENT_DEQ_BURST_FUNC(dsw_event_dequeue_burst));
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index bb1056a955..9950ac9919 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -172,13 +172,13 @@ ssovf_pmd_priv(const struct rte_eventdev *eventdev)
 
 extern int otx_logtype_ssovf;
 
-uint16_t ssows_enq(void *port, const struct rte_event *ev);
-uint16_t ssows_enq_burst(void *port,
-		const struct rte_event ev[], uint16_t nb_events);
-uint16_t ssows_enq_new_burst(void *port,
-		const struct rte_event ev[], uint16_t nb_events);
-uint16_t ssows_enq_fwd_burst(void *port,
-		const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev);
+uint16_t ssows_enq_burst(uint8_t dev_id, uint8_t port_id,
+			 const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+			     const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+			     const struct rte_event ev[], uint16_t nb_events);
 typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
 void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 		ssows_handle_event_t fn, void *arg);
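The prototype changes above reflect the renamed fast-path typedefs used throughout this series: the opaque void *port argument becomes an explicit (dev_id, port_id) pair. From the signatures of the handlers assigned to api->dequeue and api->dequeue_burst later in this patch, the dequeue typedefs are expected to have roughly the shape below (reconstructed for reference, not copied from the lib/eventdev patch).

typedef uint16_t (*rte_event_dequeue_t)(uint8_t dev_id, uint8_t port_id,
					struct rte_event *ev,
					uint64_t timeout_ticks);
typedef uint16_t (*rte_event_dequeue_burst_t)(uint8_t dev_id, uint8_t port_id,
					      struct rte_event ev[],
					      uint16_t nb_events,
					      uint64_t timeout_ticks);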
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 8b056ddc5a..0d463521c6 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -93,9 +93,10 @@ ssows_release_event(struct ssows *ws)
 
 #define R(name, f2, f1, f0, flags)					     \
 static uint16_t __rte_noinline	__rte_hot				     \
-ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks)  \
+ssows_deq_ ##name(uint8_t dev_id, uint8_t port_id, struct rte_event *ev,     \
+		  uint64_t timeout_ticks)				     \
 {									     \
-	struct ssows *ws = port;					     \
+	struct ssows *ws = _rte_event_dev_prolog(dev_id, port_id);	     \
 									     \
 	RTE_SET_USED(timeout_ticks);					     \
 									     \
@@ -109,19 +110,21 @@ ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks)  \
 }									     \
 									     \
 static uint16_t __rte_hot						     \
-ssows_deq_burst_ ##name(void *port, struct rte_event ev[],		     \
+ssows_deq_burst_ ##name(uint8_t dev_id, uint8_t port_id,		     \
+			 struct rte_event ev[],				     \
 			 uint16_t nb_events, uint64_t timeout_ticks)	     \
 {									     \
 	RTE_SET_USED(nb_events);					     \
 									     \
-	return ssows_deq_ ##name(port, ev, timeout_ticks);		     \
+	return ssows_deq_ ##name(dev_id, port_id, ev, timeout_ticks);	     \
 }									     \
 									     \
 static uint16_t __rte_hot						     \
-ssows_deq_timeout_ ##name(void *port, struct rte_event *ev,		     \
+ssows_deq_timeout_ ##name(uint8_t dev_id, uint8_t port_id,		     \
+			  struct rte_event *ev,				     \
 			  uint64_t timeout_ticks)			     \
 {									     \
-	struct ssows *ws = port;					     \
+	struct ssows *ws = _rte_event_dev_prolog(dev_id, port_id);	     \
 	uint64_t iter;							     \
 	uint16_t ret = 1;						     \
 									     \
@@ -137,21 +140,23 @@ ssows_deq_timeout_ ##name(void *port, struct rte_event *ev,		     \
 }									     \
 									     \
 static uint16_t __rte_hot						     \
-ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],	     \
+ssows_deq_timeout_burst_ ##name(uint8_t dev_id, uint8_t port_id,	     \
+				struct rte_event ev[],			     \
 				uint16_t nb_events, uint64_t timeout_ticks)  \
 {									     \
 	RTE_SET_USED(nb_events);					     \
 									     \
-	return ssows_deq_timeout_ ##name(port, ev, timeout_ticks);	     \
+	return ssows_deq_timeout_ ##name(dev_id, port_id, ev,		     \
+					 timeout_ticks);		     \
 }
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 
 __rte_always_inline uint16_t __rte_hot
-ssows_enq(void *port, const struct rte_event *ev)
+ssows_enq(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
 {
-	struct ssows *ws = port;
+	struct ssows *ws = _rte_event_dev_prolog(dev_id, port_id);
 	uint16_t ret = 1;
 
 	switch (ev->op) {
@@ -172,17 +177,19 @@ ssows_enq(void *port, const struct rte_event *ev)
 }
 
 uint16_t __rte_hot
-ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+ssows_enq_burst(uint8_t dev_id, uint8_t port_id,
+		const struct rte_event ev[], uint16_t nb_events)
 {
 	RTE_SET_USED(nb_events);
-	return ssows_enq(port, ev);
+	return ssows_enq(dev_id, port_id, ev);
 }
 
 uint16_t __rte_hot
-ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+ssows_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+		    const struct rte_event ev[], uint16_t nb_events)
 {
 	uint16_t i;
-	struct ssows *ws = port;
+	struct ssows *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	rte_smp_wmb();
 	for (i = 0; i < nb_events; i++)
@@ -192,9 +199,10 @@ ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
 }
 
 uint16_t __rte_hot
-ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+ssows_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+		    const struct rte_event ev[], uint16_t nb_events)
 {
-	struct ssows *ws = port;
+	struct ssows *ws = _rte_event_dev_prolog(dev_id, port_id);
 	RTE_SET_USED(nb_events);
 
 	ssows_forward_event(ws,  ev);
@@ -311,10 +319,13 @@ __sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
 
 #define T(name, f3, f2, f1, f0, sz, flags)				     \
 static uint16_t __rte_noinline	__rte_hot				     \
-sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[],     \
-				  uint16_t nb_events)			     \
+sso_event_tx_adapter_enqueue_ ## name(uint8_t dev_id, uint8_t port_id,	     \
+				      struct rte_event ev[],		     \
+				      uint16_t nb_events)		     \
 {									     \
+	void *port = _rte_event_dev_prolog(dev_id, port_id);		     \
 	uint64_t cmd[sz];						     \
+									     \
 	return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd,	     \
 					      flags);			     \
 }
@@ -323,11 +334,12 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 
 static uint16_t __rte_hot
-ssow_crypto_adapter_enqueue(void *port, struct rte_event ev[],
-			    uint16_t nb_events)
+ssow_crypto_adapter_enqueue(uint8_t dev_id, uint8_t port_id,
+			    struct rte_event ev[], uint16_t nb_events)
 {
-	RTE_SET_USED(nb_events);
+	void *port = _rte_event_dev_prolog(dev_id, port_id);
 
+	RTE_SET_USED(nb_events);
 	return otx_crypto_adapter_enqueue(port, ev->event_ptr);
 }
 
@@ -335,15 +347,18 @@ void
 ssovf_fastpath_fns_set(struct rte_eventdev *dev)
 {
 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+	struct rte_eventdev_api *api;
+
+	api = &rte_eventdev_api[dev->data->dev_id];
 
-	dev->enqueue       = ssows_enq;
-	dev->enqueue_burst = ssows_enq_burst;
-	dev->enqueue_new_burst = ssows_enq_new_burst;
-	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
+	api->enqueue = ssows_enq;
+	api->enqueue_burst = ssows_enq_burst;
+	api->enqueue_new_burst = ssows_enq_new_burst;
+	api->enqueue_forward_burst = ssows_enq_fwd_burst;
 
-	dev->ca_enqueue = ssow_crypto_adapter_enqueue;
+	api->ca_enqueue = ssow_crypto_adapter_enqueue;
 
-	const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+	const rte_event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
 #define T(name, f3, f2, f1, f0, sz, flags)				\
 	[f3][f2][f1][f0] =  sso_event_tx_adapter_enqueue_ ##name,
 
@@ -351,16 +366,16 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};
 
-	dev->txa_enqueue = ssow_txa_enqueue
+	api->txa_enqueue = ssow_txa_enqueue
 		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
 		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
 		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
 		[!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
 
-	dev->txa_enqueue_same_dest = dev->txa_enqueue;
+	api->txa_enqueue_same_dest = api->txa_enqueue;
 
 	/* Assigning dequeue func pointers */
-	const event_dequeue_t ssow_deq[2][2][2] = {
+	const rte_event_dequeue_t ssow_deq[2][2][2] = {
 #define R(name, f2, f1, f0, flags)					\
 	[f2][f1][f0] =  ssows_deq_ ##name,
 
@@ -368,12 +383,12 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	dev->dequeue = ssow_deq
-		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
-		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
-		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+	api->dequeue =
+		ssow_deq[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
-	const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
+	const rte_event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
 #define R(name, f2, f1, f0, flags)					\
 	[f2][f1][f0] =  ssows_deq_burst_ ##name,
 
@@ -381,13 +396,13 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	dev->dequeue_burst = ssow_deq_burst
+	api->dequeue_burst = ssow_deq_burst
 		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
 		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
 		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
 	if (edev->is_timeout_deq) {
-		const event_dequeue_t ssow_deq_timeout[2][2][2] = {
+		const rte_event_dequeue_t ssow_deq_timeout[2][2][2] = {
 #define R(name, f2, f1, f0, flags)					\
 	[f2][f1][f0] =  ssows_deq_timeout_ ##name,
 
@@ -395,23 +410,24 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 		};
 
-	dev->dequeue = ssow_deq_timeout
-		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
-		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
-		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+		api->dequeue = ssow_deq_timeout
+			[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
-	const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
+		const rte_event_dequeue_burst_t
+			ssow_deq_timeout_burst[2][2][2] = {
 #define R(name, f2, f1, f0, flags)					\
 	[f2][f1][f0] =  ssows_deq_timeout_burst_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
-		};
+			};
 
-	dev->dequeue_burst = ssow_deq_timeout_burst
-		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
-		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
-		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+		api->dequeue_burst = ssow_deq_timeout_burst
+			[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 	}
 }
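In the worker functions above, the driver recovers its private port context from the new (dev_id, port_id) arguments through _rte_event_dev_prolog(). Judging by how it substitutes for the old "struct ssows *ws = port;" assignments, the helper is essentially a table lookup, along the lines of the sketch below (illustrative only; the actual inline helper is added in the lib/eventdev part of this series and may include additional debug checks).

/* Illustrative sketch of _rte_event_dev_prolog(): map the public ids back
 * to the driver's private port pointer that void *port used to carry.
 */
static inline void *
event_dev_prolog_sketch(uint8_t dev_id, uint8_t port_id)
{
	return rte_eventdevs[dev_id].data->ports[port_id];
}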
 
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 00902ebf53..41b9409d66 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -44,29 +44,32 @@ void
 sso_fastpath_fns_set(struct rte_eventdev *event_dev)
 {
 	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	struct rte_eventdev_api *api;
+
 	/* Single WS modes */
-	const event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = {
+	const rte_event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2][2] = {
+	const rte_event_dequeue_burst_t
+				ssogws_deq_burst[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = {
+	const rte_event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		ssogws_deq_timeout_burst[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -75,14 +78,14 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = {
+	const rte_event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		ssogws_deq_seg_burst[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -91,7 +94,8 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = {
+	const rte_event_dequeue_t
+				ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_deq_seg_timeout_ ##name,
@@ -99,7 +103,7 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		ssogws_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -110,14 +114,14 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 
 
 	/* Dual WS modes */
-	const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2][2] = {
+	const rte_event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		ssogws_dual_deq_burst[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -126,7 +130,8 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2][2] = {
+	const rte_event_dequeue_t
+			ssogws_dual_deq_timeout[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_deq_timeout_ ##name,
@@ -134,7 +139,7 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		ssogws_dual_deq_timeout_burst[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 	[f6][f5][f4][f3][f2][f1][f0] =					\
@@ -143,14 +148,14 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2][2] = {
+	const rte_event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		ssogws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -159,7 +164,7 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_t
+	const rte_event_dequeue_t
 		ssogws_dual_deq_seg_timeout[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -168,7 +173,7 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
-	const event_dequeue_burst_t
+	const rte_event_dequeue_burst_t
 		ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -178,7 +183,7 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 	};
 
 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -187,7 +192,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};
 
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -196,7 +201,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};
 
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -205,7 +210,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};
 
-	const event_tx_adapter_enqueue
+	const rte_event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
@@ -214,12 +219,14 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};
 
-	event_dev->enqueue			= otx2_ssogws_enq;
-	event_dev->enqueue_burst		= otx2_ssogws_enq_burst;
-	event_dev->enqueue_new_burst		= otx2_ssogws_enq_new_burst;
-	event_dev->enqueue_forward_burst	= otx2_ssogws_enq_fwd_burst;
+	api = &rte_eventdev_api[event_dev->data->dev_id];
+
+	api->enqueue			= otx2_ssogws_enq;
+	api->enqueue_burst		= otx2_ssogws_enq_burst;
+	api->enqueue_new_burst		= otx2_ssogws_enq_new_burst;
+	api->enqueue_forward_burst	= otx2_ssogws_enq_fwd_burst;
 	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-		event_dev->dequeue		= ssogws_deq_seg
+		api->dequeue		= ssogws_deq_seg
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -227,7 +234,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-		event_dev->dequeue_burst	= ssogws_deq_seg_burst
+		api->dequeue_burst	= ssogws_deq_seg_burst
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -236,7 +243,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		if (dev->is_timeout_deq) {
-			event_dev->dequeue	= ssogws_deq_seg_timeout
+			api->dequeue	= ssogws_deq_seg_timeout
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -244,7 +251,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst	=
+			api->dequeue_burst	=
 				ssogws_deq_seg_timeout_burst
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -255,7 +262,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		}
 	} else {
-		event_dev->dequeue			= ssogws_deq
+		api->dequeue			= ssogws_deq
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -263,7 +270,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-		event_dev->dequeue_burst		= ssogws_deq_burst
+		api->dequeue_burst		= ssogws_deq_burst
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -272,7 +279,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 		if (dev->is_timeout_deq) {
-			event_dev->dequeue		= ssogws_deq_timeout
+			api->dequeue		= ssogws_deq_timeout
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -280,7 +287,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst	=
+			api->dequeue_burst	=
 				ssogws_deq_timeout_burst
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
 			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -294,7 +301,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
 		/* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
-		event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
+		api->txa_enqueue = ssogws_tx_adptr_enq_seg
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
@@ -303,7 +310,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
 	} else {
-		event_dev->txa_enqueue = ssogws_tx_adptr_enq
+		api->txa_enqueue = ssogws_tx_adptr_enq
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
@@ -312,18 +319,16 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
 	}
-	event_dev->ca_enqueue = otx2_ssogws_ca_enq;
+	api->ca_enqueue = otx2_ssogws_ca_enq;
 
 	if (dev->dual_ws) {
-		event_dev->enqueue		= otx2_ssogws_dual_enq;
-		event_dev->enqueue_burst	= otx2_ssogws_dual_enq_burst;
-		event_dev->enqueue_new_burst	=
-					otx2_ssogws_dual_enq_new_burst;
-		event_dev->enqueue_forward_burst =
-					otx2_ssogws_dual_enq_fwd_burst;
+		api->enqueue		= otx2_ssogws_dual_enq;
+		api->enqueue_burst	= otx2_ssogws_dual_enq_burst;
+		api->enqueue_new_burst	= otx2_ssogws_dual_enq_new_burst;
+		api->enqueue_forward_burst = otx2_ssogws_dual_enq_fwd_burst;
 
 		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-			event_dev->dequeue	= ssogws_dual_deq_seg
+			api->dequeue	= ssogws_dual_deq_seg
 				[!!(dev->rx_offloads &
 						NIX_RX_OFFLOAD_SECURITY_F)]
 				[!!(dev->rx_offloads &
@@ -336,7 +341,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 						NIX_RX_OFFLOAD_CHECKSUM_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
+			api->dequeue_burst = ssogws_dual_deq_seg_burst
 				[!!(dev->rx_offloads &
 						NIX_RX_OFFLOAD_SECURITY_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
@@ -349,7 +354,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 			if (dev->is_timeout_deq) {
-				event_dev->dequeue	=
+				api->dequeue	=
 					ssogws_dual_deq_seg_timeout
 					[!!(dev->rx_offloads &
 						NIX_RX_OFFLOAD_SECURITY_F)]
@@ -365,7 +370,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 							NIX_RX_OFFLOAD_PTYPE_F)]
 					[!!(dev->rx_offloads &
 							NIX_RX_OFFLOAD_RSS_F)];
-				event_dev->dequeue_burst =
+				api->dequeue_burst =
 					ssogws_dual_deq_seg_timeout_burst
 					[!!(dev->rx_offloads &
 						NIX_RX_OFFLOAD_SECURITY_F)]
@@ -383,7 +388,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 							NIX_RX_OFFLOAD_RSS_F)];
 			}
 		} else {
-			event_dev->dequeue		= ssogws_dual_deq
+			api->dequeue		= ssogws_dual_deq
 				[!!(dev->rx_offloads &
 						NIX_RX_OFFLOAD_SECURITY_F)]
 				[!!(dev->rx_offloads &
@@ -396,7 +401,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 						NIX_RX_OFFLOAD_CHECKSUM_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
-			event_dev->dequeue_burst	= ssogws_dual_deq_burst
+			api->dequeue_burst	= ssogws_dual_deq_burst
 				[!!(dev->rx_offloads &
 						NIX_RX_OFFLOAD_SECURITY_F)]
 				[!!(dev->rx_offloads &
@@ -410,7 +415,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
 				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
 			if (dev->is_timeout_deq) {
-				event_dev->dequeue	=
+				api->dequeue	=
 					ssogws_dual_deq_timeout
 					[!!(dev->rx_offloads &
 						NIX_RX_OFFLOAD_SECURITY_F)]
@@ -426,7 +431,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 							NIX_RX_OFFLOAD_PTYPE_F)]
 					[!!(dev->rx_offloads &
 							NIX_RX_OFFLOAD_RSS_F)];
-				event_dev->dequeue_burst =
+				api->dequeue_burst =
 					ssogws_dual_deq_timeout_burst
 					[!!(dev->rx_offloads &
 						NIX_RX_OFFLOAD_SECURITY_F)]
@@ -447,7 +452,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 
 		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
 		/* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
-			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
+			api->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
 				[!!(dev->tx_offloads &
 						NIX_TX_OFFLOAD_SECURITY_F)]
 				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
@@ -461,7 +466,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 				[!!(dev->tx_offloads &
 						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
 		} else {
-			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
+			api->txa_enqueue = ssogws_dual_tx_adptr_enq
 				[!!(dev->tx_offloads &
 						NIX_TX_OFFLOAD_SECURITY_F)]
 				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
@@ -475,10 +480,10 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 				[!!(dev->tx_offloads &
 						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
 		}
-		event_dev->ca_enqueue = otx2_ssogws_dual_ca_enq;
+		api->ca_enqueue = otx2_ssogws_dual_ca_enq;
 	}
 
-	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
+	api->txa_enqueue_same_dest = api->txa_enqueue;
 	rte_mb();
 }
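sso_fastpath_fns_set() above keeps the existing otx2 selection scheme: every R()/T() specialisation is placed in a const multi-dimensional array, and the final handler is chosen by indexing with !!(offloads & FLAG) for each offload bit, so the feature checks are resolved once at setup rather than per dequeued event. The reduced example below shows the same technique with two stand-in flag bits; it is a self-contained illustration, not driver code.

#include <stdint.h>

/* Stand-ins for two of the NIX_RX_* offload bits used above. */
#define RX_F_CSUM (UINT64_C(1) << 0)
#define RX_F_MSEG (UINT64_C(1) << 1)

typedef int (*rx_handler_t)(void);

static int deq_plain(void)    { return 0; }
static int deq_csum(void)     { return 1; }
static int deq_seg(void)      { return 2; }
static int deq_seg_csum(void) { return 3; }

/* Pick the specialised handler once, at configuration time. */
static rx_handler_t
pick_rx_handler(uint64_t rx_offloads)
{
	static const rx_handler_t tbl[2][2] = {
		[0][0] = deq_plain,
		[0][1] = deq_csum,
		[1][0] = deq_seg,
		[1][1] = deq_seg_csum,
	};

	return tbl[!!(rx_offloads & RX_F_MSEG)]
		  [!!(rx_offloads & RX_F_CSUM)];
}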
 
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index a5d34b7df7..64ce165ac1 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -279,93 +279,98 @@ parse_kvargs_value(const char *key, const char *value, void *opaque)
 #define SSO_TX_ADPTR_ENQ_FASTPATH_FUNC	NIX_TX_FASTPATH_MODES
 
 /* Single WS API's */
-uint16_t otx2_ssogws_enq(void *port, const struct rte_event *ev);
-uint16_t otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
-			       uint16_t nb_events);
-uint16_t otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
+uint16_t otx2_ssogws_enq(uint8_t dev_id, uint8_t port_id,
+			 const struct rte_event *ev);
+uint16_t otx2_ssogws_enq_burst(uint8_t dev_id, uint8_t port_id,
+			       const struct rte_event ev[], uint16_t nb_events);
+uint16_t otx2_ssogws_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+				   const struct rte_event ev[],
 				   uint16_t nb_events);
-uint16_t otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
+uint16_t otx2_ssogws_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+				   const struct rte_event ev[],
 				   uint16_t nb_events);
 
 /* Dual WS API's */
-uint16_t otx2_ssogws_dual_enq(void *port, const struct rte_event *ev);
-uint16_t otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
+uint16_t otx2_ssogws_dual_enq(uint8_t dev_id, uint8_t port_id,
+			      const struct rte_event *ev);
+uint16_t otx2_ssogws_dual_enq_burst(uint8_t dev_id, uint8_t port_id,
+				    const struct rte_event ev[],
 				    uint16_t nb_events);
-uint16_t otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
+uint16_t otx2_ssogws_dual_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+					const struct rte_event ev[],
 					uint16_t nb_events);
-uint16_t otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
+uint16_t otx2_ssogws_dual_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+					const struct rte_event ev[],
 					uint16_t nb_events);
 
 /* Auto generated API's */
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			       \
-uint16_t otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev,	       \
-				 uint64_t timeout_ticks);		       \
-uint16_t otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[],      \
-				       uint16_t nb_events,		       \
-				       uint64_t timeout_ticks);		       \
-uint16_t otx2_ssogws_deq_timeout_ ##name(void *port,			       \
-					 struct rte_event *ev,		       \
-					 uint64_t timeout_ticks);	       \
-uint16_t otx2_ssogws_deq_timeout_burst_ ##name(void *port,		       \
-					       struct rte_event ev[],	       \
-					       uint16_t nb_events,	       \
-					       uint64_t timeout_ticks);	       \
-uint16_t otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev,	       \
-				     uint64_t timeout_ticks);		       \
-uint16_t otx2_ssogws_deq_seg_burst_ ##name(void *port,			       \
-					   struct rte_event ev[],	       \
-					   uint16_t nb_events,		       \
-					   uint64_t timeout_ticks);	       \
-uint16_t otx2_ssogws_deq_seg_timeout_ ##name(void *port,		       \
-					     struct rte_event *ev,	       \
-					     uint64_t timeout_ticks);	       \
-uint16_t otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port,		       \
-						   struct rte_event ev[],      \
-						   uint16_t nb_events,	       \
-						   uint64_t timeout_ticks);    \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	uint16_t otx2_ssogws_deq_##name(uint8_t dev_id, uint8_t port_id,       \
+					struct rte_event *ev,                  \
+					uint64_t timeout_ticks);               \
+	uint16_t otx2_ssogws_deq_burst_##name(                                 \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
+	uint16_t otx2_ssogws_deq_timeout_##name(                               \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t otx2_ssogws_deq_timeout_burst_##name(                         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
+	uint16_t otx2_ssogws_deq_seg_##name(uint8_t dev_id, uint8_t port_id,   \
+					    struct rte_event *ev,              \
+					    uint64_t timeout_ticks);           \
+	uint16_t otx2_ssogws_deq_seg_burst_##name(                             \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
+	uint16_t otx2_ssogws_deq_seg_timeout_##name(                           \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t otx2_ssogws_deq_seg_timeout_burst_##name(                     \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
 									       \
-uint16_t otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev,	       \
-				      uint64_t timeout_ticks);		       \
-uint16_t otx2_ssogws_dual_deq_burst_ ##name(void *port,			       \
-					    struct rte_event ev[],	       \
-					    uint16_t nb_events,		       \
-					    uint64_t timeout_ticks);	       \
-uint16_t otx2_ssogws_dual_deq_timeout_ ##name(void *port,		       \
-					      struct rte_event *ev,	       \
-					      uint64_t timeout_ticks);	       \
-uint16_t otx2_ssogws_dual_deq_timeout_burst_ ##name(void *port,		       \
-						    struct rte_event ev[],     \
-						    uint16_t nb_events,	       \
-						    uint64_t timeout_ticks);   \
-uint16_t otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev,    \
-					  uint64_t timeout_ticks);	       \
-uint16_t otx2_ssogws_dual_deq_seg_burst_ ##name(void *port,		       \
-						struct rte_event ev[],	       \
-						uint16_t nb_events,	       \
-						uint64_t timeout_ticks);       \
-uint16_t otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port,		       \
-						  struct rte_event *ev,	       \
-						  uint64_t timeout_ticks);     \
-uint16_t otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port,	       \
-							struct rte_event ev[], \
-							uint16_t nb_events,    \
-						       uint64_t timeout_ticks);\
+	uint16_t otx2_ssogws_dual_deq_##name(uint8_t dev_id, uint8_t port_id,  \
+					     struct rte_event *ev,             \
+					     uint64_t timeout_ticks);          \
+	uint16_t otx2_ssogws_dual_deq_burst_##name(                            \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
+	uint16_t otx2_ssogws_dual_deq_timeout_##name(                          \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t otx2_ssogws_dual_deq_timeout_burst_##name(                    \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
+	uint16_t otx2_ssogws_dual_deq_seg_##name(                              \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t otx2_ssogws_dual_deq_seg_burst_##name(                        \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);                   \
+	uint16_t otx2_ssogws_dual_deq_seg_timeout_##name(                      \
+		uint8_t dev_id, uint8_t port_id, struct rte_event *ev,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t otx2_ssogws_dual_deq_seg_timeout_burst_##name(                \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events, uint64_t timeout_ticks);
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			     \
-uint16_t otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],\
-					   uint16_t nb_events);		     \
-uint16_t otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port,		     \
-					       struct rte_event ev[],	     \
-					       uint16_t nb_events);	     \
-uint16_t otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port,		     \
-						struct rte_event ev[],	     \
-						uint16_t nb_events);	     \
-uint16_t otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port,		     \
-						    struct rte_event ev[],   \
-						    uint16_t nb_events);     \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	uint16_t otx2_ssogws_tx_adptr_enq_##name(                              \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
+	uint16_t otx2_ssogws_tx_adptr_enq_seg_##name(                          \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
+	uint16_t otx2_ssogws_dual_tx_adptr_enq_##name(                         \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);                                           \
+	uint16_t otx2_ssogws_dual_tx_adptr_enq_seg_##name(                     \
+		uint8_t dev_id, uint8_t port_id, struct rte_event ev[],        \
+		uint16_t nb_events);
 
 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
diff --git a/drivers/event/octeontx2/otx2_evdev_crypto_adptr_tx.h b/drivers/event/octeontx2/otx2_evdev_crypto_adptr_tx.h
index ecf7eb9f56..b9b60a9667 100644
--- a/drivers/event/octeontx2/otx2_evdev_crypto_adptr_tx.h
+++ b/drivers/event/octeontx2/otx2_evdev_crypto_adptr_tx.h
@@ -62,9 +62,10 @@ otx2_ca_enq(uintptr_t tag_op, const struct rte_event *ev)
 }
 
 static uint16_t __rte_hot
-otx2_ssogws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
+otx2_ssogws_ca_enq(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+		   uint16_t nb_events)
 {
-	struct otx2_ssogws *ws = port;
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	RTE_SET_USED(nb_events);
 
@@ -72,9 +73,10 @@ otx2_ssogws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
 }
 
 static uint16_t __rte_hot
-otx2_ssogws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
+otx2_ssogws_dual_ca_enq(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+			uint16_t nb_events)
 {
-	struct otx2_ssogws_dual *ws = port;
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	RTE_SET_USED(nb_events);
 
diff --git a/drivers/event/octeontx2/otx2_worker.c b/drivers/event/octeontx2/otx2_worker.c
index 95139d27a3..8ea41368e7 100644
--- a/drivers/event/octeontx2/otx2_worker.c
+++ b/drivers/event/octeontx2/otx2_worker.c
@@ -76,11 +76,12 @@ otx2_ssogws_forward_event(struct otx2_ssogws *ws, const struct rte_event *ev)
 }
 
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
-uint16_t __rte_hot								\
-otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev,		\
+uint16_t __rte_hot							\
+otx2_ssogws_deq_ ##name(uint8_t dev_id, uint8_t port_id,		\
+			struct rte_event *ev,				\
 			uint64_t timeout_ticks)				\
 {									\
-	struct otx2_ssogws *ws = port;					\
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);\
 									\
 	RTE_SET_USED(timeout_ticks);					\
 									\
@@ -93,21 +94,24 @@ otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev,		\
 	return otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem);	\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[],	\
+uint16_t __rte_hot							\
+otx2_ssogws_deq_burst_ ##name(uint8_t dev_id, uint8_t port_id,		\
+			      struct rte_event ev[],			\
 			      uint16_t nb_events,			\
 			      uint64_t timeout_ticks)			\
 {									\
 	RTE_SET_USED(nb_events);					\
 									\
-	return otx2_ssogws_deq_ ##name(port, ev, timeout_ticks);	\
+	return otx2_ssogws_deq_ ##name(dev_id, port_id, ev,		\
+				       timeout_ticks);			\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_deq_timeout_ ##name(void *port, struct rte_event *ev,	\
+uint16_t __rte_hot							\
+otx2_ssogws_deq_timeout_ ##name(uint8_t dev_id, uint8_t port_id,	\
+				struct rte_event *ev,			\
 				uint64_t timeout_ticks)			\
 {									\
-	struct otx2_ssogws *ws = port;					\
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);\
 	uint16_t ret = 1;						\
 	uint64_t iter;							\
 									\
@@ -125,21 +129,24 @@ otx2_ssogws_deq_timeout_ ##name(void *port, struct rte_event *ev,	\
 	return ret;							\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],\
+uint16_t __rte_hot							\
+otx2_ssogws_deq_timeout_burst_ ##name(uint8_t dev_id, uint8_t port_id,	\
+				      struct rte_event ev[],		\
 				      uint16_t nb_events,		\
 				      uint64_t timeout_ticks)		\
 {									\
 	RTE_SET_USED(nb_events);					\
 									\
-	return otx2_ssogws_deq_timeout_ ##name(port, ev, timeout_ticks);\
+	return otx2_ssogws_deq_timeout_ ##name(dev_id, port_id,		\
+					       ev, timeout_ticks);	\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev,		\
+uint16_t __rte_hot							\
+otx2_ssogws_deq_seg_ ##name(uint8_t dev_id, uint8_t port_id,		\
+			    struct rte_event *ev,			\
 			    uint64_t timeout_ticks)			\
 {									\
-	struct otx2_ssogws *ws = port;					\
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);\
 									\
 	RTE_SET_USED(timeout_ticks);					\
 									\
@@ -153,21 +160,24 @@ otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev,		\
 				    ws->lookup_mem);			\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_deq_seg_burst_ ##name(void *port, struct rte_event ev[],	\
+uint16_t __rte_hot							\
+otx2_ssogws_deq_seg_burst_ ##name(uint8_t dev_id, uint8_t port_id,	\
+				  struct rte_event ev[],		\
 				  uint16_t nb_events,			\
 				  uint64_t timeout_ticks)		\
 {									\
 	RTE_SET_USED(nb_events);					\
 									\
-	return otx2_ssogws_deq_seg_ ##name(port, ev, timeout_ticks);	\
+	return otx2_ssogws_deq_seg_ ##name(dev_id, port_id, ev,		\
+					   timeout_ticks);		\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_deq_seg_timeout_ ##name(void *port, struct rte_event *ev,	\
+uint16_t __rte_hot							\
+otx2_ssogws_deq_seg_timeout_ ##name(uint8_t dev_id, uint8_t port_id,	\
+				    struct rte_event *ev,		\
 				    uint64_t timeout_ticks)		\
 {									\
-	struct otx2_ssogws *ws = port;					\
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);\
 	uint16_t ret = 1;						\
 	uint64_t iter;							\
 									\
@@ -187,15 +197,16 @@ otx2_ssogws_deq_seg_timeout_ ##name(void *port, struct rte_event *ev,	\
 	return ret;							\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port,			\
+uint16_t __rte_hot							\
+otx2_ssogws_deq_seg_timeout_burst_ ##name(uint8_t dev_id,		\
+					  uint8_t port_id,		\
 					  struct rte_event ev[],	\
 					  uint16_t nb_events,		\
 					  uint64_t timeout_ticks)	\
 {									\
 	RTE_SET_USED(nb_events);					\
 									\
-	return otx2_ssogws_deq_seg_timeout_ ##name(port, ev,		\
+	return otx2_ssogws_deq_seg_timeout_ ##name(dev_id, port_id, ev,	\
 						   timeout_ticks);	\
 }
 
@@ -203,9 +214,9 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 
 uint16_t __rte_hot
-otx2_ssogws_enq(void *port, const struct rte_event *ev)
+otx2_ssogws_enq(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
 {
-	struct otx2_ssogws *ws = port;
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	switch (ev->op) {
 	case RTE_EVENT_OP_NEW:
@@ -225,18 +236,20 @@ otx2_ssogws_enq(void *port, const struct rte_event *ev)
 }
 
 uint16_t __rte_hot
-otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
+otx2_ssogws_enq_burst(uint8_t dev_id, uint8_t port_id,
+		      const struct rte_event ev[],
 		      uint16_t nb_events)
 {
 	RTE_SET_USED(nb_events);
-	return otx2_ssogws_enq(port, ev);
+	return otx2_ssogws_enq(dev_id, port_id, ev);
 }
 
 uint16_t __rte_hot
-otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
+otx2_ssogws_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+			  const struct rte_event ev[],
 			  uint16_t nb_events)
 {
-	struct otx2_ssogws *ws = port;
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);
 	uint16_t i, rc = 1;
 
 	rte_smp_mb();
@@ -250,10 +263,11 @@ otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
 }
 
 uint16_t __rte_hot
-otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
+otx2_ssogws_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+			  const struct rte_event ev[],
 			  uint16_t nb_events)
 {
-	struct otx2_ssogws *ws = port;
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);
 
 	RTE_SET_USED(nb_events);
 	otx2_ssogws_forward_event(ws,  ev);
@@ -263,10 +277,11 @@ otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
 
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 uint16_t __rte_hot							\
-otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],	\
+otx2_ssogws_tx_adptr_enq_ ## name(uint8_t dev_id, uint8_t port_id,	\
+				  struct rte_event ev[],		\
 				  uint16_t nb_events)			\
 {									\
-	struct otx2_ssogws *ws = port;					\
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);\
 	uint64_t cmd[sz];						\
 									\
 	RTE_SET_USED(nb_events);					\
@@ -281,11 +296,12 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 uint16_t __rte_hot							\
-otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[],\
+otx2_ssogws_tx_adptr_enq_seg_ ## name(uint8_t dev_id, uint8_t port_id,	\
+				      struct rte_event ev[],		\
 				      uint16_t nb_events)		\
 {									\
 	uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];			\
-	struct otx2_ssogws *ws = port;					\
+	struct otx2_ssogws *ws = _rte_event_dev_prolog(dev_id, port_id);\
 									\
 	RTE_SET_USED(nb_events);					\
 	return otx2_ssogws_event_tx(ws->base, &ev[0], cmd,		\
diff --git a/drivers/event/octeontx2/otx2_worker_dual.c b/drivers/event/octeontx2/otx2_worker_dual.c
index 81af4ca904..b34160a265 100644
--- a/drivers/event/octeontx2/otx2_worker_dual.c
+++ b/drivers/event/octeontx2/otx2_worker_dual.c
@@ -80,9 +80,10 @@ otx2_ssogws_dual_forward_event(struct otx2_ssogws_dual *ws,
 }
 
 uint16_t __rte_hot
-otx2_ssogws_dual_enq(void *port, const struct rte_event *ev)
+otx2_ssogws_dual_enq(uint8_t dev_id, uint8_t port_id,
+		     const struct rte_event *ev)
 {
-	struct otx2_ssogws_dual *ws = port;
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id, port_id);
 	struct otx2_ssogws_state *vws = &ws->ws_state[!ws->vws];
 
 	switch (ev->op) {
@@ -103,18 +104,20 @@ otx2_ssogws_dual_enq(void *port, const struct rte_event *ev)
 }
 
 uint16_t __rte_hot
-otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
+otx2_ssogws_dual_enq_burst(uint8_t dev_id, uint8_t port_id,
+			   const struct rte_event ev[],
 			   uint16_t nb_events)
 {
 	RTE_SET_USED(nb_events);
-	return otx2_ssogws_dual_enq(port, ev);
+	return otx2_ssogws_dual_enq(dev_id, port_id, ev);
 }
 
 uint16_t __rte_hot
-otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
+otx2_ssogws_dual_enq_new_burst(uint8_t dev_id, uint8_t port_id,
+			       const struct rte_event ev[],
 			       uint16_t nb_events)
 {
-	struct otx2_ssogws_dual *ws = port;
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id, port_id);
 	uint16_t i, rc = 1;
 
 	rte_smp_mb();
@@ -128,10 +131,11 @@ otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
 }
 
 uint16_t __rte_hot
-otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
+otx2_ssogws_dual_enq_fwd_burst(uint8_t dev_id, uint8_t port_id,
+			       const struct rte_event ev[],
 			       uint16_t nb_events)
 {
-	struct otx2_ssogws_dual *ws = port;
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id, port_id);
 	struct otx2_ssogws_state *vws = &ws->ws_state[!ws->vws];
 
 	RTE_SET_USED(nb_events);
@@ -141,11 +145,13 @@ otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
 }
 
 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
-uint16_t __rte_hot								\
-otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev,		\
+uint16_t __rte_hot							\
+otx2_ssogws_dual_deq_ ##name(uint8_t dev_id, uint8_t port_id,		\
+			     struct rte_event *ev,			\
 			     uint64_t timeout_ticks)			\
 {									\
-	struct otx2_ssogws_dual *ws = port;				\
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id,	\
+							    port_id);	\
 	uint8_t gw;							\
 									\
 	rte_prefetch_non_temporal(ws);					\
@@ -166,21 +172,25 @@ otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev,		\
 	return gw;							\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_dual_deq_burst_ ##name(void *port, struct rte_event ev[],	\
+uint16_t __rte_hot							\
+otx2_ssogws_dual_deq_burst_ ##name(uint8_t dev_id, uint8_t port_id,	\
+				   struct rte_event ev[],		\
 				   uint16_t nb_events,			\
 				   uint64_t timeout_ticks)		\
 {									\
 	RTE_SET_USED(nb_events);					\
 									\
-	return otx2_ssogws_dual_deq_ ##name(port, ev, timeout_ticks);	\
+	return otx2_ssogws_dual_deq_ ##name(dev_id, port_id, ev,	\
+					    timeout_ticks);		\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_dual_deq_timeout_ ##name(void *port, struct rte_event *ev,	\
+uint16_t __rte_hot							\
+otx2_ssogws_dual_deq_timeout_ ##name(uint8_t dev_id, uint8_t port_id,	\
+				     struct rte_event *ev,		\
 				     uint64_t timeout_ticks)		\
 {									\
-	struct otx2_ssogws_dual *ws = port;				\
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id,	\
+							    port_id);	\
 	uint64_t iter;							\
 	uint8_t gw;							\
 									\
@@ -208,23 +218,26 @@ otx2_ssogws_dual_deq_timeout_ ##name(void *port, struct rte_event *ev,	\
 	return gw;							\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_dual_deq_timeout_burst_ ##name(void *port,			\
+uint16_t __rte_hot							\
+otx2_ssogws_dual_deq_timeout_burst_ ##name(uint8_t dev_id,		\
+					   uint8_t port_id,		\
 					   struct rte_event ev[],	\
 					   uint16_t nb_events,		\
 					   uint64_t timeout_ticks)	\
 {									\
 	RTE_SET_USED(nb_events);					\
 									\
-	return otx2_ssogws_dual_deq_timeout_ ##name(port, ev,		\
+	return otx2_ssogws_dual_deq_timeout_ ##name(dev_id, port_id, ev,\
 						    timeout_ticks);	\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev,	\
+uint16_t __rte_hot							\
+otx2_ssogws_dual_deq_seg_ ##name(uint8_t dev_id, uint8_t port_id,	\
+				 struct rte_event *ev,			\
 				 uint64_t timeout_ticks)		\
 {									\
-	struct otx2_ssogws_dual *ws = port;				\
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id,	\
+							    port_id);	\
 	uint8_t gw;							\
 									\
 	RTE_SET_USED(timeout_ticks);					\
@@ -245,24 +258,26 @@ otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev,	\
 	return gw;							\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_dual_deq_seg_burst_ ##name(void *port,			\
+uint16_t __rte_hot							\
+otx2_ssogws_dual_deq_seg_burst_ ##name(uint8_t dev_id, uint8_t port_id,	\
 				       struct rte_event ev[],		\
 				       uint16_t nb_events,		\
 				       uint64_t timeout_ticks)		\
 {									\
 	RTE_SET_USED(nb_events);					\
 									\
-	return otx2_ssogws_dual_deq_seg_ ##name(port, ev,		\
+	return otx2_ssogws_dual_deq_seg_ ##name(dev_id, port_id, ev,	\
 						timeout_ticks);		\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port,			\
+uint16_t __rte_hot							\
+otx2_ssogws_dual_deq_seg_timeout_ ##name(uint8_t dev_id,		\
+					 uint8_t port_id,		\
 					 struct rte_event *ev,		\
 					 uint64_t timeout_ticks)	\
 {									\
-	struct otx2_ssogws_dual *ws = port;				\
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id,	\
+							    port_id);	\
 	uint64_t iter;							\
 	uint8_t gw;							\
 									\
@@ -292,15 +307,17 @@ otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port,			\
 	return gw;							\
 }									\
 									\
-uint16_t __rte_hot								\
-otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port,		\
+uint16_t __rte_hot							\
+otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(uint8_t dev_id,		\
+					       uint8_t port_id,		\
 					       struct rte_event ev[],	\
 					       uint16_t nb_events,	\
 					       uint64_t timeout_ticks)	\
 {									\
 	RTE_SET_USED(nb_events);					\
 									\
-	return otx2_ssogws_dual_deq_seg_timeout_ ##name(port, ev,	\
+	return otx2_ssogws_dual_deq_seg_timeout_ ##name(dev_id, port_id,\
+							ev,		\
 							timeout_ticks);	\
 }
 
@@ -309,11 +326,12 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 uint16_t __rte_hot							\
-otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port,			\
+otx2_ssogws_dual_tx_adptr_enq_ ## name(uint8_t dev_id, uint8_t port_id,	\
 				       struct rte_event ev[],		\
 				       uint16_t nb_events)		\
 {									\
-	struct otx2_ssogws_dual *ws = port;				\
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id,	\
+							    port_id);	\
 	uint64_t cmd[sz];						\
 									\
 	RTE_SET_USED(nb_events);					\
@@ -327,12 +345,14 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 uint16_t __rte_hot							\
-otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port,			\
+otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(uint8_t dev_id,		\
+					   uint8_t port_id,		\
 					   struct rte_event ev[],	\
 					   uint16_t nb_events)		\
 {									\
 	uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];			\
-	struct otx2_ssogws_dual *ws = port;				\
+	struct otx2_ssogws_dual *ws = _rte_event_dev_prolog(dev_id,	\
+							    port_id);	\
 									\
 	RTE_SET_USED(nb_events);					\
 	return otx2_ssogws_event_tx(ws->base[!ws->vws], &ev[0],		\
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 739dc64c82..c3d293ea4b 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -606,6 +606,11 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
 	return 0;
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(opdl_event_enqueue_burst);
+static _RTE_EVENT_ENQ_DEF(opdl_event_enqueue);
+static _RTE_EVENT_DEQ_BURST_DEF(opdl_event_dequeue_burst);
+static _RTE_EVENT_DEQ_DEF(opdl_event_dequeue);
+
 static int
 opdl_probe(struct rte_vdev_device *vdev)
 {
@@ -712,12 +717,23 @@ opdl_probe(struct rte_vdev_device *vdev)
 
 	dev->dev_ops = &evdev_opdl_ops;
 
-	dev->enqueue = opdl_event_enqueue;
-	dev->enqueue_burst = opdl_event_enqueue_burst;
-	dev->enqueue_new_burst = opdl_event_enqueue_burst;
-	dev->enqueue_forward_burst = opdl_event_enqueue_burst;
-	dev->dequeue = opdl_event_dequeue;
-	dev->dequeue_burst = opdl_event_dequeue_burst;
+	rte_event_set_enq_fn(dev->data->dev_id,
+			     _RTE_EVENT_ENQ_FUNC(opdl_event_enqueue));
+	rte_event_set_enq_burst_fn(
+		dev->data->dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(opdl_event_enqueue_burst));
+	rte_event_set_enq_new_burst_fn(
+		dev->data->dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(opdl_event_enqueue_burst));
+	rte_event_set_enq_fwd_burst_fn(
+		dev->data->dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(opdl_event_enqueue_burst));
+
+	rte_event_set_deq_fn(dev->data->dev_id,
+			     _RTE_EVENT_DEQ_FUNC(opdl_event_dequeue));
+	rte_event_set_deq_burst_fn(
+		dev->data->dev_id,
+		_RTE_EVENT_DEQ_BURST_FUNC(opdl_event_dequeue_burst));
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index c9e17e7cb1..a781bdb0f9 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -338,6 +338,11 @@ static struct eventdev_ops skeleton_eventdev_ops = {
 	.dump             = skeleton_eventdev_dump
 };
 
+static _RTE_EVENT_ENQ_DEF(skeleton_eventdev_enqueue);
+static _RTE_EVENT_ENQ_BURST_DEF(skeleton_eventdev_enqueue_burst);
+static _RTE_EVENT_DEQ_DEF(skeleton_eventdev_dequeue);
+static _RTE_EVENT_DEQ_BURST_DEF(skeleton_eventdev_dequeue_burst);
+
 static int
 skeleton_eventdev_init(struct rte_eventdev *eventdev)
 {
@@ -347,11 +352,17 @@ skeleton_eventdev_init(struct rte_eventdev *eventdev)
 
 	PMD_DRV_FUNC_TRACE();
 
-	eventdev->dev_ops       = &skeleton_eventdev_ops;
-	eventdev->enqueue       = skeleton_eventdev_enqueue;
-	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
-	eventdev->dequeue       = skeleton_eventdev_dequeue;
-	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+	rte_event_set_enq_fn(eventdev->data->dev_id,
+			     _RTE_EVENT_ENQ_FUNC(skeleton_eventdev_enqueue));
+	rte_event_set_enq_burst_fn(
+		eventdev->data->dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(skeleton_eventdev_enqueue_burst));
+
+	rte_event_set_deq_fn(eventdev->data->dev_id,
+			     _RTE_EVENT_DEQ_FUNC(skeleton_eventdev_dequeue));
+	rte_event_set_deq_burst_fn(
+		eventdev->data->dev_id,
+		_RTE_EVENT_DEQ_BURST_FUNC(skeleton_eventdev_dequeue_burst));
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -438,10 +449,18 @@ skeleton_eventdev_create(const char *name, int socket_id)
 	}
 
 	eventdev->dev_ops       = &skeleton_eventdev_ops;
-	eventdev->enqueue       = skeleton_eventdev_enqueue;
-	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
-	eventdev->dequeue       = skeleton_eventdev_dequeue;
-	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+	rte_event_set_enq_fn(eventdev->data->dev_id,
+			     _RTE_EVENT_ENQ_FUNC(skeleton_eventdev_enqueue));
+	rte_event_set_enq_burst_fn(
+		eventdev->data->dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(skeleton_eventdev_enqueue_burst));
+
+	rte_event_set_deq_fn(eventdev->data->dev_id,
+			     _RTE_EVENT_DEQ_FUNC(skeleton_eventdev_dequeue));
+	rte_event_set_deq_burst_fn(
+		eventdev->data->dev_id,
+		_RTE_EVENT_DEQ_BURST_FUNC(skeleton_eventdev_dequeue_burst));
 
 	return 0;
 fail:
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 9b72073322..494769fd06 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -942,6 +942,11 @@ static int32_t sw_sched_service_func(void *args)
 	return 0;
 }
 
+static _RTE_EVENT_ENQ_BURST_DEF(sw_event_enqueue_burst);
+static _RTE_EVENT_ENQ_DEF(sw_event_enqueue);
+static _RTE_EVENT_DEQ_BURST_DEF(sw_event_dequeue_burst);
+static _RTE_EVENT_DEQ_DEF(sw_event_dequeue);
+
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
@@ -1085,12 +1090,24 @@ sw_probe(struct rte_vdev_device *vdev)
 		return -EFAULT;
 	}
 	dev->dev_ops = &evdev_sw_ops;
-	dev->enqueue = sw_event_enqueue;
-	dev->enqueue_burst = sw_event_enqueue_burst;
-	dev->enqueue_new_burst = sw_event_enqueue_burst;
-	dev->enqueue_forward_burst = sw_event_enqueue_burst;
-	dev->dequeue = sw_event_dequeue;
-	dev->dequeue_burst = sw_event_dequeue_burst;
+
+	rte_event_set_enq_fn(dev->data->dev_id,
+			     _RTE_EVENT_ENQ_FUNC(sw_event_enqueue));
+	rte_event_set_enq_burst_fn(
+		dev->data->dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(sw_event_enqueue_burst));
+	rte_event_set_enq_new_burst_fn(
+		dev->data->dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(sw_event_enqueue_burst));
+	rte_event_set_enq_fwd_burst_fn(
+		dev->data->dev_id,
+		_RTE_EVENT_ENQ_BURST_FUNC(sw_event_enqueue_burst));
+
+	rte_event_set_deq_fn(dev->data->dev_id,
+			     _RTE_EVENT_DEQ_FUNC(sw_event_dequeue));
+	rte_event_set_deq_burst_fn(
+		dev->data->dev_id,
+		_RTE_EVENT_DEQ_BURST_FUNC(sw_event_dequeue_burst));
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 08/15] eventdev: hide event device related structures
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (5 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 07/15] eventdev: make drivers to use new API pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-23 19:40 ` [dpdk-dev] [RFC 09/15] eventdev: hide timer adapter pmd file pbhagavatula
                   ` (9 subsequent siblings)
  16 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Timothy McDaniel, Mattias Rönnblom, Pavan Nikhilesh,
	Harman Kalra
  Cc: konstantin.ananyev, dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the rte_eventdev and rte_eventdev_data structures from
rte_eventdev_core.h to eventdev_pmd.h so that they are visible only to
PMDs.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/dlb2/dlb2_inline_fns.h   |   2 +
 drivers/event/dsw/dsw_evdev.h          |   2 +
 drivers/event/octeontx/timvf_worker.h  |   2 +
 drivers/net/octeontx/octeontx_ethdev.c |   3 +-
 lib/eventdev/eventdev_pmd.h            |  71 ++++++++++++++++
 lib/eventdev/rte_eventdev.c            |  22 -----
 lib/eventdev/rte_eventdev_core.h       | 113 -------------------------
 7 files changed, 79 insertions(+), 136 deletions(-)
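
For reference, a minimal sketch of what the move means for a driver
source file; the callback name below is a placeholder and not part of
this patch:

    /* After this change <rte_eventdev.h> no longer exposes the device
     * structures; a PMD has to include the internal header instead.
     */
    #include <errno.h>
    #include <eventdev_pmd.h>   /* struct rte_eventdev, rte_eventdev_data */

    static int
    my_pmd_setup(struct rte_eventdev *dev)  /* placeholder PMD callback */
    {
            struct rte_eventdev_data *data = dev->data;

            /* Per-device state lives in the shared-memory data block. */
            return data->nb_ports != 0 ? 0 : -EINVAL;
    }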

diff --git a/drivers/event/dlb2/dlb2_inline_fns.h b/drivers/event/dlb2/dlb2_inline_fns.h
index ac8d01aa98..1429281cfd 100644
--- a/drivers/event/dlb2/dlb2_inline_fns.h
+++ b/drivers/event/dlb2/dlb2_inline_fns.h
@@ -5,6 +5,8 @@
 #ifndef _DLB2_INLINE_FNS_H_
 #define _DLB2_INLINE_FNS_H_
 
+#include <eventdev_pmd.h>
+
 /* Inline functions required in more than one source file. */
 
 static inline struct dlb2_eventdev *
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 08889a0990..631daea55c 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -5,6 +5,8 @@
 #ifndef _DSW_EVDEV_H_
 #define _DSW_EVDEV_H_
 
+#include <eventdev_pmd.h>
+
 #include <rte_event_ring.h>
 #include <rte_eventdev.h>
 
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index dede1a4a4f..3f1e77f1d1 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -2,6 +2,8 @@
  * Copyright(c) 2017 Cavium, Inc
  */
 
+#include <eventdev_pmd.h>
+
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 9f4c0503b4..c55304839e 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -9,13 +9,14 @@
 #include <string.h>
 #include <unistd.h>
 
+#include <eventdev_pmd.h>
 #include <rte_alarm.h>
 #include <rte_branch_prediction.h>
 #include <rte_bus_vdev.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
-#include <rte_devargs.h>
 #include <rte_dev.h>
+#include <rte_devargs.h>
 #include <rte_kvargs.h>
 #include <rte_malloc.h>
 #include <rte_mbuf_pool_ops.h>
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index f3a221e688..a7521fbd93 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -84,6 +84,9 @@ extern "C" {
 #define RTE_EVENTDEV_DETACHED  (0)
 #define RTE_EVENTDEV_ATTACHED  (1)
 
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
 struct rte_eth_dev;
 
 /** Global structure used for maintaining state of allocated event devices */
@@ -91,6 +94,74 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void **ports;
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf *ports_cfg;
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf *queues_cfg;
+	/**< Array of queue configuration structures. */
+	uint16_t *links_map;
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 941e1e7c8e..882654b788 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -1452,24 +1452,6 @@ rte_eventdev_find_free_device_index(void)
 	return RTE_EVENT_MAX_DEVS;
 }
 
-static uint16_t
-rte_event_tx_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
-static uint16_t
-rte_event_crypto_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id)
 {
@@ -1516,10 +1498,6 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 
 	eventdev = &rte_eventdevs[dev_id];
 
-	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
-	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
-	eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
-
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 4a7edacb0e..640d5ffcb2 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -14,55 +14,34 @@ extern "C" {
 
 typedef uint16_t (*rte_event_enqueue_t)(uint8_t dev_id, uint8_t port_id,
 					const struct rte_event *ev);
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
 /**< @internal Enqueue event on port of a device */
 
 typedef uint16_t (*rte_event_enqueue_burst_t)(uint8_t dev_id, uint8_t port_id,
 					      const struct rte_event ev[],
 					      uint16_t nb_events);
-typedef uint16_t (*event_enqueue_burst_t)(void *port,
-					  const struct rte_event ev[],
-					  uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */
 
 typedef uint16_t (*rte_event_dequeue_t)(uint8_t dev_id, uint8_t port_id,
 					struct rte_event *ev,
 					uint64_t timeout_ticks);
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-				    uint64_t timeout_ticks);
 /**< @internal Dequeue event from port of a device */
 
 typedef uint16_t (*rte_event_dequeue_burst_t)(uint8_t dev_id, uint8_t port_id,
 					      struct rte_event ev[],
 					      uint16_t nb_events,
 					      uint64_t timeout_ticks);
-typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
-					  uint16_t nb_events,
-					  uint64_t timeout_ticks);
 /**< @internal Dequeue burst of events from port of a device */
 
 typedef uint16_t (*rte_event_tx_adapter_enqueue_t)(uint8_t dev_id,
 						   uint8_t port_id,
 						   struct rte_event ev[],
 						   uint16_t nb_events);
-typedef uint16_t (*event_tx_adapter_enqueue)(void *port, struct rte_event ev[],
-					     uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */
 
-typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
-						       struct rte_event ev[],
-						       uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device supporting
- * burst having same destination Ethernet port & Tx queue.
- */
-
 typedef uint16_t (*rte_event_crypto_adapter_enqueue_t)(uint8_t dev_id,
 						       uint8_t port_id,
 						       struct rte_event ev[],
 						       uint16_t nb_events);
-typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
-						 struct rte_event ev[],
-						 uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */
 
 struct rte_eventdev_api {
@@ -89,98 +68,6 @@ struct rte_eventdev_api {
 
 extern struct rte_eventdev_api *rte_eventdev_api;
 
-#define RTE_EVENTDEV_NAME_MAX_LEN (64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void **ports;
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
-	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
 #ifdef __cplusplus
 }
 #endif
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 09/15] eventdev: hide timer adapter pmd file
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (6 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 08/15] eventdev: hide event device related structures pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-23 19:40 ` [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs pbhagavatula
                   ` (8 subsequent siblings)
  16 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Mattias Rönnblom,
	Harry van Haaren, Erik Gabriel Carrillo
  Cc: konstantin.ananyev, dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Hide the rte_event_timer_adapter_pmd.h header by renaming it to
event_timer_adapter_pmd.h, as it is internal to the eventdev library,
and remove the rte_ prefix from the rte_event_timer_adapter_ops
structure.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cnxk_tim_evdev.c           |  5 ++--
 drivers/event/cnxk/cnxk_tim_evdev.h           |  2 +-
 drivers/event/dsw/dsw_evdev.c                 |  4 +--
 drivers/event/octeontx/ssovf_evdev.c          |  2 +-
 drivers/event/octeontx/timvf_evdev.c          | 17 ++++++-----
 drivers/event/octeontx/timvf_evdev.h          |  9 +++---
 drivers/event/octeontx2/otx2_tim_evdev.c      |  5 ++--
 drivers/event/octeontx2/otx2_tim_evdev.h      |  4 +--
 drivers/event/sw/sw_evdev.c                   |  5 ++--
 ...dapter_pmd.h => event_timer_adapter_pmd.h} |  8 ++---
 lib/eventdev/eventdev_pmd.h                   |  8 ++---
 lib/eventdev/meson.build                      |  2 +-
 lib/eventdev/rte_event_timer_adapter.c        | 30 +++++++++----------
 lib/eventdev/rte_event_timer_adapter.h        |  2 +-
 lib/eventdev/rte_eventdev.c                   |  2 +-
 15 files changed, 51 insertions(+), 54 deletions(-)
 rename lib/eventdev/{rte_event_timer_adapter_pmd.h => event_timer_adapter_pmd.h} (95%)
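
For reference, a hedged sketch of a driver timer adapter caps_get
callback against the renamed ops structure; the driver names below are
placeholders, not part of this patch:

    #include <rte_common.h>
    #include <eventdev_pmd.h>   /* pulls in event_timer_adapter_pmd.h */

    static const struct event_timer_adapter_ops my_tim_ops;  /* placeholder */

    static int
    my_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
                              uint32_t *caps,
                              const struct event_timer_adapter_ops **ops)
    {
            RTE_SET_USED(dev);
            RTE_SET_USED(flags);

            /* Caps left at 0 in this sketch; a real driver reports its
             * capability flags and hands back its ops table.
             */
            *caps = 0;
            *ops = &my_tim_ops;
            return 0;
    }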

diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index 9d40e336d7..10634c31e3 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -5,7 +5,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops cnxk_tim_ops;
+static struct event_timer_adapter_ops cnxk_tim_ops;
 
 static int
 cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
@@ -353,8 +353,7 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index c369f6f472..91e163eb5a 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -267,7 +267,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 
 int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void cnxk_tim_init(struct roc_sso *sso);
 void cnxk_tim_fini(void);
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 8e9e29e363..69bc08bf78 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -381,8 +381,8 @@ dsw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
 
 static int
 dsw_timer_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
-			   uint64_t flags  __rte_unused, uint32_t *caps,
-			   const struct rte_event_timer_adapter_ops **ops)
+			   uint64_t flags __rte_unused, uint32_t *caps,
+			   const struct event_timer_adapter_ops **ops)
 {
 	*caps = 0;
 	*ops = NULL;
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 4a8c6a13a5..e7aecd4139 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -721,7 +721,7 @@ ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque)
 
 static int
 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+		     uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
 			timvf_enable_stats);
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index 688e9daa66..1f1cda3f7f 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -407,18 +407,19 @@ timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
 	return 0;
 }
 
-static struct rte_event_timer_adapter_ops timvf_ops = {
-	.init		= timvf_ring_create,
-	.uninit		= timvf_ring_free,
-	.start		= timvf_ring_start,
-	.stop		= timvf_ring_stop,
-	.get_info	= timvf_ring_info_get,
+static struct event_timer_adapter_ops timvf_ops = {
+	.init = timvf_ring_create,
+	.uninit = timvf_ring_free,
+	.start = timvf_ring_start,
+	.stop = timvf_ring_stop,
+	.get_info = timvf_ring_info_get,
 };
 
 int
 timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats)
+			     uint32_t *caps,
+			     const struct event_timer_adapter_ops **ops,
+			     uint8_t enable_stats)
 {
 	RTE_SET_USED(dev);
 
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 2977063d66..cef02cd7f9 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -5,13 +5,13 @@
 #ifndef __TIMVF_EVDEV_H__
 #define __TIMVF_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
 #include <rte_eal.h>
-#include <rte_eventdev.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
+#include <rte_eventdev.h>
 #include <rte_io.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
@@ -196,8 +196,9 @@ uint8_t timvf_get_ring(void);
 void timvf_release_ring(uint8_t vfid);
 void *timvf_bar(uint8_t id, uint8_t bar);
 int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats);
+				 uint32_t *caps,
+				 const struct event_timer_adapter_ops **ops,
+				 uint8_t enable_stats);
 uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint16_t nb_timers);
 uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c
index de50c4c76e..7dcb291043 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.c
+++ b/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -9,7 +9,7 @@
 #include "otx2_evdev.h"
 #include "otx2_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops otx2_tim_ops;
+static struct event_timer_adapter_ops otx2_tim_ops;
 
 static inline int
 tim_get_msix_offsets(void)
@@ -497,8 +497,7 @@ otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct otx2_tim_evdev *dev = tim_priv_get();
 
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.h b/drivers/event/octeontx2/otx2_tim_evdev.h
index caa6ad3b3c..dac642e0e1 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.h
+++ b/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -5,8 +5,8 @@
 #ifndef __OTX2_TIM_EVDEV_H__
 #define __OTX2_TIM_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
 #include <rte_reciprocal.h>
 
 #include "otx2_dev.h"
@@ -244,7 +244,7 @@ uint16_t otx2_tim_timer_cancel_burst(
 
 int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev);
 void otx2_tim_fini(void);
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 494769fd06..a211d4f468 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -561,10 +561,9 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 }
 
 static int
-sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
-			  uint64_t flags,
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 			  uint32_t *caps,
-			  const struct rte_event_timer_adapter_ops **ops)
+			  const struct event_timer_adapter_ops **ops)
 {
 	RTE_SET_USED(dev);
 	RTE_SET_USED(flags);
diff --git a/lib/eventdev/rte_event_timer_adapter_pmd.h b/lib/eventdev/event_timer_adapter_pmd.h
similarity index 95%
rename from lib/eventdev/rte_event_timer_adapter_pmd.h
rename to lib/eventdev/event_timer_adapter_pmd.h
index cf3509dc6f..189017b5c1 100644
--- a/lib/eventdev/rte_event_timer_adapter_pmd.h
+++ b/lib/eventdev/event_timer_adapter_pmd.h
@@ -3,8 +3,8 @@
  * All rights reserved.
  */
 
-#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
-#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#ifndef __EVENT_TIMER_ADAPTER_PMD_H__
+#define __EVENT_TIMER_ADAPTER_PMD_H__
 
 /**
  * @file
@@ -57,7 +57,7 @@ typedef int (*rte_event_timer_adapter_stats_reset_t)(
  * @internal Structure containing the functions exported by an event timer
  * adapter implementation.
  */
-struct rte_event_timer_adapter_ops {
+struct event_timer_adapter_ops {
 	rte_event_timer_adapter_init_t		init;  /**< Set up adapter */
 	rte_event_timer_adapter_uninit_t	uninit;/**< Tear down adapter */
 	rte_event_timer_adapter_start_t		start; /**< Start adapter */
@@ -111,4 +111,4 @@ struct rte_event_timer_adapter_data {
 }
 #endif
 
-#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
+#endif /* __EVENT_TIMER_ADAPTER_PMD_H__ */
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index a7521fbd93..90ca08f880 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -28,8 +28,8 @@ extern "C" {
 #include <rte_mbuf.h>
 #include <rte_mbuf_dyn.h>
 
+#include "event_timer_adapter_pmd.h"
 #include "rte_eventdev.h"
-#include "rte_event_timer_adapter_pmd.h"
 
 /* Logging Macros */
 #define RTE_EDEV_LOG_ERR(...) \
@@ -574,10 +574,8 @@ struct rte_event_eth_rx_adapter_queue_conf;
  *
  */
 typedef int (*eventdev_timer_adapter_caps_get_t)(
-				const struct rte_eventdev *dev,
-				uint64_t flags,
-				uint32_t *caps,
-				const struct rte_event_timer_adapter_ops **ops);
+	const struct rte_eventdev *dev, uint64_t flags, uint32_t *caps,
+	const struct event_timer_adapter_ops **ops);
 
 /**
  * Add ethernet Rx queues to event device. This callback is invoked if
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 9051ff04b7..f19b831edd 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -24,7 +24,6 @@ headers = files(
         'rte_event_ring.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_timer_adapter.h',
-        'rte_event_timer_adapter_pmd.h',
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
@@ -35,6 +34,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'event_timer_adapter_pmd.h',
 )
 
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ee20b39f4b..ae55407042 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -20,11 +20,11 @@
 #include <rte_service_component.h>
 #include <rte_cycles.h>
 
-#include "rte_eventdev.h"
+#include "event_timer_adapter_pmd.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
 #include "rte_event_timer_adapter.h"
-#include "rte_event_timer_adapter_pmd.h"
+#include "rte_eventdev.h"
+#include "rte_eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
@@ -35,7 +35,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
 
-static const struct rte_event_timer_adapter_ops swtim_ops;
+static const struct event_timer_adapter_ops swtim_ops;
 
 #define EVTIM_LOG(level, logtype, ...) \
 	rte_log(RTE_LOG_ ## level, logtype, \
@@ -1207,15 +1207,15 @@ swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
 	return __swtim_arm_burst(adapter, evtims, nb_evtims);
 }
 
-static const struct rte_event_timer_adapter_ops swtim_ops = {
-	.init			= swtim_init,
-	.uninit			= swtim_uninit,
-	.start			= swtim_start,
-	.stop			= swtim_stop,
-	.get_info		= swtim_get_info,
-	.stats_get		= swtim_stats_get,
-	.stats_reset		= swtim_stats_reset,
-	.arm_burst		= swtim_arm_burst,
-	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
-	.cancel_burst		= swtim_cancel_burst,
+static const struct event_timer_adapter_ops swtim_ops = {
+	.init = swtim_init,
+	.uninit = swtim_uninit,
+	.start = swtim_start,
+	.stop = swtim_stop,
+	.get_info = swtim_get_info,
+	.stats_get = swtim_stats_get,
+	.stats_reset = swtim_stats_reset,
+	.arm_burst = swtim_arm_burst,
+	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
+	.cancel_burst = swtim_cancel_burst,
 };
diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index 4e0d2a819b..cad6d3b4c5 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -523,7 +523,7 @@ struct rte_event_timer_adapter {
 	/**< Pointer to driver cancel function. */
 	struct rte_event_timer_adapter_data *data;
 	/**< Pointer to shared adapter data */
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 	/**< Functions exported by adapter driver */
 
 	RTE_STD_C11
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 882654b788..b9b029edc4 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -140,7 +140,7 @@ int
 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
 {
 	struct rte_eventdev *dev;
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (7 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 09/15] eventdev: hide timer adapter pmd file pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-30 14:42   ` Jayatheerthan, Jay
  2021-08-23 19:40 ` [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object pbhagavatula
                   ` (7 subsequent siblings)
  16 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan
  Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Remove the rte_ prefix from rte_eth_event_enqueue_buffer,
rte_event_eth_rx_adapter and rte_event_crypto_adapter, as these
structures are used only within rte_event_eth_rx_adapter.c and
rte_event_crypto_adapter.c.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_crypto_adapter.c |  66 +++----
 lib/eventdev/rte_event_eth_rx_adapter.c | 249 ++++++++++--------------
 lib/eventdev/rte_eventdev.h             |   2 +-
 3 files changed, 141 insertions(+), 176 deletions(-)
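
For reference, a small sketch showing that the rename is internal only;
the wrapper function below is a placeholder, not part of this patch:

    #include <rte_event_crypto_adapter.h>

    /* The renamed struct (event_crypto_adapter) stays private to
     * rte_event_crypto_adapter.c; applications keep addressing the
     * adapter by its numeric id through the unchanged public API.
     */
    int
    query_crypto_adapter_stats(uint8_t adapter_id)  /* placeholder */
    {
            struct rte_event_crypto_adapter_stats stats;

            return rte_event_crypto_adapter_stats_get(adapter_id, &stats);
    }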

diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index e1d38d383d..8a2a25c02e 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -30,7 +30,7 @@
  */
 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
 
-struct rte_event_crypto_adapter {
+struct event_crypto_adapter {
 	/* Event device identifier */
 	uint8_t eventdev_id;
 	/* Event port identifier */
@@ -99,7 +99,7 @@ struct crypto_queue_pair_info {
 	uint8_t len;
 } __rte_cache_aligned;
 
-static struct rte_event_crypto_adapter **event_crypto_adapter;
+static struct event_crypto_adapter **event_crypto_adapter;
 
 /* Macros to check for valid adapter */
 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -141,7 +141,7 @@ eca_init(void)
 	return 0;
 }
 
-static inline struct rte_event_crypto_adapter *
+static inline struct event_crypto_adapter *
 eca_id_to_adapter(uint8_t id)
 {
 	return event_crypto_adapter ?
@@ -158,7 +158,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	int ret;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
 
 	if (adapter == NULL)
 		return -EINVAL;
@@ -202,7 +202,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				enum rte_event_crypto_adapter_mode mode,
 				void *conf_arg)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
 	struct rte_event_dev_info dev_info;
 	int socket_id;
@@ -304,7 +304,7 @@ rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_crypto_adapter_free(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -329,8 +329,8 @@ rte_event_crypto_adapter_free(uint8_t id)
 }
 
 static inline unsigned int
-eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
-		 struct rte_event *ev, unsigned int cnt)
+eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+		     unsigned int cnt)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -420,7 +420,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
 }
 
 static unsigned int
-eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -466,8 +466,8 @@ eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
 }
 
 static int
-eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_enq)
+eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_enq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct rte_event ev[BATCH_SIZE];
@@ -500,8 +500,8 @@ eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline void
-eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
-		  struct rte_crypto_op **ops, uint16_t num)
+eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
+		      struct rte_crypto_op **ops, uint16_t num)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -564,8 +564,8 @@ eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline unsigned int
-eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_deq)
+eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_deq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -627,8 +627,8 @@ eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static void
-eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_ops)
+eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
+		       unsigned int max_ops)
 {
 	while (max_ops) {
 		unsigned int e_cnt, d_cnt;
@@ -648,7 +648,7 @@ eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
 static int
 eca_service_func(void *args)
 {
-	struct rte_event_crypto_adapter *adapter = args;
+	struct event_crypto_adapter *adapter = args;
 
 	if (rte_spinlock_trylock(&adapter->lock) == 0)
 		return 0;
@@ -659,7 +659,7 @@ eca_service_func(void *args)
 }
 
 static int
-eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
 {
 	struct rte_event_crypto_adapter_conf adapter_conf;
 	struct rte_service_spec service;
@@ -699,10 +699,9 @@ eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
 }
 
 static void
-eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
-			struct crypto_device_info *dev_info,
-			int32_t queue_pair_id,
-			uint8_t add)
+eca_update_qp_info(struct event_crypto_adapter *adapter,
+		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
+		   uint8_t add)
 {
 	struct crypto_queue_pair_info *qp_info;
 	int enabled;
@@ -729,9 +728,8 @@ eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
 }
 
 static int
-eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
-		uint8_t cdev_id,
-		int queue_pair_id)
+eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
+		   int queue_pair_id)
 {
 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
 	struct crypto_queue_pair_info *qpairs;
@@ -773,7 +771,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			int32_t queue_pair_id,
 			const struct rte_event *event)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
 	uint32_t cap;
@@ -889,7 +887,7 @@ int
 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 					int32_t queue_pair_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	int ret;
@@ -975,7 +973,7 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 static int
 eca_adapter_ctrl(uint8_t id, int start)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1017,7 +1015,7 @@ eca_adapter_ctrl(uint8_t id, int start)
 int
 rte_event_crypto_adapter_start(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 	adapter = eca_id_to_adapter(id);
@@ -1039,7 +1037,7 @@ int
 rte_event_crypto_adapter_stats_get(uint8_t id,
 				struct rte_event_crypto_adapter_stats *stats)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_crypto_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -1083,7 +1081,7 @@ rte_event_crypto_adapter_stats_get(uint8_t id,
 int
 rte_event_crypto_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1111,7 +1109,7 @@ rte_event_crypto_adapter_stats_reset(uint8_t id)
 int
 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -1128,7 +1126,7 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 13dfb28401..f8225ebd3d 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -78,14 +78,14 @@ struct eth_rx_vector_data {
 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
 
 /* Instance per adapter */
-struct rte_eth_event_enqueue_buffer {
+struct eth_event_enqueue_buffer {
 	/* Count of events in this buffer */
 	uint16_t count;
 	/* Array of events in this buffer */
 	struct rte_event events[ETH_EVENT_BUFFER_SIZE];
 };
 
-struct rte_event_eth_rx_adapter {
+struct event_eth_rx_adapter {
 	/* RSS key */
 	uint8_t rss_key_be[RSS_KEY_SIZE];
 	/* Event device identifier */
@@ -109,7 +109,7 @@ struct rte_event_eth_rx_adapter {
 	/* Next entry in wrr[] to begin polling */
 	uint32_t wrr_pos;
 	/* Event burst buffer */
-	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+	struct eth_event_enqueue_buffer event_enqueue_buffer;
 	/* Vector enable flag */
 	uint8_t ena_vector;
 	/* Timestamp of previous vector expiry list traversal */
@@ -231,7 +231,7 @@ struct eth_rx_queue_info {
 	struct eth_rx_vector_data vector_data;
 };
 
-static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+static struct event_eth_rx_adapter **event_eth_rx_adapter;
 
 static inline int
 rxa_validate_id(uint8_t id)
@@ -247,7 +247,7 @@ rxa_validate_id(uint8_t id)
 } while (0)
 
 static inline int
-rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
 {
 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
 }
@@ -265,10 +265,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
  */
 static int
-rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
-	 unsigned int n, int *cw,
-	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
-	 uint16_t gcd, int prev)
+rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
+	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+	     uint16_t gcd, int prev)
 {
 	int i = prev;
 	uint16_t w;
@@ -373,10 +372,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
  */
 static void
-rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_intr)
+rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_intr)
 {
 	uint32_t intr_diff;
 
@@ -392,12 +390,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * interrupt queues could currently be poll mode Rx queues
  */
 static void
-rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+			  uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -424,11 +420,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
  * after deleting poll mode rx queues
  */
 static void
-rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
 {
 	uint32_t poll_diff;
 	uint32_t wrr_len_diff;
@@ -449,13 +443,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
 /* Calculate nb_rx_* after adding poll mode rx queues
  */
 static void
-rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint16_t wt,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint16_t wt, uint32_t *nb_rx_poll,
+			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -482,13 +473,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after adding rx_queue_id */
 static void
-rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint16_t wt,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	if (wt != 0)
 		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
@@ -500,12 +488,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after deleting rx_queue_id */
 static void
-rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
 				nb_wrr);
@@ -517,8 +503,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the rx_poll array
  */
 static struct eth_rx_poll_entry *
-rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint32_t num_rx_polled)
+rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
 {
 	size_t len;
 
@@ -534,7 +519,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the WRR array
  */
 static uint32_t *
-rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
 {
 	size_t len;
 
@@ -547,11 +532,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
 }
 
 static int
-rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint32_t nb_poll,
-		uint32_t nb_wrr,
-		struct eth_rx_poll_entry **rx_poll,
-		uint32_t **wrr_sched)
+rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
+		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
+		      uint32_t **wrr_sched)
 {
 
 	if (nb_poll == 0) {
@@ -576,9 +559,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Precalculate WRR polling sequence for all queues in rx_adapter */
 static void
-rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_rx_poll_entry *rx_poll,
-		uint32_t *rx_wrr)
+rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
+		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
 {
 	uint16_t d;
 	uint16_t q;
@@ -705,13 +687,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 }
 
 static inline int
-rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
 {
 	return !!rx_adapter->enq_block_count;
 }
 
 static inline void
-rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->rx_enq_block_start_ts)
 		return;
@@ -724,8 +706,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
-		    struct rte_event_eth_rx_adapter_stats *stats)
+rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
+		     struct rte_event_eth_rx_adapter_stats *stats)
 {
 	if (unlikely(!stats->rx_enq_start_ts))
 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
@@ -744,10 +726,10 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter)
 {
-	struct rte_eth_event_enqueue_buffer *buf =
-	    &rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 
 	if (!buf->count)
@@ -774,7 +756,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
 		struct eth_rx_vector_data *vec)
 {
 	vec->vector_ev->nb_elem = 0;
@@ -785,9 +767,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline uint16_t
-rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 			struct eth_rx_queue_info *queue_info,
-			struct rte_eth_event_enqueue_buffer *buf,
+			struct eth_event_enqueue_buffer *buf,
 			struct rte_mbuf **mbufs, uint16_t num)
 {
 	struct rte_event *ev = &buf->events[buf->count];
@@ -845,19 +827,16 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		uint16_t rx_queue_id,
-		struct rte_mbuf **mbufs,
-		uint16_t num)
+rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
 					&rx_adapter->eth_devices[eth_dev_id];
 	struct eth_rx_queue_info *eth_rx_queue_info =
 					&dev_info->rx_queue[rx_queue_id];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev = &buf->events[buf->count];
 	uint64_t event = eth_rx_queue_info->event;
 	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
@@ -909,16 +888,13 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue packets from  <port, q>  to event buffer */
 static inline uint32_t
-rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint16_t port_id,
-	uint16_t queue_id,
-	uint32_t rx_count,
-	uint32_t max_rx,
-	int *rxq_empty)
+rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
+	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
+	   int *rxq_empty)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats =
 					&rx_adapter->stats;
 	uint16_t n;
@@ -953,8 +929,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
-		void *data)
+rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
 {
 	uint16_t port_id;
 	uint16_t queue;
@@ -994,8 +969,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
-			uint32_t num_intr_vec)
+rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
+			  uint32_t num_intr_vec)
 {
 	if (rx_adapter->num_intr_vec + num_intr_vec >
 				RTE_EVENT_ETH_INTR_RING_SIZE) {
@@ -1010,9 +985,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Delete entries for (dev, queue) from the interrupt ring */
 static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			uint16_t rx_queue_id)
+rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info,
+			  uint16_t rx_queue_id)
 {
 	int i, n;
 	union queue_data qd;
@@ -1045,7 +1020,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
 static void *
 rxa_intr_thread(void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
+	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
 	int n, i;
 
@@ -1068,12 +1043,12 @@ rxa_intr_thread(void *arg)
  * mbufs to eventdev
  */
 static inline uint32_t
-rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
@@ -1188,11 +1163,11 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
  * it.
  */
 static inline uint32_t
-rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1233,8 +1208,8 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
-	struct rte_eth_event_enqueue_buffer *buf =
+	struct event_eth_rx_adapter *rx_adapter = arg;
+	struct eth_event_enqueue_buffer *buf =
 		&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev;
 
@@ -1257,7 +1232,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 static int
 rxa_service_func(void *args)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = args;
+	struct event_eth_rx_adapter *rx_adapter = args;
 	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
@@ -1318,7 +1293,7 @@ rte_event_eth_rx_adapter_init(void)
 	return 0;
 }
 
-static inline struct rte_event_eth_rx_adapter *
+static inline struct event_eth_rx_adapter *
 rxa_id_to_adapter(uint8_t id)
 {
 	return event_eth_rx_adapter ?
@@ -1335,7 +1310,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	uint8_t port_id;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	dev_conf = dev->data->dev_conf;
@@ -1384,7 +1359,7 @@ rxa_epoll_create1(void)
 }
 
 static int
-rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->epd != INIT_FD)
 		return 0;
@@ -1401,7 +1376,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1445,7 +1420,7 @@ rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 
@@ -1466,7 +1441,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
 {
 	int ret;
 
@@ -1484,9 +1459,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1514,9 +1488,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id)
+rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 {
 	int err;
 	int i;
@@ -1573,9 +1546,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
+		struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err, err1;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1663,9 +1635,8 @@ rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int rx_queue_id)
+rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 
 {
 	int i, j, err;
@@ -1713,9 +1684,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	return err;
 }
 
-
 static int
-rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
 {
 	int ret;
 	struct rte_service_spec service;
@@ -1758,10 +1728,9 @@ rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
 }
 
 static void
-rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int32_t rx_queue_id,
-		uint8_t add)
+rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, int32_t rx_queue_id,
+		 uint8_t add)
 {
 	struct eth_rx_queue_info *queue_info;
 	int enabled;
@@ -1811,9 +1780,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
 }
 
 static void
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id)
+rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
+	   struct eth_device_info *dev_info, int32_t rx_queue_id)
 {
 	struct eth_rx_vector_data *vec;
 	int pollq;
@@ -1854,10 +1822,9 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static void
-rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id,
-	const struct rte_event_eth_rx_adapter_queue_conf *conf)
+rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
+	      struct eth_device_info *dev_info, int32_t rx_queue_id,
+	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
 {
 	struct eth_rx_queue_info *queue_info;
 	const struct rte_event *ev = &conf->ev;
@@ -1922,7 +1889,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 
 static void
 rxa_sw_event_vector_configure(
-	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 	int rx_queue_id,
 	const struct rte_event_eth_rx_adapter_event_vector_config *config)
 {
@@ -1956,10 +1923,10 @@ rxa_sw_event_vector_configure(
 			      config->vector_timeout_ns >> 1;
 }
 
-static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		int rx_queue_id,
-		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+static int
+rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	   int rx_queue_id,
+	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
@@ -2088,7 +2055,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 static int
 rxa_ctrl(uint8_t id, int start)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2135,7 +2102,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				rte_event_eth_rx_adapter_conf_cb conf_cb,
 				void *conf_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	int ret;
 	int socket_id;
 	uint16_t i;
@@ -2235,7 +2202,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_eth_rx_adapter_free(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2267,7 +2234,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
 {
 	int ret;
 	uint32_t cap;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 
@@ -2385,7 +2352,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 {
 	int ret = 0;
 	struct rte_eventdev *dev;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	uint32_t nb_rx_poll = 0;
@@ -2505,7 +2472,7 @@ rte_event_eth_rx_adapter_queue_event_vector_config(
 	struct rte_event_eth_rx_adapter_event_vector_config *config)
 {
 	struct rte_event_eth_rx_adapter_vector_limits limits;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	uint32_t cap;
 	int ret;
@@ -2632,7 +2599,7 @@ int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -2673,7 +2640,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 int
 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2701,7 +2668,7 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 int
 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2721,7 +2688,7 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
 					rte_event_eth_rx_adapter_cb_fn cb_fn,
 					void *cb_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	int ret;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 7378597846..5853fadb0d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1193,7 +1193,7 @@ struct rte_event {
 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
 /**< The application can override the adapter generated flow ID in the
  * event. This flow ID can be specified when adding an ethdev Rx queue
- * to the adapter using the ev member of struct rte_event_eth_rx_adapter
+ * to the adapter using the ev.flow_id member.
  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
  */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (8 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-23 20:42   ` Carrillo, Erik G
                     ` (2 more replies)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to hugepage pbhagavatula
                   ` (6 subsequent siblings)
  16 siblings, 3 replies; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Reserve fields in rte_event_timer data structure to address future
use cases.
Also, remove volatile from rte_event_timer.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_timer_adapter.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index cad6d3b4c5..9499460a61 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -475,7 +475,7 @@ struct rte_event_timer {
 	 *  - op: RTE_EVENT_OP_NEW
 	 *  - event_type: RTE_EVENT_TYPE_TIMER
 	 */
-	volatile enum rte_event_timer_state state;
+	enum rte_event_timer_state state;
 	/**< State of the event timer. */
 	uint64_t timeout_ticks;
 	/**< Expiry timer ticks expressed in number of *timer_ticks_ns* from
@@ -492,6 +492,8 @@ struct rte_event_timer {
 	/**< Memory to store user specific metadata.
 	 * The event timer adapter implementation should not modify this area.
 	 */
+	uint64_t rsvd[2];
+	/**< Reserved fields for future use. */
 } __rte_cache_aligned;
 
 typedef uint16_t (*rte_event_timer_arm_burst_t)(
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to hugepage
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (9 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-24 13:50   ` Carrillo, Erik G
  2021-08-23 19:40 ` [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable pbhagavatula
                   ` (5 subsequent siblings)
  16 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move memory used by timer adapters to hugepage.
Allocate memory on the first adapter create or lookup to address
both primary and secondary process use cases.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_timer_adapter.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ae55407042..c4dc7a5fd4 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
-static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
+static struct rte_event_timer_adapter *adapters;
 
 static const struct event_timer_adapter_ops swtim_ops;
 
@@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
 	int n, ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (conf == NULL) {
 		rte_errno = EINVAL;
 		return NULL;
@@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
 	int ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (adapters[adapter_id].allocated)
 		return &adapters[adapter_id]; /* Adapter is already loaded */
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (10 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to hugepage pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-30 14:43   ` Jayatheerthan, Jay
  2021-09-08 12:05   ` Kinsella, Ray
  2021-08-23 19:40 ` [dpdk-dev] [RFC 14/15] eventdev: make trace APIs internal pbhagavatula
                   ` (4 subsequent siblings)
  16 siblings, 2 replies; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Jay Jayatheerthan, Ray Kinsella
  Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Promote event vector configuration APIs to stable.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_eth_rx_adapter.h | 2 --
 lib/eventdev/rte_eventdev.h             | 1 -
 lib/eventdev/version.map                | 6 +++---
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index 182dd2e5dd..d13d817025 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -543,7 +543,6 @@ int rte_event_eth_rx_adapter_cb_register(uint8_t id, uint16_t eth_dev_id,
  *  - 0: Success.
  *  - <0: Error code on failure.
  */
-__rte_experimental
 int rte_event_eth_rx_adapter_vector_limits_get(
 	uint8_t dev_id, uint16_t eth_port_id,
 	struct rte_event_eth_rx_adapter_vector_limits *limits);
@@ -570,7 +569,6 @@ int rte_event_eth_rx_adapter_vector_limits_get(
  *  - 0: Success, Receive queue configured correctly.
  *  - <0: Error code on failure.
  */
-__rte_experimental
 int rte_event_eth_rx_adapter_queue_event_vector_config(
 	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
 	struct rte_event_eth_rx_adapter_event_vector_config *config);
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 5853fadb0d..f73346167b 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1734,7 +1734,6 @@ int rte_event_dev_selftest(uint8_t dev_id);
  *    - ENOMEM - no appropriate memory area found in which to create memzone
  *    - ENAMETOOLONG - mempool name requested is too long.
  */
-__rte_experimental
 struct rte_mempool *
 rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index d89cbc337e..062ca959e5 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -38,10 +38,12 @@ DPDK_22 {
 	rte_event_eth_rx_adapter_free;
 	rte_event_eth_rx_adapter_queue_add;
 	rte_event_eth_rx_adapter_queue_del;
+	rte_event_eth_rx_adapter_queue_event_vector_config;
 	rte_event_eth_rx_adapter_service_id_get;
 	rte_event_eth_rx_adapter_start;
 	rte_event_eth_rx_adapter_stats_get;
 	rte_event_eth_rx_adapter_stats_reset;
+	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_rx_adapter_stop;
 	rte_event_eth_tx_adapter_caps_get;
 	rte_event_eth_tx_adapter_create;
@@ -83,6 +85,7 @@ DPDK_22 {
 	rte_event_timer_arm_burst;
 	rte_event_timer_arm_tmo_tick_burst;
 	rte_event_timer_cancel_burst;
+	rte_event_vector_pool_create;
 	rte_eventdevs;
 
 	#added in 21.11
@@ -135,9 +138,6 @@ EXPERIMENTAL {
 	__rte_eventdev_trace_port_setup;
 
 	#added in 21.05
-	rte_event_vector_pool_create;
-	rte_event_eth_rx_adapter_vector_limits_get;
-	rte_event_eth_rx_adapter_queue_event_vector_config;
 	__rte_eventdev_trace_crypto_adapter_enqueue;
 };
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 14/15] eventdev: make trace APIs internal
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (11 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-08-30 14:47   ` Jayatheerthan, Jay
  2021-08-23 19:40 ` [dpdk-dev] [RFC 15/15] eventdev: promote trace variables to stable pbhagavatula
                   ` (3 subsequent siblings)
  16 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan, Erik Gabriel Carrillo
  Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Slowpath trace APIs are only used in rte_eventdev.c, so make them
internal.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} | 0
 lib/eventdev/eventdev_trace_points.c                    | 2 +-
 lib/eventdev/meson.build                                | 2 +-
 lib/eventdev/rte_event_crypto_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_rx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_tx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_timer_adapter.c                  | 2 +-
 lib/eventdev/rte_eventdev.c                             | 2 +-
 8 files changed, 7 insertions(+), 7 deletions(-)
 rename lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} (100%)

diff --git a/lib/eventdev/rte_eventdev_trace.h b/lib/eventdev/eventdev_trace.h
similarity index 100%
rename from lib/eventdev/rte_eventdev_trace.h
rename to lib/eventdev/eventdev_trace.h
diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
index 3867ec8008..237d9383fd 100644
--- a/lib/eventdev/eventdev_trace_points.c
+++ b/lib/eventdev/eventdev_trace_points.c
@@ -4,7 +4,7 @@
 
 #include <rte_trace_point_register.h>
 
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 /* Eventdev trace points */
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_configure,
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index f19b831edd..c750e0214f 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -19,7 +19,6 @@ sources = files(
 )
 headers = files(
         'rte_eventdev.h',
-        'rte_eventdev_trace.h',
         'rte_eventdev_trace_fp.h',
         'rte_event_ring.h',
         'rte_event_eth_rx_adapter.h',
@@ -34,6 +33,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'eventdev_trace.h',
         'event_timer_adapter_pmd.h',
 )
 
diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index 8a2a25c02e..93e7352c6e 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -16,7 +16,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_crypto_adapter.h"
 
 #define BATCH_SIZE 32
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index f8225ebd3d..7e97fbd21d 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -20,7 +20,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_rx_adapter.h"
 
 #define BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
index 18c0359db7..ee3631bced 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.c
+++ b/lib/eventdev/rte_event_eth_tx_adapter.c
@@ -6,7 +6,7 @@
 #include <rte_ethdev.h>
 
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_tx_adapter.h"
 
 #define TXA_BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index c4dc7a5fd4..7404b0cbb2 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -24,7 +24,7 @@
 #include "eventdev_pmd.h"
 #include "rte_event_timer_adapter.h"
 #include "rte_eventdev.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index b9b029edc4..3a393bd120 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -36,7 +36,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 struct rte_eventdev *rte_eventdevs;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC 15/15] eventdev: promote trace variables to stable
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (12 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 14/15] eventdev: make trace APIs internal pbhagavatula
@ 2021-08-23 19:40 ` pbhagavatula
  2021-09-08 12:06   ` Kinsella, Ray
  2021-08-24  7:43 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Mattias Rönnblom
                   ` (2 subsequent siblings)
  16 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-08-23 19:40 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: konstantin.ananyev, dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Promote rte_trace global variables to stable, i.e. remove them
from the experimental section of the version map.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/version.map | 78 ++++++++++++++++++----------------------
 1 file changed, 35 insertions(+), 43 deletions(-)

diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 062ca959e5..422f461733 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -88,57 +88,18 @@ DPDK_22 {
 	rte_event_vector_pool_create;
 	rte_eventdevs;
 
-	#added in 21.11
-	rte_eventdev_api;
-
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	# added in 20.05
-	__rte_eventdev_trace_configure;
-	__rte_eventdev_trace_queue_setup;
-	__rte_eventdev_trace_port_link;
-	__rte_eventdev_trace_port_unlink;
-	__rte_eventdev_trace_start;
-	__rte_eventdev_trace_stop;
-	__rte_eventdev_trace_close;
+	__rte_eventdev_trace_crypto_adapter_enqueue;
 	__rte_eventdev_trace_deq_burst;
 	__rte_eventdev_trace_enq_burst;
-	__rte_eventdev_trace_eth_rx_adapter_create;
-	__rte_eventdev_trace_eth_rx_adapter_free;
-	__rte_eventdev_trace_eth_rx_adapter_queue_add;
-	__rte_eventdev_trace_eth_rx_adapter_queue_del;
-	__rte_eventdev_trace_eth_rx_adapter_start;
-	__rte_eventdev_trace_eth_rx_adapter_stop;
-	__rte_eventdev_trace_eth_tx_adapter_create;
-	__rte_eventdev_trace_eth_tx_adapter_free;
-	__rte_eventdev_trace_eth_tx_adapter_queue_add;
-	__rte_eventdev_trace_eth_tx_adapter_queue_del;
-	__rte_eventdev_trace_eth_tx_adapter_start;
-	__rte_eventdev_trace_eth_tx_adapter_stop;
 	__rte_eventdev_trace_eth_tx_adapter_enqueue;
-	__rte_eventdev_trace_timer_adapter_create;
-	__rte_eventdev_trace_timer_adapter_start;
-	__rte_eventdev_trace_timer_adapter_stop;
-	__rte_eventdev_trace_timer_adapter_free;
 	__rte_eventdev_trace_timer_arm_burst;
 	__rte_eventdev_trace_timer_arm_tmo_tick_burst;
 	__rte_eventdev_trace_timer_cancel_burst;
-	__rte_eventdev_trace_crypto_adapter_create;
-	__rte_eventdev_trace_crypto_adapter_free;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
-	__rte_eventdev_trace_crypto_adapter_start;
-	__rte_eventdev_trace_crypto_adapter_stop;
 
-	# changed in 20.11
-	__rte_eventdev_trace_port_setup;
+	#added in 21.11
+	rte_eventdev_api;
 
-	#added in 21.05
-	__rte_eventdev_trace_crypto_adapter_enqueue;
+	local: *;
 };
 
 INTERNAL {
@@ -173,4 +134,35 @@ INTERNAL {
 	rte_event_pmd_release;
 	rte_event_pmd_vdev_init;
 	rte_event_pmd_vdev_uninit;
+
+	__rte_eventdev_trace_close;
+	__rte_eventdev_trace_configure;
+	__rte_eventdev_trace_crypto_adapter_create;
+	__rte_eventdev_trace_crypto_adapter_free;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
+	__rte_eventdev_trace_crypto_adapter_start;
+	__rte_eventdev_trace_crypto_adapter_stop;
+	__rte_eventdev_trace_eth_rx_adapter_create;
+	__rte_eventdev_trace_eth_rx_adapter_free;
+	__rte_eventdev_trace_eth_rx_adapter_queue_add;
+	__rte_eventdev_trace_eth_rx_adapter_queue_del;
+	__rte_eventdev_trace_eth_rx_adapter_start;
+	__rte_eventdev_trace_eth_rx_adapter_stop;
+	__rte_eventdev_trace_eth_tx_adapter_create;
+	__rte_eventdev_trace_eth_tx_adapter_free;
+	__rte_eventdev_trace_eth_tx_adapter_queue_add;
+	__rte_eventdev_trace_eth_tx_adapter_queue_del;
+	__rte_eventdev_trace_eth_tx_adapter_start;
+	__rte_eventdev_trace_eth_tx_adapter_stop;
+	__rte_eventdev_trace_port_link;
+	__rte_eventdev_trace_port_setup;
+	__rte_eventdev_trace_port_unlink;
+	__rte_eventdev_trace_queue_setup;
+	__rte_eventdev_trace_start;
+	__rte_eventdev_trace_stop;
+	__rte_eventdev_trace_timer_adapter_create;
+	__rte_eventdev_trace_timer_adapter_free;
+	__rte_eventdev_trace_timer_adapter_start;
+	__rte_eventdev_trace_timer_adapter_stop;
 };
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object
  2021-08-23 19:40 ` [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object pbhagavatula
@ 2021-08-23 20:42   ` Carrillo, Erik G
  2021-08-24  5:16     ` Pavan Nikhilesh Bhagavatula
  2021-08-24 15:10   ` Stephen Hemminger
  2021-09-07 21:31   ` [dpdk-dev] " Stephen Hemminger
  2 siblings, 1 reply; 119+ messages in thread
From: Carrillo, Erik G @ 2021-08-23 20:42 UTC (permalink / raw)
  To: pbhagavatula, jerinj; +Cc: Ananyev, Konstantin, dev

Hi Pavan,

One comment in-line:

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Monday, August 23, 2021 2:40 PM
> To: jerinj@marvell.com; Carrillo, Erik G <erik.g.carrillo@intel.com>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org;
> Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Reserve fields in rte_event_timer data structure to address future use cases.
> Also, remove volatile from rte_event_timer.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_timer_adapter.h | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/eventdev/rte_event_timer_adapter.h
> b/lib/eventdev/rte_event_timer_adapter.h
> index cad6d3b4c5..9499460a61 100644
> --- a/lib/eventdev/rte_event_timer_adapter.h
> +++ b/lib/eventdev/rte_event_timer_adapter.h
> @@ -475,7 +475,7 @@ struct rte_event_timer {
>  	 *  - op: RTE_EVENT_OP_NEW
>  	 *  - event_type: RTE_EVENT_TYPE_TIMER
>  	 */
> -	volatile enum rte_event_timer_state state;
> +	enum rte_event_timer_state state;
>  	/**< State of the event timer. */
>  	uint64_t timeout_ticks;
>  	/**< Expiry timer ticks expressed in number of *timer_ticks_ns*
> from @@ -492,6 +492,8 @@ struct rte_event_timer {
>  	/**< Memory to store user specific metadata.
>  	 * The event timer adapter implementation should not modify this
> area.
>  	 */
> +	uint64_t rsvd[2];
> +	/**< Reserved fields for future use. */

This placement puts rsvd after the user_meta field, which should be last since it is a zero-length array.  Am I missing something?
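
For illustration, keeping the zero-length member last would give an
ordering along these lines (sketch only; field list paraphrased from
rte_event_timer_adapter.h with the doxygen comments trimmed):

	struct rte_event_timer {
		struct rte_event ev;
		/**< Expiry event attributes. */
		enum rte_event_timer_state state;
		/**< State of the event timer. */
		uint64_t timeout_ticks;
		/**< Expiry ticks relative to now. */
		uint64_t impl_opaque[2];
		/**< Implementation specific opaque data. */
		uint64_t rsvd[2];
		/**< Reserved fields for future use. */
		uint8_t user_meta[0];
		/**< User metadata area; zero-length member stays last. */
	} __rte_cache_aligned;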

Thanks,
Erik

>  } __rte_cache_aligned;
> 
>  typedef uint16_t (*rte_event_timer_arm_burst_t)(
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object
  2021-08-23 20:42   ` Carrillo, Erik G
@ 2021-08-24  5:16     ` Pavan Nikhilesh Bhagavatula
  0 siblings, 0 replies; 119+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2021-08-24  5:16 UTC (permalink / raw)
  To: Carrillo, Erik G, Jerin Jacob Kollanukkaran; +Cc: Ananyev, Konstantin, dev

Hi Erik,

>Hi Pavan,
>
>One comment in-line:
>
>> -----Original Message-----
>> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
>> Sent: Monday, August 23, 2021 2:40 PM
>> To: jerinj@marvell.com; Carrillo, Erik G <erik.g.carrillo@intel.com>
>> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>;
>dev@dpdk.org;
>> Pavan Nikhilesh <pbhagavatula@marvell.com>
>> Subject: [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer
>object
>>
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Reserve fields in rte_event_timer data structure to address future
>use cases.
>> Also, remove volatile from rte_event_timer.
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>> ---
>>  lib/eventdev/rte_event_timer_adapter.h | 4 +++-
>>  1 file changed, 3 insertions(+), 1 deletion(-)
>>
>> diff --git a/lib/eventdev/rte_event_timer_adapter.h
>> b/lib/eventdev/rte_event_timer_adapter.h
>> index cad6d3b4c5..9499460a61 100644
>> --- a/lib/eventdev/rte_event_timer_adapter.h
>> +++ b/lib/eventdev/rte_event_timer_adapter.h
>> @@ -475,7 +475,7 @@ struct rte_event_timer {
>>  	 *  - op: RTE_EVENT_OP_NEW
>>  	 *  - event_type: RTE_EVENT_TYPE_TIMER
>>  	 */
>> -	volatile enum rte_event_timer_state state;
>> +	enum rte_event_timer_state state;
>>  	/**< State of the event timer. */
>>  	uint64_t timeout_ticks;
>>  	/**< Expiry timer ticks expressed in number of
>*timer_ticks_ns*
>> from @@ -492,6 +492,8 @@ struct rte_event_timer {
>>  	/**< Memory to store user specific metadata.
>>  	 * The event timer adapter implementation should not modify
>this
>> area.
>>  	 */
>> +	uint64_t rsvd[2];
>> +	/**< Reserved fields for future use. */
>
>This placement puts rsvd after the user_meta field, which should be last
>since it is a zero-length array.  Am I missing something?

My bad, I will fix it in next version.

>
>Thanks,
>Erik

Thanks,
Pavan.

>
>>  } __rte_cache_aligned;
>>
>>  typedef uint16_t (*rte_event_timer_arm_burst_t)(
>> --
>> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (13 preceding siblings ...)
  2021-08-23 19:40 ` [dpdk-dev] [RFC 15/15] eventdev: promote trace variables to stable pbhagavatula
@ 2021-08-24  7:43 ` Mattias Rönnblom
  2021-08-24  7:47   ` Pavan Nikhilesh Bhagavatula
  2021-08-30 10:25   ` Mattias Rönnblom
  2021-09-28  9:56 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Jerin Jacob
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
  16 siblings, 2 replies; 119+ messages in thread
From: Mattias Rönnblom @ 2021-08-24  7:43 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Liang Ma, Peter Mccarthy,
	Harry van Haaren, Abhinandan Gujjar, Ray Kinsella
  Cc: konstantin.ananyev, dev

On 2021-08-23 21:40, pbhagavatula@marvell.com wrote:
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Mark all the driver specific functions as internal, remove
> `rte` prefix from `struct rte_eventdev_ops`.
> Remove experimental tag from internal functions.
> Remove `eventdev_pmd.h` from non-internal header files.
>
Is the enqueue/dequeue shortcut still worth the trouble? Considering the 
size of this patch set, it seems to be a lot of trouble to handle this 
special case.


Is the same kind of reorganization planned for the ethdev API?


<snip>



^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal
  2021-08-24  7:43 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Mattias Rönnblom
@ 2021-08-24  7:47   ` Pavan Nikhilesh Bhagavatula
  2021-08-24  8:05     ` Pavan Nikhilesh Bhagavatula
  2021-08-30 10:25   ` Mattias Rönnblom
  1 sibling, 1 reply; 119+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2021-08-24  7:47 UTC (permalink / raw)
  To: Mattias Rönnblom, Jerin Jacob Kollanukkaran,
	Shijith Thotton, Timothy McDaniel, Hemant Agrawal, Nipun Gupta,
	Liang Ma, Peter Mccarthy, Harry van Haaren, Abhinandan Gujjar,
	Ray Kinsella
  Cc: konstantin.ananyev, dev

>On 2021-08-23 21:40, pbhagavatula@marvell.com wrote:
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Mark all the driver specific functions as internal, remove
>> `rte` prefix from `struct rte_eventdev_ops`.
>> Remove experimental tag from internal functions.
>> Remove `eventdev_pmd.h` from non-internal header files.
>>
>Is the enqueue/dequeue shortcut still worth the trouble? Considering
>the
>size of this patch set, it seems to be a lot of trouble to handle this
>special case.
>
>
>Is the same kind of reorganization planned for the ethdev API?

There is already a series by Konstantin 
http://patches.dpdk.org/project/dpdk/list/?series=18422

>
>
><snip>
>


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal
  2021-08-24  7:47   ` Pavan Nikhilesh Bhagavatula
@ 2021-08-24  8:05     ` Pavan Nikhilesh Bhagavatula
  0 siblings, 0 replies; 119+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2021-08-24  8:05 UTC (permalink / raw)
  To: Pavan Nikhilesh Bhagavatula, Mattias Rönnblom,
	Jerin Jacob Kollanukkaran, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Liang Ma, Peter Mccarthy,
	Harry van Haaren, Abhinandan Gujjar, Ray Kinsella
  Cc: konstantin.ananyev, dev

>>On 2021-08-23 21:40, pbhagavatula@marvell.com wrote:
>>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>>
>>> Mark all the driver specific functions as internal, remove
>>> `rte` prefix from `struct rte_eventdev_ops`.
>>> Remove experimental tag from internal functions.
>>> Remove `eventdev_pmd.h` from non-internal header files.
>>>
>>Is the enqueue/dequeue shortcut still worth the trouble? Considering
>>the
>>size of this patch set, it seems to be a lot of trouble to handle this
>>special case.
>>
>>
>>Is the same kind of reorganization planned for the ethdev API?
>
>There is already a series by Konstantin
>http://patches.dpdk.org/project/dpdk/list/?series=18422

http://patches.dpdk.org/project/dpdk/list/?series=18382

>>
>>
>><snip>
>>


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to hugepage
  2021-08-23 19:40 ` [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to hugepage pbhagavatula
@ 2021-08-24 13:50   ` Carrillo, Erik G
  2021-09-01  6:30     ` Pavan Nikhilesh Bhagavatula
  0 siblings, 1 reply; 119+ messages in thread
From: Carrillo, Erik G @ 2021-08-24 13:50 UTC (permalink / raw)
  To: pbhagavatula, jerinj; +Cc: Ananyev, Konstantin, dev

Hi Pavan,

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Monday, August 23, 2021 2:40 PM
> To: jerinj@marvell.com; Carrillo, Erik G <erik.g.carrillo@intel.com>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org;
> Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to
> hugepage
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Move memory used by timer adapters to hugepage.
> Allocate memory on the first adapter create or lookup to address both
> primary and secondary process use cases.
> 

Is the motivation for this change performance or space improvement?  Can we add something to the commit message to say which?

Thanks,
Erik

> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_timer_adapter.c | 24
> +++++++++++++++++++++++-
>  1 file changed, 23 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/eventdev/rte_event_timer_adapter.c
> b/lib/eventdev/rte_event_timer_adapter.c
> index ae55407042..c4dc7a5fd4 100644
> --- a/lib/eventdev/rte_event_timer_adapter.c
> +++ b/lib/eventdev/rte_event_timer_adapter.c
> @@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype,
> adapter.timer, NOTICE);
> RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
> RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc,
> NOTICE);
> 
> -static struct rte_event_timer_adapter
> adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
> +static struct rte_event_timer_adapter *adapters;
> 
>  static const struct event_timer_adapter_ops swtim_ops;
> 
> @@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
>  	int n, ret;
>  	struct rte_eventdev *dev;
> 
> +	if (adapters == NULL) {
> +		adapters = rte_zmalloc("Eventdev",
> +				       sizeof(struct rte_event_timer_adapter) *
> +
> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
> +				       RTE_CACHE_LINE_SIZE);
> +		if (adapters == NULL) {
> +			rte_errno = ENOMEM;
> +			return NULL;
> +		}
> +	}
> +
>  	if (conf == NULL) {
>  		rte_errno = EINVAL;
>  		return NULL;
> @@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t
> adapter_id)
>  	int ret;
>  	struct rte_eventdev *dev;
> 
> +	if (adapters == NULL) {
> +		adapters = rte_zmalloc("Eventdev",
> +				       sizeof(struct rte_event_timer_adapter) *
> +
> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
> +				       RTE_CACHE_LINE_SIZE);
> +		if (adapters == NULL) {
> +			rte_errno = ENOMEM;
> +			return NULL;
> +		}
> +	}
> +
>  	if (adapters[adapter_id].allocated)
>  		return &adapters[adapter_id]; /* Adapter is already loaded
> */
> 
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object
  2021-08-23 19:40 ` [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object pbhagavatula
  2021-08-23 20:42   ` Carrillo, Erik G
@ 2021-08-24 15:10   ` Stephen Hemminger
  2021-09-01  6:48     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
  2021-09-07 21:31   ` [dpdk-dev] " Stephen Hemminger
  2 siblings, 1 reply; 119+ messages in thread
From: Stephen Hemminger @ 2021-08-24 15:10 UTC (permalink / raw)
  To: pbhagavatula; +Cc: jerinj, Erik Gabriel Carrillo, konstantin.ananyev, dev

On Tue, 24 Aug 2021 01:10:15 +0530
<pbhagavatula@marvell.com> wrote:

> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Reserve fields in rte_event_timer data structure to address future
> use cases.
> Also, remove volatile from rte_event_timer.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>

Reserved fields are not a good idea. They don't solve future API/ABI problems.

The issue is that you need to zero them and check that they are zero;
otherwise they can't safely be used later.  This happened with the Linux kernel
system calls where, in several cases, a flag field was added for future use.
The problem is that old programs would work with any garbage in the flag
field, and therefore the flag could not be extended.

A better way is to make the structures internal opaque objects that
can be resized.  Why is rte_event_timer_adapter exposed in the API?
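
To spell out the zero-and-check discipline above, the library side would
need something like the sketch below; the helper is hypothetical and only
the rsvd[] field comes from this RFC:

	#include <errno.h>
	#include <rte_event_timer_adapter.h>

	/* Hypothetical check, not part of the eventdev API. Unless the
	 * release that introduces rsvd[] already rejects non-zero
	 * contents, old binaries may pass garbage and the field can
	 * never be given a meaning later.
	 */
	static int
	evtim_check_rsvd(const struct rte_event_timer *tim)
	{
		if (tim->rsvd[0] != 0 || tim->rsvd[1] != 0)
			return -EINVAL;
		return 0;
	}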

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal
  2021-08-24  7:43 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Mattias Rönnblom
  2021-08-24  7:47   ` Pavan Nikhilesh Bhagavatula
@ 2021-08-30 10:25   ` Mattias Rönnblom
  2021-08-30 16:00     ` [dpdk-dev] [RFC] eventdev: uninline inline API functions Mattias Rönnblom
  1 sibling, 1 reply; 119+ messages in thread
From: Mattias Rönnblom @ 2021-08-30 10:25 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Liang Ma, Peter Mccarthy,
	Harry van Haaren, Abhinandan Gujjar, Ray Kinsella
  Cc: konstantin.ananyev, dev

On 2021-08-24 09:43, Mattias Rönnblom wrote:
> On 2021-08-23 21:40, pbhagavatula@marvell.com wrote:
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Mark all the driver specific functions as internal, remove
>> `rte` prefix from `struct rte_eventdev_ops`.
>> Remove experimental tag from internal functions.
>> Remove `eventdev_pmd.h` from non-internal header files.
>>
> Is the enqueue/dequeue shortcut still worth the trouble? Considering the
> size of this patch set, it seems to be a lot of trouble to handle this
> special case.
>
>

I had a quick look at this, using an overhead measurement benchmark for
DSW. Depending on compiler version and details of the test program's
structure, the gains ranged from modest to non-existent. In some
scenarios, the inline versions even performed more poorly than a
function call proper. This was on an Intel Skylake with static DPDK linking.


The dev and port lookup is essentially a very short pointer chase, and
if the dev table and the dev struct itself are not in a nearby cache,
significant stalls may occur. For most applications they will be in L1
though, I imagine. The inline version should give the compiler some
freedom to generate the appropriate loads earlier. If you insert a
compiler barrier before the rte_event_*() call, the inline version seems
to have no gains at all.
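
A minimal sketch of that kind of measurement, with the barrier in place
(device/port ids, the burst size and the iteration count are placeholders,
not the actual DSW benchmark):

	#include <rte_atomic.h>
	#include <rte_cycles.h>
	#include <rte_eventdev.h>

	static uint64_t
	dequeue_cycles_per_call(uint8_t dev_id, uint8_t port_id)
	{
		struct rte_event ev[32];
		const uint64_t iters = 10 * 1000 * 1000;
		uint64_t start, i;

		start = rte_get_tsc_cycles();
		for (i = 0; i < iters; i++) {
			/* Stop the compiler from preparing the dev/port
			 * loads ahead of this point, which is where the
			 * inlined fast path otherwise gets its edge.
			 */
			rte_compiler_barrier();
			(void)rte_event_dequeue_burst(dev_id, port_id, ev,
						      32, 0);
		}
		return (rte_get_tsc_cycles() - start) / iters;
	}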


Did anyone else attempt to quantify the performance gains of keeping
these functions inline?


/M

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions
  2021-08-23 19:40 ` [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions pbhagavatula
@ 2021-08-30 14:41   ` Jayatheerthan, Jay
  2021-08-30 14:46   ` David Marchand
  1 sibling, 0 replies; 119+ messages in thread
From: Jayatheerthan, Jay @ 2021-08-30 14:41 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Gujjar, Abhinandan S; +Cc: Ananyev, Konstantin, dev

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Tuesday, August 24, 2021 1:10 AM
> To: jerinj@marvell.com; Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Use new driver interface for the fastpath enqueue/dequeue inline
> functions.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_crypto_adapter.h | 13 +-----
>  lib/eventdev/rte_event_eth_tx_adapter.h | 22 ++-------
>  lib/eventdev/rte_eventdev.h             | 61 +++++++------------------
>  3 files changed, 22 insertions(+), 74 deletions(-)
> 
> diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
> index 431d05b6ed..a91585a369 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.h
> +++ b/lib/eventdev/rte_event_crypto_adapter.h
> @@ -568,20 +568,11 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
>  				struct rte_event ev[],
>  				uint16_t nb_events)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> -
> -	if (port_id >= dev->data->nb_ports) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> -#endif
>  	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
>  		nb_events);
> 
> -	return dev->ca_enqueue(dev->data->ports[port_id], ev, nb_events);
> +	return rte_eventdev_api[dev_id].ca_enqueue(dev_id, port_id, ev,
> +						   nb_events);
>  }
> 
>  #ifdef __cplusplus
> diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
> index 8c59547165..e3e78a5616 100644
> --- a/lib/eventdev/rte_event_eth_tx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_tx_adapter.h
> @@ -355,28 +355,14 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
>  				uint16_t nb_events,
>  				const uint8_t flags)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -	if (dev_id >= RTE_EVENT_MAX_DEVS ||
> -		!rte_eventdevs[dev_id].attached) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> -
> -	if (port_id >= dev->data->nb_ports) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> -#endif
>  	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
>  		nb_events, flags);
>  	if (flags)
> -		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
> -						  ev, nb_events);
> +		return rte_eventdev_api[dev_id].txa_enqueue_same_dest(
> +			dev_id, port_id, ev, nb_events);
>  	else
> -		return dev->txa_enqueue(dev->data->ports[port_id], ev,
> -					nb_events);
> +		return rte_eventdev_api[dev_id].txa_enqueue(dev_id, port_id, ev,
> +							    nb_events);
>  }
> 

Looks good to me.
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>

>  /**
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 1b11d4576d..7378597846 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1745,30 +1745,17 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
>  static __rte_always_inline uint16_t
>  __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>  			  const struct rte_event ev[], uint16_t nb_events,
> -			  const event_enqueue_burst_t fn)
> +			  const rte_event_enqueue_burst_t fn)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> -
> -	if (port_id >= dev->data->nb_ports) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> -#endif
>  	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
>  	/*
>  	 * Allow zero cost non burst mode routine invocation if application
>  	 * requests nb_events as const one
>  	 */
>  	if (nb_events == 1)
> -		return (*dev->enqueue)(dev->data->ports[port_id], ev);
> +		return rte_eventdev_api[dev_id].enqueue(dev_id, port_id, ev);
>  	else
> -		return fn(dev->data->ports[port_id], ev, nb_events);
> +		return fn(dev_id, port_id, ev, nb_events);
>  }
> 
>  /**
> @@ -1818,10 +1805,9 @@ static inline uint16_t
>  rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>  			const struct rte_event ev[], uint16_t nb_events)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -					 dev->enqueue_burst);
> +	return __rte_event_enqueue_burst(
> +		dev_id, port_id, ev, nb_events,
> +		rte_eventdev_api[dev_id].enqueue_burst);
>  }
> 
>  /**
> @@ -1869,10 +1855,9 @@ static inline uint16_t
>  rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
>  			    const struct rte_event ev[], uint16_t nb_events)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -					 dev->enqueue_new_burst);
> +	return __rte_event_enqueue_burst(
> +		dev_id, port_id, ev, nb_events,
> +		rte_eventdev_api[dev_id].enqueue_new_burst);
>  }
> 
>  /**
> @@ -1920,10 +1905,9 @@ static inline uint16_t
>  rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
>  				const struct rte_event ev[], uint16_t nb_events)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -					 dev->enqueue_forward_burst);
> +	return __rte_event_enqueue_burst(
> +		dev_id, port_id, ev, nb_events,
> +		rte_eventdev_api[dev_id].enqueue_forward_burst);
>  }
> 
>  /**
> @@ -1996,30 +1980,17 @@ static inline uint16_t
>  rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
>  			uint16_t nb_events, uint64_t timeout_ticks)
>  {
> -	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> -
> -	if (port_id >= dev->data->nb_ports) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> -#endif
>  	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
>  	/*
>  	 * Allow zero cost non burst mode routine invocation if application
>  	 * requests nb_events as const one
>  	 */
>  	if (nb_events == 1)
> -		return (*dev->dequeue)(dev->data->ports[port_id], ev,
> -				       timeout_ticks);
> +		return rte_eventdev_api[dev_id].dequeue(dev_id, port_id, ev,
> +							timeout_ticks);
>  	else
> -		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
> -					     nb_events, timeout_ticks);
> +		return rte_eventdev_api[dev_id].dequeue_burst(
> +			dev_id, port_id, ev, nb_events, timeout_ticks);
>  }
> 
>  #ifdef __cplusplus
> --
> 2.17.1
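
As a side note on the nb_events == 1 fast path kept in the
__rte_event_enqueue_burst() hunk quoted above, here is a minimal
application-side sketch of how the constant-one case is exercised (the
device, port and queue ids and the enqueued object are placeholders):

#include <errno.h>

#include <rte_eventdev.h>

/* Sketch: enqueue a single event with a compile-time constant count.
 * Because nb_events is the constant 1, the inlined wrapper resolves the
 * nb_events == 1 branch at compile time and calls the single-event
 * enqueue function directly.
 */
static inline int
enq_one(uint8_t dev_id, uint8_t port_id, uint8_t queue_id, void *obj)
{
	struct rte_event ev = {
		.op = RTE_EVENT_OP_NEW,
		.queue_id = queue_id,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.event_ptr = obj,
	};

	return rte_event_enqueue_burst(dev_id, port_id, &ev, 1) == 1 ?
		0 : -ENOSPC;
}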


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs
  2021-08-23 19:40 ` [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs pbhagavatula
@ 2021-08-30 14:42   ` Jayatheerthan, Jay
  0 siblings, 0 replies; 119+ messages in thread
From: Jayatheerthan, Jay @ 2021-08-30 14:42 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Gujjar, Abhinandan S; +Cc: Ananyev, Konstantin, dev

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Tuesday, August 24, 2021 1:10 AM
> To: jerinj@marvell.com; Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Remove rte_ prefix from rte_eth_event_enqueue_buffer,
> rte_event_eth_rx_adapter and rte_event_crypto_adapter
> as they are only used in rte_event_eth_rx_adapter.c and
> rte_event_crypto_adapter.c
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_crypto_adapter.c |  66 +++----
>  lib/eventdev/rte_event_eth_rx_adapter.c | 249 ++++++++++--------------
>  lib/eventdev/rte_eventdev.h             |   2 +-
>  3 files changed, 141 insertions(+), 176 deletions(-)
> 
> diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
> index e1d38d383d..8a2a25c02e 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.c
> +++ b/lib/eventdev/rte_event_crypto_adapter.c
> @@ -30,7 +30,7 @@
>   */
>  #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
> 
> -struct rte_event_crypto_adapter {
> +struct event_crypto_adapter {
>  	/* Event device identifier */
>  	uint8_t eventdev_id;
>  	/* Event port identifier */
> @@ -99,7 +99,7 @@ struct crypto_queue_pair_info {
>  	uint8_t len;
>  } __rte_cache_aligned;
> 
> -static struct rte_event_crypto_adapter **event_crypto_adapter;
> +static struct event_crypto_adapter **event_crypto_adapter;
> 
>  /* Macros to check for valid adapter */
>  #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
> @@ -141,7 +141,7 @@ eca_init(void)
>  	return 0;
>  }
> 
> -static inline struct rte_event_crypto_adapter *
> +static inline struct event_crypto_adapter *
>  eca_id_to_adapter(uint8_t id)
>  {
>  	return event_crypto_adapter ?
> @@ -158,7 +158,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
>  	int started;
>  	int ret;
>  	struct rte_event_port_conf *port_conf = arg;
> -	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
> +	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
> 
>  	if (adapter == NULL)
>  		return -EINVAL;
> @@ -202,7 +202,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
>  				enum rte_event_crypto_adapter_mode mode,
>  				void *conf_arg)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
>  	struct rte_event_dev_info dev_info;
>  	int socket_id;
> @@ -304,7 +304,7 @@ rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
>  int
>  rte_event_crypto_adapter_free(uint8_t id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -329,8 +329,8 @@ rte_event_crypto_adapter_free(uint8_t id)
>  }
> 
>  static inline unsigned int
> -eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
> -		 struct rte_event *ev, unsigned int cnt)
> +eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
> +		     unsigned int cnt)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	union rte_event_crypto_metadata *m_data = NULL;
> @@ -420,7 +420,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static unsigned int
> -eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
> +eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	struct crypto_device_info *curr_dev;
> @@ -466,8 +466,8 @@ eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
>  }
> 
>  static int
> -eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_enq)
> +eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
> +			   unsigned int max_enq)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	struct rte_event ev[BATCH_SIZE];
> @@ -500,8 +500,8 @@ eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static inline void
> -eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
> -		  struct rte_crypto_op **ops, uint16_t num)
> +eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
> +		      struct rte_crypto_op **ops, uint16_t num)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	union rte_event_crypto_metadata *m_data = NULL;
> @@ -564,8 +564,8 @@ eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static inline unsigned int
> -eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_deq)
> +eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
> +			   unsigned int max_deq)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	struct crypto_device_info *curr_dev;
> @@ -627,8 +627,8 @@ eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static void
> -eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_ops)
> +eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
> +		       unsigned int max_ops)
>  {
>  	while (max_ops) {
>  		unsigned int e_cnt, d_cnt;
> @@ -648,7 +648,7 @@ eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
>  static int
>  eca_service_func(void *args)
>  {
> -	struct rte_event_crypto_adapter *adapter = args;
> +	struct event_crypto_adapter *adapter = args;
> 
>  	if (rte_spinlock_trylock(&adapter->lock) == 0)
>  		return 0;
> @@ -659,7 +659,7 @@ eca_service_func(void *args)
>  }
> 
>  static int
> -eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
> +eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
>  {
>  	struct rte_event_crypto_adapter_conf adapter_conf;
>  	struct rte_service_spec service;
> @@ -699,10 +699,9 @@ eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
>  }
> 
>  static void
> -eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
> -			struct crypto_device_info *dev_info,
> -			int32_t queue_pair_id,
> -			uint8_t add)
> +eca_update_qp_info(struct event_crypto_adapter *adapter,
> +		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
> +		   uint8_t add)
>  {
>  	struct crypto_queue_pair_info *qp_info;
>  	int enabled;
> @@ -729,9 +728,8 @@ eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static int
> -eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
> -		uint8_t cdev_id,
> -		int queue_pair_id)
> +eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
> +		   int queue_pair_id)
>  {
>  	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
>  	struct crypto_queue_pair_info *qpairs;
> @@ -773,7 +771,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
>  			int32_t queue_pair_id,
>  			const struct rte_event *event)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct rte_eventdev *dev;
>  	struct crypto_device_info *dev_info;
>  	uint32_t cap;
> @@ -889,7 +887,7 @@ int
>  rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
>  					int32_t queue_pair_id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	int ret;
> @@ -975,7 +973,7 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
>  static int
>  eca_adapter_ctrl(uint8_t id, int start)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	uint32_t i;
> @@ -1017,7 +1015,7 @@ eca_adapter_ctrl(uint8_t id, int start)
>  int
>  rte_event_crypto_adapter_start(uint8_t id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
>  	adapter = eca_id_to_adapter(id);
> @@ -1039,7 +1037,7 @@ int
>  rte_event_crypto_adapter_stats_get(uint8_t id,
>  				struct rte_event_crypto_adapter_stats *stats)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
>  	struct rte_event_crypto_adapter_stats dev_stats;
>  	struct rte_eventdev *dev;
> @@ -1083,7 +1081,7 @@ rte_event_crypto_adapter_stats_get(uint8_t id,
>  int
>  rte_event_crypto_adapter_stats_reset(uint8_t id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	uint32_t i;
> @@ -1111,7 +1109,7 @@ rte_event_crypto_adapter_stats_reset(uint8_t id)
>  int
>  rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -1128,7 +1126,7 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
>  int
>  rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
> index 13dfb28401..f8225ebd3d 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -78,14 +78,14 @@ struct eth_rx_vector_data {
>  TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
> 
>  /* Instance per adapter */
> -struct rte_eth_event_enqueue_buffer {
> +struct eth_event_enqueue_buffer {
>  	/* Count of events in this buffer */
>  	uint16_t count;
>  	/* Array of events in this buffer */
>  	struct rte_event events[ETH_EVENT_BUFFER_SIZE];
>  };
> 
> -struct rte_event_eth_rx_adapter {
> +struct event_eth_rx_adapter {
>  	/* RSS key */
>  	uint8_t rss_key_be[RSS_KEY_SIZE];
>  	/* Event device identifier */
> @@ -109,7 +109,7 @@ struct rte_event_eth_rx_adapter {
>  	/* Next entry in wrr[] to begin polling */
>  	uint32_t wrr_pos;
>  	/* Event burst buffer */
> -	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer event_enqueue_buffer;
>  	/* Vector enable flag */
>  	uint8_t ena_vector;
>  	/* Timestamp of previous vector expiry list traversal */
> @@ -231,7 +231,7 @@ struct eth_rx_queue_info {
>  	struct eth_rx_vector_data vector_data;
>  };
> 
> -static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
> +static struct event_eth_rx_adapter **event_eth_rx_adapter;
> 
>  static inline int
>  rxa_validate_id(uint8_t id)
> @@ -247,7 +247,7 @@ rxa_validate_id(uint8_t id)
>  } while (0)
> 
>  static inline int
> -rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
>  }
> @@ -265,10 +265,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
>   * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
>   */
>  static int
> -rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
> -	 unsigned int n, int *cw,
> -	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
> -	 uint16_t gcd, int prev)
> +rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
> +	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
> +	     uint16_t gcd, int prev)
>  {
>  	int i = prev;
>  	uint16_t w;
> @@ -373,10 +372,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
>  /* Calculate nb_rx_intr after deleting interrupt mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_intr)
> +rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_intr)
>  {
>  	uint32_t intr_diff;
> 
> @@ -392,12 +390,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
>   * interrupt queues could currently be poll mode Rx queues
>   */
>  static void
> -rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_rx_intr,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +			  uint32_t *nb_wrr)
>  {
>  	uint32_t intr_diff;
>  	uint32_t poll_diff;
> @@ -424,11 +420,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
>   * after deleting poll mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
>  {
>  	uint32_t poll_diff;
>  	uint32_t wrr_len_diff;
> @@ -449,13 +443,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
>  /* Calculate nb_rx_* after adding poll mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint16_t wt,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_rx_intr,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint16_t wt, uint32_t *nb_rx_poll,
> +			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
>  {
>  	uint32_t intr_diff;
>  	uint32_t poll_diff;
> @@ -482,13 +473,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Calculate nb_rx_* after adding rx_queue_id */
>  static void
> -rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id,
> -		uint16_t wt,
> -		uint32_t *nb_rx_poll,
> -		uint32_t *nb_rx_intr,
> -		uint32_t *nb_wrr)
> +rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
> +		     struct eth_device_info *dev_info, int rx_queue_id,
> +		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +		     uint32_t *nb_wrr)
>  {
>  	if (wt != 0)
>  		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
> @@ -500,12 +488,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Calculate nb_rx_* after deleting rx_queue_id */
>  static void
> -rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id,
> -		uint32_t *nb_rx_poll,
> -		uint32_t *nb_rx_intr,
> -		uint32_t *nb_wrr)
> +rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
> +		     struct eth_device_info *dev_info, int rx_queue_id,
> +		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +		     uint32_t *nb_wrr)
>  {
>  	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
>  				nb_wrr);
> @@ -517,8 +503,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
>   * Allocate the rx_poll array
>   */
>  static struct eth_rx_poll_entry *
> -rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
> -	uint32_t num_rx_polled)
> +rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
>  {
>  	size_t len;
> 
> @@ -534,7 +519,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
>   * Allocate the WRR array
>   */
>  static uint32_t *
> -rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
> +rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
>  {
>  	size_t len;
> 
> @@ -547,11 +532,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
>  }
> 
>  static int
> -rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint32_t nb_poll,
> -		uint32_t nb_wrr,
> -		struct eth_rx_poll_entry **rx_poll,
> -		uint32_t **wrr_sched)
> +rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
> +		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
> +		      uint32_t **wrr_sched)
>  {
> 
>  	if (nb_poll == 0) {
> @@ -576,9 +559,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Precalculate WRR polling sequence for all queues in rx_adapter */
>  static void
> -rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_rx_poll_entry *rx_poll,
> -		uint32_t *rx_wrr)
> +rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
> +		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
>  {
>  	uint16_t d;
>  	uint16_t q;
> @@ -705,13 +687,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
>  }
> 
>  static inline int
> -rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	return !!rx_adapter->enq_block_count;
>  }
> 
>  static inline void
> -rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	if (rx_adapter->rx_enq_block_start_ts)
>  		return;
> @@ -724,8 +706,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static inline void
> -rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
> -		    struct rte_event_eth_rx_adapter_stats *stats)
> +rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
> +		     struct rte_event_eth_rx_adapter_stats *stats)
>  {
>  	if (unlikely(!stats->rx_enq_start_ts))
>  		stats->rx_enq_start_ts = rte_get_tsc_cycles();
> @@ -744,10 +726,10 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Enqueue buffered events to event device */
>  static inline uint16_t
> -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter)
>  {
> -	struct rte_eth_event_enqueue_buffer *buf =
> -	    &rx_adapter->event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
> 
>  	if (!buf->count)
> @@ -774,7 +756,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static inline void
> -rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
> +rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
>  		struct eth_rx_vector_data *vec)
>  {
>  	vec->vector_ev->nb_elem = 0;
> @@ -785,9 +767,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static inline uint16_t
> -rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
> +rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
>  			struct eth_rx_queue_info *queue_info,
> -			struct rte_eth_event_enqueue_buffer *buf,
> +			struct eth_event_enqueue_buffer *buf,
>  			struct rte_mbuf **mbufs, uint16_t num)
>  {
>  	struct rte_event *ev = &buf->events[buf->count];
> @@ -845,19 +827,16 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static inline void
> -rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint16_t eth_dev_id,
> -		uint16_t rx_queue_id,
> -		struct rte_mbuf **mbufs,
> -		uint16_t num)
> +rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> +		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num)
>  {
>  	uint32_t i;
>  	struct eth_device_info *dev_info =
>  					&rx_adapter->eth_devices[eth_dev_id];
>  	struct eth_rx_queue_info *eth_rx_queue_info =
>  					&dev_info->rx_queue[rx_queue_id];
> -	struct rte_eth_event_enqueue_buffer *buf =
> -					&rx_adapter->event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event *ev = &buf->events[buf->count];
>  	uint64_t event = eth_rx_queue_info->event;
>  	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
> @@ -909,16 +888,13 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Enqueue packets from  <port, q>  to event buffer */
>  static inline uint32_t
> -rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
> -	uint16_t port_id,
> -	uint16_t queue_id,
> -	uint32_t rx_count,
> -	uint32_t max_rx,
> -	int *rxq_empty)
> +rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
> +	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
> +	   int *rxq_empty)
>  {
>  	struct rte_mbuf *mbufs[BATCH_SIZE];
> -	struct rte_eth_event_enqueue_buffer *buf =
> -					&rx_adapter->event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event_eth_rx_adapter_stats *stats =
>  					&rx_adapter->stats;
>  	uint16_t n;
> @@ -953,8 +929,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static inline void
> -rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		void *data)
> +rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
>  {
>  	uint16_t port_id;
>  	uint16_t queue;
> @@ -994,8 +969,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static int
> -rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
> -			uint32_t num_intr_vec)
> +rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
> +			  uint32_t num_intr_vec)
>  {
>  	if (rx_adapter->num_intr_vec + num_intr_vec >
>  				RTE_EVENT_ETH_INTR_RING_SIZE) {
> @@ -1010,9 +985,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Delete entries for (dev, queue) from the interrupt ring */
>  static void
> -rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			uint16_t rx_queue_id)
> +rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info,
> +			  uint16_t rx_queue_id)
>  {
>  	int i, n;
>  	union queue_data qd;
> @@ -1045,7 +1020,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
>  static void *
>  rxa_intr_thread(void *arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = arg;
> +	struct event_eth_rx_adapter *rx_adapter = arg;
>  	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
>  	int n, i;
> 
> @@ -1068,12 +1043,12 @@ rxa_intr_thread(void *arg)
>   * mbufs to eventdev
>   */
>  static inline uint32_t
> -rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t n;
>  	uint32_t nb_rx = 0;
>  	int rxq_empty;
> -	struct rte_eth_event_enqueue_buffer *buf;
> +	struct eth_event_enqueue_buffer *buf;
>  	rte_spinlock_t *ring_lock;
>  	uint8_t max_done = 0;
> 
> @@ -1188,11 +1163,11 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
>   * it.
>   */
>  static inline uint32_t
> -rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t num_queue;
>  	uint32_t nb_rx = 0;
> -	struct rte_eth_event_enqueue_buffer *buf;
> +	struct eth_event_enqueue_buffer *buf;
>  	uint32_t wrr_pos;
>  	uint32_t max_nb_rx;
> 
> @@ -1233,8 +1208,8 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
>  static void
>  rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = arg;
> -	struct rte_eth_event_enqueue_buffer *buf =
> +	struct event_eth_rx_adapter *rx_adapter = arg;
> +	struct eth_event_enqueue_buffer *buf =
>  		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event *ev;
> 
> @@ -1257,7 +1232,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
>  static int
>  rxa_service_func(void *args)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = args;
> +	struct event_eth_rx_adapter *rx_adapter = args;
>  	struct rte_event_eth_rx_adapter_stats *stats;
> 
>  	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
> @@ -1318,7 +1293,7 @@ rte_event_eth_rx_adapter_init(void)
>  	return 0;
>  }
> 
> -static inline struct rte_event_eth_rx_adapter *
> +static inline struct event_eth_rx_adapter *
>  rxa_id_to_adapter(uint8_t id)
>  {
>  	return event_eth_rx_adapter ?
> @@ -1335,7 +1310,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
>  	int started;
>  	uint8_t port_id;
>  	struct rte_event_port_conf *port_conf = arg;
> -	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
> +	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
> 
>  	dev = &rte_eventdevs[rx_adapter->eventdev_id];
>  	dev_conf = dev->data->dev_conf;
> @@ -1384,7 +1359,7 @@ rxa_epoll_create1(void)
>  }
> 
>  static int
> -rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	if (rx_adapter->epd != INIT_FD)
>  		return 0;
> @@ -1401,7 +1376,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static int
> -rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int err;
>  	char thread_name[RTE_MAX_THREAD_NAME_LEN];
> @@ -1445,7 +1420,7 @@ rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static int
> -rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int err;
> 
> @@ -1466,7 +1441,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static int
> -rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int ret;
> 
> @@ -1484,9 +1459,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static int
> -rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	uint16_t rx_queue_id)
> +rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
> +		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
>  {
>  	int err;
>  	uint16_t eth_dev_id = dev_info->dev->data->port_id;
> @@ -1514,9 +1488,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static int
> -rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id)
> +rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
> +		   struct eth_device_info *dev_info, int rx_queue_id)
>  {
>  	int err;
>  	int i;
> @@ -1573,9 +1546,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static int
> -rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	uint16_t rx_queue_id)
> +rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
> +		struct eth_device_info *dev_info, uint16_t rx_queue_id)
>  {
>  	int err, err1;
>  	uint16_t eth_dev_id = dev_info->dev->data->port_id;
> @@ -1663,9 +1635,8 @@ rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static int
> -rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int rx_queue_id)
> +rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
> +		   struct eth_device_info *dev_info, int rx_queue_id)
> 
>  {
>  	int i, j, err;
> @@ -1713,9 +1684,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	return err;
>  }
> 
> -
>  static int
> -rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
> +rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
>  {
>  	int ret;
>  	struct rte_service_spec service;
> @@ -1758,10 +1728,9 @@ rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
>  }
> 
>  static void
> -rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int32_t rx_queue_id,
> -		uint8_t add)
> +rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
> +		 struct eth_device_info *dev_info, int32_t rx_queue_id,
> +		 uint8_t add)
>  {
>  	struct eth_rx_queue_info *queue_info;
>  	int enabled;
> @@ -1811,9 +1780,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
>  }
> 
>  static void
> -rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int32_t rx_queue_id)
> +rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
> +	   struct eth_device_info *dev_info, int32_t rx_queue_id)
>  {
>  	struct eth_rx_vector_data *vec;
>  	int pollq;
> @@ -1854,10 +1822,9 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static void
> -rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int32_t rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_queue_conf *conf)
> +rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
> +	      struct eth_device_info *dev_info, int32_t rx_queue_id,
> +	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
>  {
>  	struct eth_rx_queue_info *queue_info;
>  	const struct rte_event *ev = &conf->ev;
> @@ -1922,7 +1889,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  static void
>  rxa_sw_event_vector_configure(
> -	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> +	struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
>  	int rx_queue_id,
>  	const struct rte_event_eth_rx_adapter_event_vector_config *config)
>  {
> @@ -1956,10 +1923,10 @@ rxa_sw_event_vector_configure(
>  			      config->vector_timeout_ns >> 1;
>  }
> 
> -static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint16_t eth_dev_id,
> -		int rx_queue_id,
> -		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
> +static int
> +rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> +	   int rx_queue_id,
> +	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
>  {
>  	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
>  	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
> @@ -2088,7 +2055,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
>  static int
>  rxa_ctrl(uint8_t id, int start)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
>  	uint32_t i;
> @@ -2135,7 +2102,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
>  				rte_event_eth_rx_adapter_conf_cb conf_cb,
>  				void *conf_arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	int ret;
>  	int socket_id;
>  	uint16_t i;
> @@ -2235,7 +2202,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
>  int
>  rte_event_eth_rx_adapter_free(uint8_t id)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
> 
>  	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -2267,7 +2234,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
>  {
>  	int ret;
>  	uint32_t cap;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
> 
> @@ -2385,7 +2352,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
>  {
>  	int ret = 0;
>  	struct rte_eventdev *dev;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct eth_device_info *dev_info;
>  	uint32_t cap;
>  	uint32_t nb_rx_poll = 0;
> @@ -2505,7 +2472,7 @@ rte_event_eth_rx_adapter_queue_event_vector_config(
>  	struct rte_event_eth_rx_adapter_event_vector_config *config)
>  {
>  	struct rte_event_eth_rx_adapter_vector_limits limits;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	uint32_t cap;
>  	int ret;
> @@ -2632,7 +2599,7 @@ int
>  rte_event_eth_rx_adapter_stats_get(uint8_t id,
>  			       struct rte_event_eth_rx_adapter_stats *stats)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
>  	struct rte_event_eth_rx_adapter_stats dev_stats;
>  	struct rte_eventdev *dev;
> @@ -2673,7 +2640,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
>  int
>  rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
>  	uint32_t i;
> @@ -2701,7 +2668,7 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>  int
>  rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
> 
>  	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -2721,7 +2688,7 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
>  					rte_event_eth_rx_adapter_cb_fn cb_fn,
>  					void *cb_arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct eth_device_info *dev_info;
>  	uint32_t cap;
>  	int ret;

Looks good to me.
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>

> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 7378597846..5853fadb0d 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1193,7 +1193,7 @@ struct rte_event {
>  #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
>  /**< The application can override the adapter generated flow ID in the
>   * event. This flow ID can be specified when adding an ethdev Rx queue
> - * to the adapter using the ev member of struct rte_event_eth_rx_adapter
> + * to the adapter using the ev.flow_id member.
>   * @see struct rte_event_eth_rx_adapter_queue_conf::ev
>   * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
>   */
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable
  2021-08-23 19:40 ` [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable pbhagavatula
@ 2021-08-30 14:43   ` Jayatheerthan, Jay
  2021-09-08 12:05   ` Kinsella, Ray
  1 sibling, 0 replies; 119+ messages in thread
From: Jayatheerthan, Jay @ 2021-08-30 14:43 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Ray Kinsella; +Cc: Ananyev, Konstantin, dev

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Tuesday, August 24, 2021 1:10 AM
> To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>; Ray Kinsella <mdr@ashroe.eu>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Promote event vector configuration APIs to stable.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_eth_rx_adapter.h | 2 --
>  lib/eventdev/rte_eventdev.h             | 1 -
>  lib/eventdev/version.map                | 6 +++---
>  3 files changed, 3 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
> index 182dd2e5dd..d13d817025 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> @@ -543,7 +543,6 @@ int rte_event_eth_rx_adapter_cb_register(uint8_t id, uint16_t eth_dev_id,
>   *  - 0: Success.
>   *  - <0: Error code on failure.
>   */
> -__rte_experimental
>  int rte_event_eth_rx_adapter_vector_limits_get(
>  	uint8_t dev_id, uint16_t eth_port_id,
>  	struct rte_event_eth_rx_adapter_vector_limits *limits);
> @@ -570,7 +569,6 @@ int rte_event_eth_rx_adapter_vector_limits_get(
>   *  - 0: Success, Receive queue configured correctly.
>   *  - <0: Error code on failure.
>   */
> -__rte_experimental
>  int rte_event_eth_rx_adapter_queue_event_vector_config(
>  	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
>  	struct rte_event_eth_rx_adapter_event_vector_config *config);

Looks good to me.
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>

> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 5853fadb0d..f73346167b 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1734,7 +1734,6 @@ int rte_event_dev_selftest(uint8_t dev_id);
>   *    - ENOMEM - no appropriate memory area found in which to create memzone
>   *    - ENAMETOOLONG - mempool name requested is too long.
>   */
> -__rte_experimental
>  struct rte_mempool *
>  rte_event_vector_pool_create(const char *name, unsigned int n,
>  			     unsigned int cache_size, uint16_t nb_elem,
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index d89cbc337e..062ca959e5 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -38,10 +38,12 @@ DPDK_22 {
>  	rte_event_eth_rx_adapter_free;
>  	rte_event_eth_rx_adapter_queue_add;
>  	rte_event_eth_rx_adapter_queue_del;
> +	rte_event_eth_rx_adapter_queue_event_vector_config;
>  	rte_event_eth_rx_adapter_service_id_get;
>  	rte_event_eth_rx_adapter_start;
>  	rte_event_eth_rx_adapter_stats_get;
>  	rte_event_eth_rx_adapter_stats_reset;
> +	rte_event_eth_rx_adapter_vector_limits_get;
>  	rte_event_eth_rx_adapter_stop;
>  	rte_event_eth_tx_adapter_caps_get;
>  	rte_event_eth_tx_adapter_create;
> @@ -83,6 +85,7 @@ DPDK_22 {
>  	rte_event_timer_arm_burst;
>  	rte_event_timer_arm_tmo_tick_burst;
>  	rte_event_timer_cancel_burst;
> +	rte_event_vector_pool_create;
>  	rte_eventdevs;
> 
>  	#added in 21.11
> @@ -135,9 +138,6 @@ EXPERIMENTAL {
>  	__rte_eventdev_trace_port_setup;
> 
>  	#added in 21.05
> -	rte_event_vector_pool_create;
> -	rte_event_eth_rx_adapter_vector_limits_get;
> -	rte_event_eth_rx_adapter_queue_event_vector_config;
>  	__rte_eventdev_trace_crypto_adapter_enqueue;
>  };
> 
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions
  2021-08-23 19:40 ` [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions pbhagavatula
  2021-08-30 14:41   ` Jayatheerthan, Jay
@ 2021-08-30 14:46   ` David Marchand
  2021-10-02 20:32     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
  1 sibling, 1 reply; 119+ messages in thread
From: David Marchand @ 2021-08-30 14:46 UTC (permalink / raw)
  To: Pavan Nikhilesh
  Cc: Jerin Jacob Kollanukkaran, Abhinandan Gujjar, Jay Jayatheerthan,
	Ananyev, Konstantin, dev

Hello Pavan,

On Mon, Aug 23, 2021 at 9:41 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Use new driver interface for the fastpath enqueue/dequeue inline
> functions.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_crypto_adapter.h | 13 +-----
>  lib/eventdev/rte_event_eth_tx_adapter.h | 22 ++-------
>  lib/eventdev/rte_eventdev.h             | 61 +++++++------------------
>  3 files changed, 22 insertions(+), 74 deletions(-)

I pushed this series to a branch of mine and ran it per commit in GHA.
It caught a UT failure on this patch:
https://github.com/david-marchand/dpdk/runs/3408921022?check_suite_focus=true


--- stdout ---
RTE>>event_eth_tx_adapter_autotest
 + ------------------------------------------------------- +
 + Test Suite : tx event eth adapter test suite
Port 0 MAC: 00 00 00 00 00 00
Port 1 MAC: 00 00 00 00 00 00
Failed to find a valid event device, testing with event_sw0 device
 + ------------------------------------------------------- +
 + TestCase [ 0] : tx_adapter_create_free succeeded
 + TestCase [ 1] : tx_adapter_queue_add_del succeeded
 + TestCase [ 2] : tx_adapter_start_stop succeeded
 + TestCase [ 3] : tx_adapter_service failed
 + TestCase [ 4] : tx_adapter_dynamic_device failed
 + ------------------------------------------------------- +
 + Test Suite Summary : tx event eth adapter test suite
 + ------------------------------------------------------- +
 + Tests Total :        5
 + Tests Skipped :      0
 + Tests Executed :     5
 + Tests Unsupported:   0
 + Tests Passed :       3
 + Tests Failed :       2
 + ------------------------------------------------------- +
Test Failed


Can you double check?
Thanks.

-- 
David Marchand


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 14/15] eventdev: make trace APIs internal
  2021-08-23 19:40 ` [dpdk-dev] [RFC 14/15] eventdev: make trace APIs internal pbhagavatula
@ 2021-08-30 14:47   ` Jayatheerthan, Jay
  0 siblings, 0 replies; 119+ messages in thread
From: Jayatheerthan, Jay @ 2021-08-30 14:47 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Gujjar, Abhinandan S, Carrillo, Erik G
  Cc: Ananyev, Konstantin, dev

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Tuesday, August 24, 2021 1:10 AM
> To: jerinj@marvell.com; Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>;
> Carrillo, Erik G <erik.g.carrillo@intel.com>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [RFC 14/15] eventdev: make trace APIs internal
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Slowpath trace APIs are only used in rte_eventdev.c so make them
> as internal.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} | 0
>  lib/eventdev/eventdev_trace_points.c                    | 2 +-
>  lib/eventdev/meson.build                                | 2 +-
>  lib/eventdev/rte_event_crypto_adapter.c                 | 2 +-
>  lib/eventdev/rte_event_eth_rx_adapter.c                 | 2 +-
>  lib/eventdev/rte_event_eth_tx_adapter.c                 | 2 +-
>  lib/eventdev/rte_event_timer_adapter.c                  | 2 +-
>  lib/eventdev/rte_eventdev.c                             | 2 +-
>  8 files changed, 7 insertions(+), 7 deletions(-)
>  rename lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} (100%)
> 
> diff --git a/lib/eventdev/rte_eventdev_trace.h b/lib/eventdev/eventdev_trace.h
> similarity index 100%
> rename from lib/eventdev/rte_eventdev_trace.h
> rename to lib/eventdev/eventdev_trace.h
> diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
> index 3867ec8008..237d9383fd 100644
> --- a/lib/eventdev/eventdev_trace_points.c
> +++ b/lib/eventdev/eventdev_trace_points.c
> @@ -4,7 +4,7 @@
> 
>  #include <rte_trace_point_register.h>
> 
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
> 
>  /* Eventdev trace points */
>  RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_configure,
> diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
> index f19b831edd..c750e0214f 100644
> --- a/lib/eventdev/meson.build
> +++ b/lib/eventdev/meson.build
> @@ -19,7 +19,6 @@ sources = files(
>  )
>  headers = files(
>          'rte_eventdev.h',
> -        'rte_eventdev_trace.h',
>          'rte_eventdev_trace_fp.h',
>          'rte_event_ring.h',
>          'rte_event_eth_rx_adapter.h',
> @@ -34,6 +33,7 @@ driver_sdk_headers += files(
>          'eventdev_pmd.h',
>          'eventdev_pmd_pci.h',
>          'eventdev_pmd_vdev.h',
> +        'eventdev_trace.h',
>          'event_timer_adapter_pmd.h',
>  )
> 
> diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
> index 8a2a25c02e..93e7352c6e 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.c
> +++ b/lib/eventdev/rte_event_crypto_adapter.c
> @@ -16,7 +16,7 @@
> 
>  #include "rte_eventdev.h"
>  #include "eventdev_pmd.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
>  #include "rte_event_crypto_adapter.h"
> 
>  #define BATCH_SIZE 32
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
> index f8225ebd3d..7e97fbd21d 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -20,7 +20,7 @@
> 
>  #include "rte_eventdev.h"
>  #include "eventdev_pmd.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
>  #include "rte_event_eth_rx_adapter.h"
> 
>  #define BATCH_SIZE		32

Looks good to me.
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>

> diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
> index 18c0359db7..ee3631bced 100644
> --- a/lib/eventdev/rte_event_eth_tx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_tx_adapter.c
> @@ -6,7 +6,7 @@
>  #include <rte_ethdev.h>
> 
>  #include "eventdev_pmd.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
>  #include "rte_event_eth_tx_adapter.h"
> 
>  #define TXA_BATCH_SIZE		32

Looks good to me.
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>

> diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
> index c4dc7a5fd4..7404b0cbb2 100644
> --- a/lib/eventdev/rte_event_timer_adapter.c
> +++ b/lib/eventdev/rte_event_timer_adapter.c
> @@ -24,7 +24,7 @@
>  #include "eventdev_pmd.h"
>  #include "rte_event_timer_adapter.h"
>  #include "rte_eventdev.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
> 
>  #define DATA_MZ_NAME_MAX_LEN 64
>  #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
> diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
> index b9b029edc4..3a393bd120 100644
> --- a/lib/eventdev/rte_eventdev.c
> +++ b/lib/eventdev/rte_eventdev.c
> @@ -36,7 +36,7 @@
> 
>  #include "rte_eventdev.h"
>  #include "eventdev_pmd.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
> 
>  struct rte_eventdev *rte_eventdevs;
> 
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [RFC] eventdev: uninline inline API functions
  2021-08-30 10:25   ` Mattias Rönnblom
@ 2021-08-30 16:00     ` Mattias Rönnblom
  2021-08-31 12:28       ` Jerin Jacob
  0 siblings, 1 reply; 119+ messages in thread
From: Mattias Rönnblom @ 2021-08-30 16:00 UTC (permalink / raw)
  To: jerinj; +Cc: pbhagavatula, dev, bogdan.tanasa, Mattias Rönnblom

Replace the inline functions in the eventdev user application API with
regular non-inline API calls. This allows for a cleaner and simpler
API/ABI, but might well also cause performance regressions.

The purpose of this RFC patch is to allow for performance testing.

The rte_eventdev struct declaration should be moved off the public
API.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 drivers/net/octeontx/octeontx_ethdev.h  |  1 +
 lib/eventdev/rte_event_eth_rx_adapter.h |  1 +
 lib/eventdev/rte_event_eth_tx_adapter.c | 31 ++++++++
 lib/eventdev/rte_event_eth_tx_adapter.h | 35 ++-------
 lib/eventdev/rte_eventdev.c             | 82 +++++++++++++++++++++
 lib/eventdev/rte_eventdev.h             | 94 +++----------------------
 lib/eventdev/version.map                |  4 ++
 7 files changed, 134 insertions(+), 114 deletions(-)
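
(For context, application call sites stay source-compatible across this
change; a rough sketch, with placeholder ids and burst size, of the kind
of worker fast path whose performance the testing should compare:)

#include <rte_common.h>
#include <rte_eventdev.h>

/* Sketch: a typical worker iteration. With this RFC the two calls below
 * become regular library calls instead of being expanded inline in the
 * application, which is the difference the performance testing targets.
 */
static void
worker_iteration(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event evs[32];
	uint16_t i, nb;

	nb = rte_event_dequeue_burst(dev_id, port_id, evs, RTE_DIM(evs), 0);
	for (i = 0; i < nb; i++)
		evs[i].op = RTE_EVENT_OP_FORWARD;
	if (nb > 0)
		(void)rte_event_enqueue_burst(dev_id, port_id, evs, nb);
}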

diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index b73515de37..9402105fcf 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -9,6 +9,7 @@
 
 #include <rte_common.h>
 #include <ethdev_driver.h>
+#include <eventdev_pmd.h>
 #include <rte_eventdev.h>
 #include <rte_mempool.h>
 #include <rte_memory.h>
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index 182dd2e5dd..79f4822fb0 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -84,6 +84,7 @@ extern "C" {
 #include <rte_service.h>
 
 #include "rte_eventdev.h"
+#include "eventdev_pmd.h"
 
 #define RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE 32
 
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
index 18c0359db7..74f88e6147 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.c
+++ b/lib/eventdev/rte_event_eth_tx_adapter.c
@@ -1154,6 +1154,37 @@ rte_event_eth_tx_adapter_start(uint8_t id)
 	return ret;
 }
 
+uint16_t
+rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
+				 uint8_t port_id,
+				 struct rte_event ev[],
+				 uint16_t nb_events,
+				 const uint8_t flags)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+		!rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
+		nb_events, flags);
+	if (flags)
+		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
+						  ev, nb_events);
+	else
+		return dev->txa_enqueue(dev->data->ports[port_id], ev,
+					nb_events);
+}
+
 int
 rte_event_eth_tx_adapter_stats_get(uint8_t id,
 				struct rte_event_eth_tx_adapter_stats *stats)
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
index 8c59547165..3cd65e8a09 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/eventdev/rte_event_eth_tx_adapter.h
@@ -79,6 +79,7 @@ extern "C" {
 #include <rte_mbuf.h>
 
 #include "rte_eventdev.h"
+#include "eventdev_pmd.h"
 
 /**
  * Adapter configuration structure
@@ -348,36 +349,12 @@ rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
  *              one or more events. This error code is only applicable to
  *              closed systems.
  */
-static inline uint16_t
+uint16_t
 rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
-				uint8_t port_id,
-				struct rte_event ev[],
-				uint16_t nb_events,
-				const uint8_t flags)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS ||
-		!rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
-		nb_events, flags);
-	if (flags)
-		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
-						  ev, nb_events);
-	else
-		return dev->txa_enqueue(dev->data->ports[port_id], ev,
-					nb_events);
-}
+				 uint8_t port_id,
+				 struct rte_event ev[],
+				 uint16_t nb_events,
+				 const uint8_t flags);
 
 /**
  * Retrieve statistics for an adapter
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 594dd5e759..e2dad8a838 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -1119,6 +1119,65 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
 	return count;
 }
 
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events,
+			const event_enqueue_burst_t fn)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+	else
+		return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
+uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_burst);
+}
+
+uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_event_devices[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_new_burst);
+}
+
+uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+				const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+			dev->enqueue_forward_burst);
+}
+
 int
 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 				 uint64_t *timeout_ticks)
@@ -1135,6 +1194,29 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
 }
 
+uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+			uint16_t nb_events, uint64_t timeout_ticks)
+{
+	struct rte_eventdev *dev = &rte_event_devices[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
+
+	return (*dev->dequeue_burst)(dev->data->ports[port_id], ev, nb_events,
+				     timeout_ticks);
+}
+
 int
 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
 {
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index a9c496fb62..451e9fb0a0 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1445,38 +1445,6 @@ struct rte_eventdev {
 	void *reserved_ptrs[3];   /**< Reserved for future fields */
 } __rte_cache_aligned;
 
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
-static __rte_always_inline uint16_t
-__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events,
-			const event_enqueue_burst_t fn)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
-	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
-}
-
 /**
  * Enqueue a burst of events objects or an event object supplied in *rte_event*
  * structure on an  event device designated by its *dev_id* through the event
@@ -1520,15 +1488,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
  *              closed systems.
  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
  */
-static inline uint16_t
+uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_burst);
-}
+			const struct rte_event ev[], uint16_t nb_events);
 
 /**
  * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
@@ -1571,15 +1533,9 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
  * @see rte_event_enqueue_burst()
  */
-static inline uint16_t
+uint16_t
 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_new_burst);
-}
+			    const struct rte_event ev[], uint16_t nb_events);
 
 /**
  * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
@@ -1622,15 +1578,10 @@ rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
  * @see rte_event_enqueue_burst()
  */
-static inline uint16_t
+uint16_t
 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_forward_burst);
-}
+				const struct rte_event ev[],
+				uint16_t nb_events);
 
 /**
  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
@@ -1727,36 +1678,9 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
  *
  * @see rte_event_port_dequeue_depth()
  */
-static inline uint16_t
+uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-			uint16_t nb_events, uint64_t timeout_ticks)
-{
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->dequeue)(
-			dev->data->ports[port_id], ev, timeout_ticks);
-	else
-		return (*dev->dequeue_burst)(
-			dev->data->ports[port_id], ev, nb_events,
-				timeout_ticks);
-}
+			uint16_t nb_events, uint64_t timeout_ticks);
 
 /**
  * Link multiple source event queues supplied in *queues* to the destination
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 88625621ec..8da79cbdc0 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -13,7 +13,11 @@ DPDK_22 {
 	rte_event_crypto_adapter_stats_get;
 	rte_event_crypto_adapter_stats_reset;
 	rte_event_crypto_adapter_stop;
+	rte_event_enqueue_burst;
+	rte_event_enqueue_new_burst;
+	rte_event_enqueue_forward_burst;
 	rte_event_dequeue_timeout_ticks;
+	rte_event_dequeue_burst;
 	rte_event_dev_attr_get;
 	rte_event_dev_close;
 	rte_event_dev_configure;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC] eventdev: uninline inline API functions
  2021-08-30 16:00     ` [dpdk-dev] [RFC] eventdev: uninline inline API functions Mattias Rönnblom
@ 2021-08-31 12:28       ` Jerin Jacob
  2021-08-31 12:34         ` Mattias Rönnblom
  0 siblings, 1 reply; 119+ messages in thread
From: Jerin Jacob @ 2021-08-31 12:28 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Jerin Jacob, Pavan Nikhilesh, dpdk-dev, bogdan.tanasa

On Mon, Aug 30, 2021 at 9:30 PM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> Replace the inline functions in the eventdev user application API with
> regular non-inline API calls. This allows for a cleaner and more
> simple API/ABI, but might well also cause performance regressions.
>
> The purpose of this RFC patch is to allow for performance testing.
>
> The rte_eventdev struct declaration should be moved off the public
> API.
>
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

I think we need to align all DPDK subsystems to a similar scheme.[1]
I see around a -5% regression, depending on the workload.

[1]
https://patches.dpdk.org/project/dpdk/patch/20210820162834.12544-2-konstantin.ananyev@intel.com/
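
For context, a rough standalone sketch with made-up names (not the eventdev
API) of what the uninlining changes for an application: the inline wrapper
collapses to just the driver's indirect call, while the out-of-line variant
adds a call/return (and, across a shared object, possibly a PLT hop) on every
burst, which is presumably where this kind of regression comes from.

/* Standalone sketch, made-up names; compile with e.g. gcc -O2 sketch.c */
#include <stdint.h>
#include <stdio.h>

struct my_dev {
	uint16_t (*enqueue_burst)(void *port, const void *ev, uint16_t n);
	void *port;
};

/* "Before": header-style static inline wrapper; the compiler folds it
 * away, leaving only the driver's indirect call. */
static inline uint16_t
enqueue_inline(const struct my_dev *dev, const void *ev, uint16_t n)
{
	return dev->enqueue_burst(dev->port, ev, n);
}

/* "After": same body, but kept out of line. In the real patch this
 * definition moves into the library's .c file behind the ABI, so each
 * burst pays an extra call/return. */
__attribute__((noinline)) static uint16_t
enqueue_outofline(const struct my_dev *dev, const void *ev, uint16_t n)
{
	return dev->enqueue_burst(dev->port, ev, n);
}

static uint16_t
drv_enqueue(void *port, const void *ev, uint16_t n)
{
	(void)port;
	(void)ev;
	return n;
}

int main(void)
{
	struct my_dev dev = { .enqueue_burst = drv_enqueue, .port = NULL };

	printf("%u %u\n", enqueue_inline(&dev, NULL, 8),
	       enqueue_outofline(&dev, NULL, 8));
	return 0;
}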

> ---
>  drivers/net/octeontx/octeontx_ethdev.h  |  1 +
>  lib/eventdev/rte_event_eth_rx_adapter.h |  1 +
>  lib/eventdev/rte_event_eth_tx_adapter.c | 31 ++++++++
>  lib/eventdev/rte_event_eth_tx_adapter.h | 35 ++-------
>  lib/eventdev/rte_eventdev.c             | 82 +++++++++++++++++++++
>  lib/eventdev/rte_eventdev.h             | 94 +++----------------------
>  lib/eventdev/version.map                |  4 ++
>  7 files changed, 134 insertions(+), 114 deletions(-)
>
> diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
> index b73515de37..9402105fcf 100644
> --- a/drivers/net/octeontx/octeontx_ethdev.h
> +++ b/drivers/net/octeontx/octeontx_ethdev.h
> @@ -9,6 +9,7 @@
>
>  #include <rte_common.h>
>  #include <ethdev_driver.h>
> +#include <eventdev_pmd.h>
>  #include <rte_eventdev.h>
>  #include <rte_mempool.h>
>  #include <rte_memory.h>
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
> index 182dd2e5dd..79f4822fb0 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> @@ -84,6 +84,7 @@ extern "C" {
>  #include <rte_service.h>
>
>  #include "rte_eventdev.h"
> +#include "eventdev_pmd.h"
>
>  #define RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE 32
>
> diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
> index 18c0359db7..74f88e6147 100644
> --- a/lib/eventdev/rte_event_eth_tx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_tx_adapter.c
> @@ -1154,6 +1154,37 @@ rte_event_eth_tx_adapter_start(uint8_t id)
>         return ret;
>  }
>
> +uint16_t
> +rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
> +                                uint8_t port_id,
> +                                struct rte_event ev[],
> +                                uint16_t nb_events,
> +                                const uint8_t flags)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> +       if (dev_id >= RTE_EVENT_MAX_DEVS ||
> +               !rte_eventdevs[dev_id].attached) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +
> +       if (port_id >= dev->data->nb_ports) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +#endif
> +       rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
> +               nb_events, flags);
> +       if (flags)
> +               return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
> +                                                 ev, nb_events);
> +       else
> +               return dev->txa_enqueue(dev->data->ports[port_id], ev,
> +                                       nb_events);
> +}
> +
>  int
>  rte_event_eth_tx_adapter_stats_get(uint8_t id,
>                                 struct rte_event_eth_tx_adapter_stats *stats)
> diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
> index 8c59547165..3cd65e8a09 100644
> --- a/lib/eventdev/rte_event_eth_tx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_tx_adapter.h
> @@ -79,6 +79,7 @@ extern "C" {
>  #include <rte_mbuf.h>
>
>  #include "rte_eventdev.h"
> +#include "eventdev_pmd.h"
>
>  /**
>   * Adapter configuration structure
> @@ -348,36 +349,12 @@ rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
>   *              one or more events. This error code is only applicable to
>   *              closed systems.
>   */
> -static inline uint16_t
> +uint16_t
>  rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
> -                               uint8_t port_id,
> -                               struct rte_event ev[],
> -                               uint16_t nb_events,
> -                               const uint8_t flags)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -       if (dev_id >= RTE_EVENT_MAX_DEVS ||
> -               !rte_eventdevs[dev_id].attached) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -
> -       if (port_id >= dev->data->nb_ports) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -#endif
> -       rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
> -               nb_events, flags);
> -       if (flags)
> -               return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
> -                                                 ev, nb_events);
> -       else
> -               return dev->txa_enqueue(dev->data->ports[port_id], ev,
> -                                       nb_events);
> -}
> +                                uint8_t port_id,
> +                                struct rte_event ev[],
> +                                uint16_t nb_events,
> +                                const uint8_t flags);
>
>  /**
>   * Retrieve statistics for an adapter
> diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
> index 594dd5e759..e2dad8a838 100644
> --- a/lib/eventdev/rte_eventdev.c
> +++ b/lib/eventdev/rte_eventdev.c
> @@ -1119,6 +1119,65 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
>         return count;
>  }
>
> +static __rte_always_inline uint16_t
> +__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> +                       const struct rte_event ev[], uint16_t nb_events,
> +                       const event_enqueue_burst_t fn)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> +       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +
> +       if (port_id >= dev->data->nb_ports) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +#endif
> +       rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
> +       /*
> +        * Allow zero cost non burst mode routine invocation if application
> +        * requests nb_events as const one
> +        */
> +       if (nb_events == 1)
> +               return (*dev->enqueue)(dev->data->ports[port_id], ev);
> +       else
> +               return fn(dev->data->ports[port_id], ev, nb_events);
> +}
> +
> +uint16_t
> +rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> +                       const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                                        dev->enqueue_burst);
> +}
> +
> +uint16_t
> +rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
> +                           const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_event_devices[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                                        dev->enqueue_new_burst);
> +}
> +
> +uint16_t
> +rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
> +                               const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                       dev->enqueue_forward_burst);
> +}
> +
>  int
>  rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
>                                  uint64_t *timeout_ticks)
> @@ -1135,6 +1194,29 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
>         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
>  }
>
> +uint16_t
> +rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
> +                       uint16_t nb_events, uint64_t timeout_ticks)
> +{
> +       struct rte_eventdev *dev = &rte_event_devices[dev_id];
> +
> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> +       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +
> +       if (port_id >= dev->data->nb_ports) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +#endif
> +       rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
> +
> +       return (*dev->dequeue_burst)(dev->data->ports[port_id], ev, nb_events,
> +                                    timeout_ticks);
> +}
> +
>  int
>  rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
>  {
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index a9c496fb62..451e9fb0a0 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1445,38 +1445,6 @@ struct rte_eventdev {
>         void *reserved_ptrs[3];   /**< Reserved for future fields */
>  } __rte_cache_aligned;
>
> -extern struct rte_eventdev *rte_eventdevs;
> -/** @internal The pool of rte_eventdev structures. */
> -
> -static __rte_always_inline uint16_t
> -__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events,
> -                       const event_enqueue_burst_t fn)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -
> -       if (port_id >= dev->data->nb_ports) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -#endif
> -       rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
> -       /*
> -        * Allow zero cost non burst mode routine invocation if application
> -        * requests nb_events as const one
> -        */
> -       if (nb_events == 1)
> -               return (*dev->enqueue)(dev->data->ports[port_id], ev);
> -       else
> -               return fn(dev->data->ports[port_id], ev, nb_events);
> -}
> -
>  /**
>   * Enqueue a burst of events objects or an event object supplied in *rte_event*
>   * structure on an  event device designated by its *dev_id* through the event
> @@ -1520,15 +1488,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>   *              closed systems.
>   * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
>   */
> -static inline uint16_t
> +uint16_t
>  rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_burst);
> -}
> +                       const struct rte_event ev[], uint16_t nb_events);
>
>  /**
>   * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
> @@ -1571,15 +1533,9 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>   * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
>   * @see rte_event_enqueue_burst()
>   */
> -static inline uint16_t
> +uint16_t
>  rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_new_burst);
> -}
> +                           const struct rte_event ev[], uint16_t nb_events);
>
>  /**
>   * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
> @@ -1622,15 +1578,10 @@ rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
>   * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
>   * @see rte_event_enqueue_burst()
>   */
> -static inline uint16_t
> +uint16_t
>  rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_forward_burst);
> -}
> +                               const struct rte_event ev[],
> +                               uint16_t nb_events);
>
>  /**
>   * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
> @@ -1727,36 +1678,9 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
>   *
>   * @see rte_event_port_dequeue_depth()
>   */
> -static inline uint16_t
> +uint16_t
>  rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
> -                       uint16_t nb_events, uint64_t timeout_ticks)
> -{
> -       struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -
> -       if (port_id >= dev->data->nb_ports) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -#endif
> -       rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
> -       /*
> -        * Allow zero cost non burst mode routine invocation if application
> -        * requests nb_events as const one
> -        */
> -       if (nb_events == 1)
> -               return (*dev->dequeue)(
> -                       dev->data->ports[port_id], ev, timeout_ticks);
> -       else
> -               return (*dev->dequeue_burst)(
> -                       dev->data->ports[port_id], ev, nb_events,
> -                               timeout_ticks);
> -}
> +                       uint16_t nb_events, uint64_t timeout_ticks);
>
>  /**
>   * Link multiple source event queues supplied in *queues* to the destination
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index 88625621ec..8da79cbdc0 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -13,7 +13,11 @@ DPDK_22 {
>         rte_event_crypto_adapter_stats_get;
>         rte_event_crypto_adapter_stats_reset;
>         rte_event_crypto_adapter_stop;
> +       rte_event_enqueue_burst;
> +       rte_event_enqueue_new_burst;
> +       rte_event_enqueue_forward_burst;
>         rte_event_dequeue_timeout_ticks;
> +       rte_event_dequeue_burst;
>         rte_event_dev_attr_get;
>         rte_event_dev_close;
>         rte_event_dev_configure;
> --
> 2.17.1
>

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC] eventdev: uninline inline API functions
  2021-08-31 12:28       ` Jerin Jacob
@ 2021-08-31 12:34         ` Mattias Rönnblom
  0 siblings, 0 replies; 119+ messages in thread
From: Mattias Rönnblom @ 2021-08-31 12:34 UTC (permalink / raw)
  To: Jerin Jacob; +Cc: Jerin Jacob, Pavan Nikhilesh, dpdk-dev, Bogdan Tanasa

On 2021-08-31 14:28, Jerin Jacob wrote:
> On Mon, Aug 30, 2021 at 9:30 PM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>> Replace the inline functions in the eventdev user application API with
>> regular non-inline API calls. This allows for a cleaner and more
>> simple API/ABI, but might well also cause performance regressions.
>>
>> The purpose of this RFC patch is to allow for performance testing.
>>
>> The rte_eventdev struct declaration should be moved off the public
>> API.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> I think we need to align all DPDK subsystems to a similar scheme.[1]


That makes perfect sense.


> I see -5% kind of regression-based on workload.
>
> [1]
> https://patches.dpdk.org/project/dpdk/patch/20210820162834.12544-2-konstantin.ananyev@intel.com/
>
>> ---
>>   drivers/net/octeontx/octeontx_ethdev.h  |  1 +
>>   lib/eventdev/rte_event_eth_rx_adapter.h |  1 +
>>   lib/eventdev/rte_event_eth_tx_adapter.c | 31 ++++++++
>>   lib/eventdev/rte_event_eth_tx_adapter.h | 35 ++-------
>>   lib/eventdev/rte_eventdev.c             | 82 +++++++++++++++++++++
>>   lib/eventdev/rte_eventdev.h             | 94 +++----------------------
>>   lib/eventdev/version.map                |  4 ++
>>   7 files changed, 134 insertions(+), 114 deletions(-)
>>
>> diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
>> index b73515de37..9402105fcf 100644
>> --- a/drivers/net/octeontx/octeontx_ethdev.h
>> +++ b/drivers/net/octeontx/octeontx_ethdev.h
>> @@ -9,6 +9,7 @@
>>
>>   #include <rte_common.h>
>>   #include <ethdev_driver.h>
>> +#include <eventdev_pmd.h>
>>   #include <rte_eventdev.h>
>>   #include <rte_mempool.h>
>>   #include <rte_memory.h>
>> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
>> index 182dd2e5dd..79f4822fb0 100644
>> --- a/lib/eventdev/rte_event_eth_rx_adapter.h
>> +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
>> @@ -84,6 +84,7 @@ extern "C" {
>>   #include <rte_service.h>
>>
>>   #include "rte_eventdev.h"
>> +#include "eventdev_pmd.h"
>>
>>   #define RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE 32
>>
>> diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
>> index 18c0359db7..74f88e6147 100644
>> --- a/lib/eventdev/rte_event_eth_tx_adapter.c
>> +++ b/lib/eventdev/rte_event_eth_tx_adapter.c
>> @@ -1154,6 +1154,37 @@ rte_event_eth_tx_adapter_start(uint8_t id)
>>          return ret;
>>   }
>>
>> +uint16_t
>> +rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
>> +                                uint8_t port_id,
>> +                                struct rte_event ev[],
>> +                                uint16_t nb_events,
>> +                                const uint8_t flags)
>> +{
>> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> +
>> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
>> +       if (dev_id >= RTE_EVENT_MAX_DEVS ||
>> +               !rte_eventdevs[dev_id].attached) {
>> +               rte_errno = EINVAL;
>> +               return 0;
>> +       }
>> +
>> +       if (port_id >= dev->data->nb_ports) {
>> +               rte_errno = EINVAL;
>> +               return 0;
>> +       }
>> +#endif
>> +       rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
>> +               nb_events, flags);
>> +       if (flags)
>> +               return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
>> +                                                 ev, nb_events);
>> +       else
>> +               return dev->txa_enqueue(dev->data->ports[port_id], ev,
>> +                                       nb_events);
>> +}
>> +
>>   int
>>   rte_event_eth_tx_adapter_stats_get(uint8_t id,
>>                                  struct rte_event_eth_tx_adapter_stats *stats)
>> diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
>> index 8c59547165..3cd65e8a09 100644
>> --- a/lib/eventdev/rte_event_eth_tx_adapter.h
>> +++ b/lib/eventdev/rte_event_eth_tx_adapter.h
>> @@ -79,6 +79,7 @@ extern "C" {
>>   #include <rte_mbuf.h>
>>
>>   #include "rte_eventdev.h"
>> +#include "eventdev_pmd.h"
>>
>>   /**
>>    * Adapter configuration structure
>> @@ -348,36 +349,12 @@ rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
>>    *              one or more events. This error code is only applicable to
>>    *              closed systems.
>>    */
>> -static inline uint16_t
>> +uint16_t
>>   rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
>> -                               uint8_t port_id,
>> -                               struct rte_event ev[],
>> -                               uint16_t nb_events,
>> -                               const uint8_t flags)
>> -{
>> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> -
>> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
>> -       if (dev_id >= RTE_EVENT_MAX_DEVS ||
>> -               !rte_eventdevs[dev_id].attached) {
>> -               rte_errno = EINVAL;
>> -               return 0;
>> -       }
>> -
>> -       if (port_id >= dev->data->nb_ports) {
>> -               rte_errno = EINVAL;
>> -               return 0;
>> -       }
>> -#endif
>> -       rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
>> -               nb_events, flags);
>> -       if (flags)
>> -               return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
>> -                                                 ev, nb_events);
>> -       else
>> -               return dev->txa_enqueue(dev->data->ports[port_id], ev,
>> -                                       nb_events);
>> -}
>> +                                uint8_t port_id,
>> +                                struct rte_event ev[],
>> +                                uint16_t nb_events,
>> +                                const uint8_t flags);
>>
>>   /**
>>    * Retrieve statistics for an adapter
>> diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
>> index 594dd5e759..e2dad8a838 100644
>> --- a/lib/eventdev/rte_eventdev.c
>> +++ b/lib/eventdev/rte_eventdev.c
>> @@ -1119,6 +1119,65 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
>>          return count;
>>   }
>>
>> +static __rte_always_inline uint16_t
>> +__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>> +                       const struct rte_event ev[], uint16_t nb_events,
>> +                       const event_enqueue_burst_t fn)
>> +{
>> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> +
>> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
>> +       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
>> +               rte_errno = EINVAL;
>> +               return 0;
>> +       }
>> +
>> +       if (port_id >= dev->data->nb_ports) {
>> +               rte_errno = EINVAL;
>> +               return 0;
>> +       }
>> +#endif
>> +       rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
>> +       /*
>> +        * Allow zero cost non burst mode routine invocation if application
>> +        * requests nb_events as const one
>> +        */
>> +       if (nb_events == 1)
>> +               return (*dev->enqueue)(dev->data->ports[port_id], ev);
>> +       else
>> +               return fn(dev->data->ports[port_id], ev, nb_events);
>> +}
>> +
>> +uint16_t
>> +rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>> +                       const struct rte_event ev[], uint16_t nb_events)
>> +{
>> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> +
>> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
>> +                                        dev->enqueue_burst);
>> +}
>> +
>> +uint16_t
>> +rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
>> +                           const struct rte_event ev[], uint16_t nb_events)
>> +{
>> +       const struct rte_eventdev *dev = &rte_event_devices[dev_id];
>> +
>> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
>> +                                        dev->enqueue_new_burst);
>> +}
>> +
>> +uint16_t
>> +rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
>> +                               const struct rte_event ev[], uint16_t nb_events)
>> +{
>> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> +
>> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
>> +                       dev->enqueue_forward_burst);
>> +}
>> +
>>   int
>>   rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
>>                                   uint64_t *timeout_ticks)
>> @@ -1135,6 +1194,29 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
>>          return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
>>   }
>>
>> +uint16_t
>> +rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
>> +                       uint16_t nb_events, uint64_t timeout_ticks)
>> +{
>> +       struct rte_eventdev *dev = &rte_event_devices[dev_id];
>> +
>> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
>> +       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
>> +               rte_errno = EINVAL;
>> +               return 0;
>> +       }
>> +
>> +       if (port_id >= dev->data->nb_ports) {
>> +               rte_errno = EINVAL;
>> +               return 0;
>> +       }
>> +#endif
>> +       rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
>> +
>> +       return (*dev->dequeue_burst)(dev->data->ports[port_id], ev, nb_events,
>> +                                    timeout_ticks);
>> +}
>> +
>>   int
>>   rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
>>   {
>> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
>> index a9c496fb62..451e9fb0a0 100644
>> --- a/lib/eventdev/rte_eventdev.h
>> +++ b/lib/eventdev/rte_eventdev.h
>> @@ -1445,38 +1445,6 @@ struct rte_eventdev {
>>          void *reserved_ptrs[3];   /**< Reserved for future fields */
>>   } __rte_cache_aligned;
>>
>> -extern struct rte_eventdev *rte_eventdevs;
>> -/** @internal The pool of rte_eventdev structures. */
>> -
>> -static __rte_always_inline uint16_t
>> -__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>> -                       const struct rte_event ev[], uint16_t nb_events,
>> -                       const event_enqueue_burst_t fn)
>> -{
>> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> -
>> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
>> -       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
>> -               rte_errno = EINVAL;
>> -               return 0;
>> -       }
>> -
>> -       if (port_id >= dev->data->nb_ports) {
>> -               rte_errno = EINVAL;
>> -               return 0;
>> -       }
>> -#endif
>> -       rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
>> -       /*
>> -        * Allow zero cost non burst mode routine invocation if application
>> -        * requests nb_events as const one
>> -        */
>> -       if (nb_events == 1)
>> -               return (*dev->enqueue)(dev->data->ports[port_id], ev);
>> -       else
>> -               return fn(dev->data->ports[port_id], ev, nb_events);
>> -}
>> -
>>   /**
>>    * Enqueue a burst of events objects or an event object supplied in *rte_event*
>>    * structure on an  event device designated by its *dev_id* through the event
>> @@ -1520,15 +1488,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>>    *              closed systems.
>>    * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
>>    */
>> -static inline uint16_t
>> +uint16_t
>>   rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>> -                       const struct rte_event ev[], uint16_t nb_events)
>> -{
>> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> -
>> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
>> -                       dev->enqueue_burst);
>> -}
>> +                       const struct rte_event ev[], uint16_t nb_events);
>>
>>   /**
>>    * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
>> @@ -1571,15 +1533,9 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>>    * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
>>    * @see rte_event_enqueue_burst()
>>    */
>> -static inline uint16_t
>> +uint16_t
>>   rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
>> -                       const struct rte_event ev[], uint16_t nb_events)
>> -{
>> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> -
>> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
>> -                       dev->enqueue_new_burst);
>> -}
>> +                           const struct rte_event ev[], uint16_t nb_events);
>>
>>   /**
>>    * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
>> @@ -1622,15 +1578,10 @@ rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
>>    * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
>>    * @see rte_event_enqueue_burst()
>>    */
>> -static inline uint16_t
>> +uint16_t
>>   rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
>> -                       const struct rte_event ev[], uint16_t nb_events)
>> -{
>> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> -
>> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
>> -                       dev->enqueue_forward_burst);
>> -}
>> +                               const struct rte_event ev[],
>> +                               uint16_t nb_events);
>>
>>   /**
>>    * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
>> @@ -1727,36 +1678,9 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
>>    *
>>    * @see rte_event_port_dequeue_depth()
>>    */
>> -static inline uint16_t
>> +uint16_t
>>   rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
>> -                       uint16_t nb_events, uint64_t timeout_ticks)
>> -{
>> -       struct rte_eventdev *dev = &rte_eventdevs[dev_id];
>> -
>> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
>> -       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
>> -               rte_errno = EINVAL;
>> -               return 0;
>> -       }
>> -
>> -       if (port_id >= dev->data->nb_ports) {
>> -               rte_errno = EINVAL;
>> -               return 0;
>> -       }
>> -#endif
>> -       rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
>> -       /*
>> -        * Allow zero cost non burst mode routine invocation if application
>> -        * requests nb_events as const one
>> -        */
>> -       if (nb_events == 1)
>> -               return (*dev->dequeue)(
>> -                       dev->data->ports[port_id], ev, timeout_ticks);
>> -       else
>> -               return (*dev->dequeue_burst)(
>> -                       dev->data->ports[port_id], ev, nb_events,
>> -                               timeout_ticks);
>> -}
>> +                       uint16_t nb_events, uint64_t timeout_ticks);
>>
>>   /**
>>    * Link multiple source event queues supplied in *queues* to the destination
>> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
>> index 88625621ec..8da79cbdc0 100644
>> --- a/lib/eventdev/version.map
>> +++ b/lib/eventdev/version.map
>> @@ -13,7 +13,11 @@ DPDK_22 {
>>          rte_event_crypto_adapter_stats_get;
>>          rte_event_crypto_adapter_stats_reset;
>>          rte_event_crypto_adapter_stop;
>> +       rte_event_enqueue_burst;
>> +       rte_event_enqueue_new_burst;
>> +       rte_event_enqueue_forward_burst;
>>          rte_event_dequeue_timeout_ticks;
>> +       rte_event_dequeue_burst;
>>          rte_event_dev_attr_get;
>>          rte_event_dev_close;
>>          rte_event_dev_configure;
>> --
>> 2.17.1
>>


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to hugepage
  2021-08-24 13:50   ` Carrillo, Erik G
@ 2021-09-01  6:30     ` Pavan Nikhilesh Bhagavatula
  0 siblings, 0 replies; 119+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2021-09-01  6:30 UTC (permalink / raw)
  To: Carrillo, Erik G, Jerin Jacob Kollanukkaran; +Cc: Ananyev, Konstantin, dev

>Hi Pavan,
>
>> -----Original Message-----
>> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
>> Sent: Monday, August 23, 2021 2:40 PM
>> To: jerinj@marvell.com; Carrillo, Erik G <erik.g.carrillo@intel.com>
>> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>;
>dev@dpdk.org;
>> Pavan Nikhilesh <pbhagavatula@marvell.com>
>> Subject: [dpdk-dev] [RFC 12/15] eventdev: move timer adapters
>memory to
>> hugepage
>>
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Move memory used by timer adapters to hugepage.
>> Allocate memory on the first adapter create or lookup to address both
>> primary and secondary process usecases.
>>
>
>Is the motivation for this change performance or space improvement?
>Can we add something to the commit message to say?

This was supposed to be a perf improvement change; I will be dropping it
for the event device as it causes an additional load for getting the base address.

For the timer adapter I think we can make this change, as we return the pointer
to the adapter directly, so there is no additional lookup cost.
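
To illustrate the difference (a rough sketch with made-up names, not the
actual eventdev or timer adapter code): with a static array the fastpath
computes &table[id] from a link-time constant base, while a dynamically
allocated array first has to load the base pointer itself; the timer adapter
side avoids that extra load because the application already holds the adapter
pointer returned by create()/lookup().

/* Rough sketch, made-up names. */
#include <stdlib.h>

struct adapter {
	int allocated;
};

/* Static array: &table_static[id] needs no load of a base pointer. */
static struct adapter table_static[8];

/* Dynamically allocated (e.g. hugepage-backed) array: the base pointer
 * must be loaded before indexing, the "additional load" mentioned above. */
static struct adapter *table_dyn;

struct adapter *
adapter_get_static(int id)
{
	return &table_static[id];
}

struct adapter *
adapter_lookup(int id)
{
	if (table_dyn == NULL)
		table_dyn = calloc(8, sizeof(*table_dyn));
	/* The caller caches this pointer, so the extra load is paid once at
	 * lookup time rather than on every fastpath call. */
	return table_dyn != NULL ? &table_dyn[id] : NULL;
}

int main(void)
{
	return (adapter_get_static(0) != NULL &&
		adapter_lookup(0) != NULL) ? 0 : 1;
}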

I will update the commit message in the next version.

>
>Thanks,
>Erik

Thanks,
Pavan.

>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>> ---
>>  lib/eventdev/rte_event_timer_adapter.c | 24
>> +++++++++++++++++++++++-
>>  1 file changed, 23 insertions(+), 1 deletion(-)
>>
>> diff --git a/lib/eventdev/rte_event_timer_adapter.c
>> b/lib/eventdev/rte_event_timer_adapter.c
>> index ae55407042..c4dc7a5fd4 100644
>> --- a/lib/eventdev/rte_event_timer_adapter.c
>> +++ b/lib/eventdev/rte_event_timer_adapter.c
>> @@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype,
>> adapter.timer, NOTICE);
>> RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer,
>NOTICE);
>> RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc,
>> NOTICE);
>>
>> -static struct rte_event_timer_adapter
>> adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
>> +static struct rte_event_timer_adapter *adapters;
>>
>>  static const struct event_timer_adapter_ops swtim_ops;
>>
>> @@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
>>  	int n, ret;
>>  	struct rte_eventdev *dev;
>>
>> +	if (adapters == NULL) {
>> +		adapters = rte_zmalloc("Eventdev",
>> +				       sizeof(struct
>rte_event_timer_adapter) *
>> +
>> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
>> +				       RTE_CACHE_LINE_SIZE);
>> +		if (adapters == NULL) {
>> +			rte_errno = ENOMEM;
>> +			return NULL;
>> +		}
>> +	}
>> +
>>  	if (conf == NULL) {
>>  		rte_errno = EINVAL;
>>  		return NULL;
>> @@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t
>> adapter_id)
>>  	int ret;
>>  	struct rte_eventdev *dev;
>>
>> +	if (adapters == NULL) {
>> +		adapters = rte_zmalloc("Eventdev",
>> +				       sizeof(struct
>rte_event_timer_adapter) *
>> +
>> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
>> +				       RTE_CACHE_LINE_SIZE);
>> +		if (adapters == NULL) {
>> +			rte_errno = ENOMEM;
>> +			return NULL;
>> +		}
>> +	}
>> +
>>  	if (adapters[adapter_id].allocated)
>>  		return &adapters[adapter_id]; /* Adapter is already
>loaded
>> */
>>
>> --
>> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [RFC 11/15] eventdev: reserve fields in timer object
  2021-08-24 15:10   ` Stephen Hemminger
@ 2021-09-01  6:48     ` Pavan Nikhilesh Bhagavatula
  2021-09-07 21:02       ` Carrillo, Erik G
  0 siblings, 1 reply; 119+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2021-09-01  6:48 UTC (permalink / raw)
  To: Stephen Hemminger, Erik Gabriel Carrillo
  Cc: Jerin Jacob Kollanukkaran, konstantin.ananyev, dev

>On Tue, 24 Aug 2021 01:10:15 +0530
><pbhagavatula@marvell.com> wrote:
>
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Reserve fields in rte_event_timer data structure to address future
>> use cases.
>> Also, remove volatile from rte_event_timer.
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
>Reserve fields are not a good idea. They don't solve future API/ABI
>problems.
>
>The issue is that you need to zero them and check they are zero
>otherwise
>they can't safely be used later.  This happened with the Linux kernel
>system calls where in several cases a flag field was added for future.
>The problem is that old programs would work with any garbage in the
>flag
>field, and therefore the flag could not be extended.

The change is in rte_event_timer, which is a fastpath object similar to
rte_mbuf.
I think fastpath objects don't have the above-mentioned caveat.

>
>A better way to make structures internal opaque objects that
>can be resized.  Why is rte_event_timer_adapter exposed in API?

rte_event_timer_adapter has API semantics similar to rte_mempool;
the objects of the adapter are rte_event_timer objects, which hold
the timer state.

Erik,

I think we should move the fields in the current rte_event_timer structure
around; currently we have:

struct rte_event_timer {
	struct rte_event ev;
	volatile enum rte_event_timer_state state;
              x-------x 4 byte hole x---------x
	uint64_t timeout_ticks;
	uint64_t impl_opaque[2];
	uint8_t user_meta[0];
} __rte_cache_aligned;

Move to

struct rte_event_timer {
	struct rte_event ev;
	uint64_t timeout_ticks;
	uint64_t impl_opaque[2];
	uint64_t rsvd;
	enum rte_event_timer_state state;
	uint8_t user_meta[0];
} __rte_cache_aligned;

Since it's cache aligned, the size doesn't change.
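
That can be checked at compile time; here is a standalone sketch with stand-in
types (struct mock_event plays the 16-byte struct rte_event and the enum stands
in for rte_event_timer_state, so this is not the real definition):

/* C11 layout check with stand-in types. */
#include <assert.h>
#include <stdint.h>

struct mock_event {
	uint64_t word[2];		/* 16 bytes, like struct rte_event */
};

enum mock_timer_state {
	MOCK_TIM_NOT_ARMED = 0,		/* 4 bytes on common ABIs */
};

struct mock_event_timer {
	struct mock_event ev;		/* offset  0 */
	uint64_t timeout_ticks;		/* offset 16 */
	uint64_t impl_opaque[2];	/* offset 24 */
	uint64_t rsvd;			/* offset 40 */
	enum mock_timer_state state;	/* offset 48 */
	uint8_t user_meta[];		/* offset 52 */
} __attribute__((aligned(64)));

static_assert(sizeof(struct mock_event_timer) == 64,
	      "rearranged layout still fits one 64-byte cache line");

int main(void)
{
	return 0;
}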

Thoughts?

Thanks,
Pavan.


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [RFC 11/15] eventdev: reserve fields in timer object
  2021-09-01  6:48     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
@ 2021-09-07 21:02       ` Carrillo, Erik G
  0 siblings, 0 replies; 119+ messages in thread
From: Carrillo, Erik G @ 2021-09-07 21:02 UTC (permalink / raw)
  To: Pavan Nikhilesh Bhagavatula, Stephen Hemminger
  Cc: Jerin Jacob Kollanukkaran, Ananyev, Konstantin, dev



> -----Original Message-----
> From: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com>
> Sent: Wednesday, September 1, 2021 1:48 AM
> To: Stephen Hemminger <stephen@networkplumber.org>; Carrillo, Erik G
> <erik.g.carrillo@intel.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; dev@dpdk.org
> Subject: RE: [EXT] Re: [dpdk-dev] [RFC 11/15] eventdev: reserve fields in
> timer object
> 
> >On Tue, 24 Aug 2021 01:10:15 +0530
> ><pbhagavatula@marvell.com> wrote:
> >
> >> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >>
> >> Reserve fields in rte_event_timer data structure to address future
> >> use cases.
> >> Also, remove volatile from rte_event_timer.
> >>
> >> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >
> >Reserve fields are not a good idea. They don't solve future API/ABI
> >problems.
> >
> >The issue is that you need to zero them and check they are zero
> >otherwise they can't safely be used later.  This happened with the
> >Linux kernel system calls where in several cases a flag field was added
> >for future.
> >The problem is that old programs would work with any garbage in the
> >flag field, and therefore the flag could not be extended.
> 
> The change is in rte_event_timer which is a fastpath object similar to
> rte_mbuf.
> I think fast path objects don't have the above mentioned caveat.
> 
> >
> >A better way to make structures internal opaque objects that can be
> >resized.  Why is rte_event_timer_adapter exposed in API?
> 
> rte_event_timer_adapter has similar API semantics of  rte_mempool, the
> objects of the adapter are rte_event_timer objects which have information
> of the timer state.
> 
> Erik,
> 
> I think we should move the fields in current rte_event_timer structure
> around, currently we have
> 
> struct rte_event_timer {
> 	struct rte_event ev;
> 	volatile enum rte_event_timer_state state;
>               x-------x 4 byte hole x---------x
> 	uint64_t timeout_ticks;
> 	uint64_t impl_opaque[2];
> 	uint8_t user_meta[0];
> } __rte_cache_aligned;
> 
> Move to
> 
> struct rte_event_timer {
> 	struct rte_event ev;
> 	uint64_t timeout_ticks;
> 	uint64_t impl_opaque[2];
> 	uint64_t rsvd;
> 	enum rte_event_timer_state state;
> 	uint8_t user_meta[0];
> } __rte_cache_aligned;
> 
> Since its cache aligned, the size doesn't change.
> 
> Thoughts?
> 

I'm not seeing any problem with rearranging the members. However, you originally had "uint64_t rsvd[2];" and above, it's just one variable. Did you mean to make it an array?

The following also appears to have no holes:

$ pahole -C rte_event_timer eventdev_rte_event_timer_adapter.c.o 
struct rte_event_timer {
	struct rte_event           ev;                   /*     0    16 */
	uint64_t                   timeout_ticks;        /*    16     8 */
	uint64_t                   impl_opaque[2];       /*    24    16 */
	uint64_t                   rsvd[2];              /*    40    16 */
	enum rte_event_timer_state state;                /*    56     4 */
	uint8_t                    user_meta[];          /*    60     0 */

	/* size: 64, cachelines: 1, members: 6 */
	/* padding: 4 */
} __attribute__((__aligned__(64)));

Thanks,
Erik

> Thanks,
> Pavan.


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object
  2021-08-23 19:40 ` [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object pbhagavatula
  2021-08-23 20:42   ` Carrillo, Erik G
  2021-08-24 15:10   ` Stephen Hemminger
@ 2021-09-07 21:31   ` Stephen Hemminger
  2 siblings, 0 replies; 119+ messages in thread
From: Stephen Hemminger @ 2021-09-07 21:31 UTC (permalink / raw)
  To: pbhagavatula; +Cc: jerinj, Erik Gabriel Carrillo, konstantin.ananyev, dev

On Tue, 24 Aug 2021 01:10:15 +0530
<pbhagavatula@marvell.com> wrote:

> +	uint64_t rsvd[2];
> +	/**< Reserved fields for future use. */

Did you check that these are 0 in the current code?
No. Then they can't be safely used in the future.
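
A minimal sketch of what that check implies (hypothetical names, not the
rte_event_timer API): the library has to zero the reserved space when the
object is initialised and reject non-zero values when it is armed, otherwise
those bits can never be given a meaning later without changing behaviour for
old binaries.

/* Hypothetical sketch, not the real timer API. */
#include <errno.h>
#include <stdint.h>
#include <string.h>

struct my_timer {
	uint64_t timeout_ticks;
	uint64_t rsvd[2];	/* must be zero today */
};

static void
my_timer_init(struct my_timer *tim, uint64_t ticks)
{
	memset(tim, 0, sizeof(*tim));	/* reserved space starts zeroed */
	tim->timeout_ticks = ticks;
}

static int
my_timer_arm(const struct my_timer *tim)
{
	/* Reject garbage now so a future release can assign meaning to
	 * these bits without silently changing old applications. */
	if (tim->rsvd[0] != 0 || tim->rsvd[1] != 0)
		return -EINVAL;
	/* ... arm the timer ... */
	return 0;
}

int main(void)
{
	struct my_timer tim;

	my_timer_init(&tim, 100);
	return my_timer_arm(&tim);
}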

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 07/15] eventdev: make drivers to use new API
  2021-08-23 19:40 ` [dpdk-dev] [RFC 07/15] eventdev: make drivers to use new API pbhagavatula
@ 2021-09-08  6:43   ` Hemant Agrawal
  0 siblings, 0 replies; 119+ messages in thread
From: Hemant Agrawal @ 2021-09-08  6:43 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Mattias Rönnblom, Liang Ma,
	Peter Mccarthy, Harry van Haaren
  Cc: konstantin.ananyev, dev

For dpaa and dpaa2 changes

Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>



^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 04/15] eventdev: move inline APIs into separate structure
  2021-08-23 19:40 ` [dpdk-dev] [RFC 04/15] eventdev: move inline APIs into separate structure pbhagavatula
@ 2021-09-08 12:03   ` Kinsella, Ray
  0 siblings, 0 replies; 119+ messages in thread
From: Kinsella, Ray @ 2021-09-08 12:03 UTC (permalink / raw)
  To: pbhagavatula, jerinj; +Cc: konstantin.ananyev, dev



On 23/08/2021 20:40, pbhagavatula@marvell.com wrote:
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Move fastpath inline function pointers from rte_eventdev into a
> separate structure accessed via a flat array.
> The intension is to make rte_eventdev and related structures private
> to avoid future API/ABI breakages.`
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/eventdev_pmd.h      |  10 ++++
>  lib/eventdev/eventdev_private.c  | 100 +++++++++++++++++++++++++++++++
>  lib/eventdev/meson.build         |   1 +
>  lib/eventdev/rte_eventdev.c      |  25 +++++++-
>  lib/eventdev/rte_eventdev_core.h |  44 ++++++++++++++
>  lib/eventdev/version.map         |   4 ++
>  6 files changed, 183 insertions(+), 1 deletion(-)
>  create mode 100644 lib/eventdev/eventdev_private.c
> 

I will defer to others on the wisdom of exposing rte_eventdev_api.

Acked-by: Ray Kinsella <mdr@ashroe.eu>

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 05/15] eventdev: add helper functions for new driver API
  2021-08-23 19:40 ` [dpdk-dev] [RFC 05/15] eventdev: add helper functions for new driver API pbhagavatula
@ 2021-09-08 12:04   ` Kinsella, Ray
  0 siblings, 0 replies; 119+ messages in thread
From: Kinsella, Ray @ 2021-09-08 12:04 UTC (permalink / raw)
  To: pbhagavatula, jerinj; +Cc: konstantin.ananyev, dev



On 23/08/2021 20:40, pbhagavatula@marvell.com wrote:
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Add helper functions and macros to help drivers to transition to new
> fastpath interface.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/eventdev_pmd.h | 396 ++++++++++++++++++++++++++++++++++++
>  lib/eventdev/rte_eventdev.c | 174 ++++++++++++++++
>  lib/eventdev/version.map    |  18 ++
>  3 files changed, 588 insertions(+)
> 

Acked-by: Ray Kinsella <mdr@ashroe.eu>

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable
  2021-08-23 19:40 ` [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable pbhagavatula
  2021-08-30 14:43   ` Jayatheerthan, Jay
@ 2021-09-08 12:05   ` Kinsella, Ray
  1 sibling, 0 replies; 119+ messages in thread
From: Kinsella, Ray @ 2021-09-08 12:05 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Jay Jayatheerthan; +Cc: konstantin.ananyev, dev



On 23/08/2021 20:40, pbhagavatula@marvell.com wrote:
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Promote event vector configuration APIs to stable.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_eth_rx_adapter.h | 2 --
>  lib/eventdev/rte_eventdev.h             | 1 -
>  lib/eventdev/version.map                | 6 +++---
>  3 files changed, 3 insertions(+), 6 deletions(-)
> 
Acked-by: Ray Kinsella <mdr@ashroe.eu>

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 15/15] eventdev: promote trace variables to stable
  2021-08-23 19:40 ` [dpdk-dev] [RFC 15/15] eventdev: promote trace variables to stable pbhagavatula
@ 2021-09-08 12:06   ` Kinsella, Ray
  0 siblings, 0 replies; 119+ messages in thread
From: Kinsella, Ray @ 2021-09-08 12:06 UTC (permalink / raw)
  To: pbhagavatula, jerinj; +Cc: konstantin.ananyev, dev



On 23/08/2021 20:40, pbhagavatula@marvell.com wrote:
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Promote rte_trace global variables to stable i.e. remove them
> from experimental section of version map.

Minor niggle - they are being made INTERNAL, not promoted to stable.

> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/version.map | 78 ++++++++++++++++++----------------------
>  1 file changed, 35 insertions(+), 43 deletions(-)
> 
Acked-by: Ray Kinsella <mdr@ashroe.eu>

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (14 preceding siblings ...)
  2021-08-24  7:43 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Mattias Rönnblom
@ 2021-09-28  9:56 ` Jerin Jacob
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
  16 siblings, 0 replies; 119+ messages in thread
From: Jerin Jacob @ 2021-09-28  9:56 UTC (permalink / raw)
  To: Pavan Nikhilesh
  Cc: Jerin Jacob, Shijith Thotton, Timothy McDaniel, Hemant Agrawal,
	Nipun Gupta, Mattias Rönnblom, Liang Ma, Peter Mccarthy,
	Harry van Haaren, Abhinandan Gujjar, Ray Kinsella, Ananyev,
	Konstantin, dpdk-dev

On Tue, Aug 24, 2021 at 1:10 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Mark all the driver specific functions as internal, remove
> `rte` prefix from `struct rte_eventdev_ops`.
> Remove experimental tag from internal functions.
> Remove `eventdev_pmd.h` from non-internal header files.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>


Seems like on the ethdev side there is consensus with
https://patches.dpdk.org/project/dpdk/list/?series=19084
Could you respin the series along the lines of
https://patches.dpdk.org/project/dpdk/list/?series=19084 or the next
version (v3) from Konstantin?
Since eventdev does not have a callback, this series largely aligns
with the expected output. But please align function and structure names
etc. with ethdev for the next series. Marking as "Changes Requested".
Thanks for the rework.

* Re: [dpdk-dev] [EXT] Re: [RFC 06/15] eventdev: use new API for inline functions
  2021-08-30 14:46   ` David Marchand
@ 2021-10-02 20:32     ` Pavan Nikhilesh Bhagavatula
  0 siblings, 0 replies; 119+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2021-10-02 20:32 UTC (permalink / raw)
  To: David Marchand
  Cc: Jerin Jacob Kollanukkaran, Abhinandan Gujjar, Jay Jayatheerthan,
	Ananyev, Konstantin, dev

>Hello Pavan,
>
>On Mon, Aug 23, 2021 at 9:41 PM <pbhagavatula@marvell.com> wrote:
>>
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Use new driver interface for the fastpath enqueue/dequeue inline
>> functions.
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>> ---
>>  lib/eventdev/rte_event_crypto_adapter.h | 13 +-----
>>  lib/eventdev/rte_event_eth_tx_adapter.h | 22 ++-------
>>  lib/eventdev/rte_eventdev.h             | 61 +++++++------------------
>>  3 files changed, 22 insertions(+), 74 deletions(-)
>
>I sent this series in a branch of mine, and ran it per commit in GHA.
>It caught a UT failure on this patch:
>https://github.com/david-marchand/dpdk/runs/3408921022?check_suite_focus=true
>
>
>--- stdout ---
>RTE>>event_eth_tx_adapter_autotest
> + ------------------------------------------------------- +
> + Test Suite : tx event eth adapter test suite
>Port 0 MAC: 00 00 00 00 00 00
>Port 1 MAC: 00 00 00 00 00 00
>Failed to find a valid event device, testing with event_sw0 device
> + ------------------------------------------------------- +
> + TestCase [ 0] : tx_adapter_create_free succeeded
> + TestCase [ 1] : tx_adapter_queue_add_del succeeded
> + TestCase [ 2] : tx_adapter_start_stop succeeded
> + TestCase [ 3] : tx_adapter_service failed
> + TestCase [ 4] : tx_adapter_dynamic_device failed
> + ------------------------------------------------------- +
> + Test Suite Summary : tx event eth adapter test suite
> + ------------------------------------------------------- +
> + Tests Total :        5
> + Tests Skipped :      0
> + Tests Executed :     5
> + Tests Unsupported:   0
> + Tests Passed :       3
> + Tests Failed :       2
> + ------------------------------------------------------- +
>Test Failed
>

I just checked with v2 (in progress) of my series and the test passes; hopefully it fixes something that was overlooked in v1:

EAL: Detected CPU lcores: 24
EAL: Detected NUMA nodes: 1
EAL: Detected static linkage of DPDK
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: Selected IOVA mode 'VA'
EAL: No available 16777216 kB hugepages reported
EAL: No available 2048 kB hugepages reported
EAL: VFIO support initialized
TELEMETRY: No legacy callbacks, legacy socket not created
APP: HPET is not enabled, using TSC as default timer
RTE>>event_eth_tx_adapter_autotest
 + ------------------------------------------------------- +
 + Test Suite : tx event eth adapter test suite
Port 0 MAC: 00 00 00 00 00 00
Port 1 MAC: 00 00 00 00 00 00
Failed to find a valid event device, testing with event_sw0 device
 + ------------------------------------------------------- +
 + TestCase [ 0] : tx_adapter_create_free succeeded
Invalid port_id=2
EVENTDEV: txa_service_adapter_free() line 743: 1 Tx queues not deleted
 + TestCase [ 1] : tx_adapter_queue_add_del succeeded
 + TestCase [ 2] : tx_adapter_start_stop succeeded
 + TestCase [ 3] : tx_adapter_service succeeded
 + TestCase [ 4] : tx_adapter_dynamic_device succeeded
 + ------------------------------------------------------- +
 + Test Suite Summary : tx event eth adapter test suite
 + ------------------------------------------------------- +
 + Tests Total :        5
 + Tests Skipped :      0
 + Tests Executed :     5
 + Tests Unsupported:   0
 + Tests Passed :       5
 + Tests Failed :       0
 + ------------------------------------------------------- +
Test OK
RTE>>

>
>Can you double check?
>Thanks.
>
>--
>David Marchand


* [dpdk-dev] [PATCH v2 01/13] eventdev: make driver interface as internal
  2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
                   ` (15 preceding siblings ...)
  2021-09-28  9:56 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Jerin Jacob
@ 2021-10-03  8:26 ` pbhagavatula
  2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 02/13] eventdev: separate internal structures pbhagavatula
                     ` (13 more replies)
  16 siblings, 14 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:26 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Mattias Rönnblom, Liang Ma,
	Peter Mccarthy, Harry van Haaren, Abhinandan Gujjar,
	Ray Kinsella
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark all the driver specific functions as internal, remove
`rte` prefix from `struct rte_eventdev_ops`.
Remove experimental tag from internal functions.
Remove `eventdev_pmd.h` from non-internal header files.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 v2 Changes:
 - Rework the inline flat array by adding port data to it.
 - Rearrange the rte_event_timer elements.

 drivers/event/cnxk/cn10k_eventdev.c        |  6 ++---
 drivers/event/cnxk/cn9k_eventdev.c         | 10 ++++-----
 drivers/event/dlb2/dlb2.c                  |  2 +-
 drivers/event/dpaa/dpaa_eventdev.c         |  2 +-
 drivers/event/dpaa2/dpaa2_eventdev.c       |  2 +-
 drivers/event/dsw/dsw_evdev.c              |  2 +-
 drivers/event/octeontx/ssovf_evdev.c       |  2 +-
 drivers/event/octeontx/ssovf_worker.c      |  4 ++--
 drivers/event/octeontx2/otx2_evdev.c       | 26 +++++++++++-----------
 drivers/event/opdl/opdl_evdev.c            |  2 +-
 drivers/event/skeleton/skeleton_eventdev.c |  2 +-
 drivers/event/sw/sw_evdev.c                |  2 +-
 lib/eventdev/eventdev_pmd.h                |  6 ++++-
 lib/eventdev/eventdev_pmd_pci.h            |  4 +++-
 lib/eventdev/eventdev_pmd_vdev.h           |  2 ++
 lib/eventdev/meson.build                   |  6 +++++
 lib/eventdev/rte_event_crypto_adapter.h    |  1 -
 lib/eventdev/rte_eventdev.h                | 25 ++++++++++++---------
 lib/eventdev/version.map                   | 17 +++++++-------
 19 files changed, 70 insertions(+), 53 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 8af273a01b..b2c3a6cd31 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -375,7 +375,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
@@ -383,7 +383,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
@@ -858,7 +858,7 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }

-static struct rte_eventdev_ops cn10k_sso_dev_ops = {
+static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 59a3dc22a3..0e0bf7177e 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -507,7 +507,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
@@ -515,7 +515,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
@@ -523,7 +523,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
@@ -531,7 +531,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
@@ -1052,7 +1052,7 @@ cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }

-static struct rte_eventdev_ops cn9k_sso_dev_ops = {
+static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 252bbd8d5e..c8742ddb2c 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -4384,7 +4384,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
 	struct dlb2_eventdev *dlb2;

 	/* Expose PMD's eventdev interface */
-	static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
+	static struct eventdev_ops dlb2_eventdev_entry_ops = {
 		.dev_infos_get    = dlb2_eventdev_info_get,
 		.dev_configure    = dlb2_eventdev_configure,
 		.dev_start        = dlb2_eventdev_start,
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index ec74160325..9f14390d28 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -925,7 +925,7 @@ dpaa_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }

-static struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct eventdev_ops dpaa_eventdev_ops = {
 	.dev_infos_get    = dpaa_event_dev_info_get,
 	.dev_configure    = dpaa_event_dev_configure,
 	.dev_start        = dpaa_event_dev_start,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 5ccf22f77f..d577f64824 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1015,7 +1015,7 @@ dpaa2_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }

-static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static struct eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
 	.dev_configure    = dpaa2_eventdev_configure,
 	.dev_start        = dpaa2_eventdev_start,
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 2301a4b7a0..01f060fff3 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -398,7 +398,7 @@ dsw_crypto_adapter_caps_get(const struct rte_eventdev *dev  __rte_unused,
 	return 0;
 }

-static struct rte_eventdev_ops dsw_evdev_ops = {
+static struct eventdev_ops dsw_evdev_ops = {
 	.port_setup = dsw_port_setup,
 	.port_def_conf = dsw_port_def_conf,
 	.port_release = dsw_port_release,
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index b93f6ec8c6..4a8c6a13a5 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -790,7 +790,7 @@ ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev,
 }

 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops ssovf_ops = {
+static struct eventdev_ops ssovf_ops = {
 	.dev_infos_get    = ssovf_info_get,
 	.dev_configure    = ssovf_configure,
 	.queue_def_conf   = ssovf_queue_def_conf,
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 8b056ddc5a..2df940f0f1 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -343,11 +343,11 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)

 	dev->ca_enqueue = ssow_crypto_adapter_enqueue;

-	const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+	const event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
 #define T(name, f3, f2, f1, f0, sz, flags)				\
 	[f3][f2][f1][f0] =  sso_event_tx_adapter_enqueue_ ##name,

-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+		SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};

diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 38a6b651d9..f26bed334f 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -178,41 +178,41 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_tx_adptr_enq_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_tx_adptr_enq_seg_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_tx_adptr_enq_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

 	event_dev->enqueue			= otx2_ssogws_enq;
 	event_dev->enqueue_burst		= otx2_ssogws_enq_burst;
@@ -1596,7 +1596,7 @@ otx2_sso_close(struct rte_eventdev *event_dev)
 }

 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops otx2_sso_ops = {
+static struct eventdev_ops otx2_sso_ops = {
 	.dev_infos_get    = otx2_sso_info_get,
 	.dev_configure    = otx2_sso_configure,
 	.queue_def_conf   = otx2_sso_queue_def_conf,
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index cfa9733b64..739dc64c82 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -609,7 +609,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
 static int
 opdl_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_opdl_ops = {
+	static struct eventdev_ops evdev_opdl_ops = {
 		.dev_configure = opdl_dev_configure,
 		.dev_infos_get = opdl_info_get,
 		.dev_close = opdl_close,
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 6fd1102596..c9e17e7cb1 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -320,7 +320,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)


 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops skeleton_eventdev_ops = {
+static struct eventdev_ops skeleton_eventdev_ops = {
 	.dev_infos_get    = skeleton_eventdev_info_get,
 	.dev_configure    = skeleton_eventdev_configure,
 	.dev_start        = skeleton_eventdev_start,
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a5e6ca22e8..9b72073322 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -945,7 +945,7 @@ static int32_t sw_sched_service_func(void *args)
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_sw_ops = {
+	static struct eventdev_ops evdev_sw_ops = {
 			.dev_configure = sw_dev_configure,
 			.dev_infos_get = sw_info_get,
 			.dev_close = sw_close,
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 94d99f4903..682b61cff0 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -99,6 +99,7 @@ extern struct rte_eventdev *rte_eventdevs;
  * @return
  *   - The rte_eventdev structure pointer for the given device ID.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_get_named_dev(const char *name)
 {
@@ -127,6 +128,7 @@ rte_event_pmd_get_named_dev(const char *name)
  * @return
  *   - If the device index is valid (1) or not (0).
  */
+__rte_internal
 static inline unsigned
 rte_event_pmd_is_valid_dev(uint8_t dev_id)
 {
@@ -1056,7 +1058,7 @@ typedef int (*eventdev_eth_tx_adapter_stats_reset_t)(uint8_t id,
 					const struct rte_eventdev *dev);

 /** Event device operations function pointer table */
-struct rte_eventdev_ops {
+struct eventdev_ops {
 	eventdev_info_get_t dev_infos_get;	/**< Get device info. */
 	eventdev_configure_t dev_configure;	/**< Configure device. */
 	eventdev_start_t dev_start;		/**< Start device. */
@@ -1174,6 +1176,7 @@ struct rte_eventdev_ops {
  * @return
  *   - Slot in the rte_dev_devices array for a new device;
  */
+__rte_internal
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id);

@@ -1185,6 +1188,7 @@ rte_event_pmd_allocate(const char *name, int socket_id);
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);

diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index 1545b240f2..2f12a5eb24 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -31,7 +31,7 @@ typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
  * interface.  Same as rte_event_pmd_pci_probe, except caller can specify
  * the name.
  */
-__rte_experimental
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 			      struct rte_pci_device *pci_dev,
@@ -85,6 +85,7 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .probe function to attach to a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
 			    struct rte_pci_device *pci_dev,
@@ -108,6 +109,7 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .remove function to detach a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
 			     eventdev_pmd_pci_callback_t devuninit)
diff --git a/lib/eventdev/eventdev_pmd_vdev.h b/lib/eventdev/eventdev_pmd_vdev.h
index 2d33924e6c..d9ee7277dd 100644
--- a/lib/eventdev/eventdev_pmd_vdev.h
+++ b/lib/eventdev/eventdev_pmd_vdev.h
@@ -37,6 +37,7 @@
  *   - Eventdev pointer if device is successfully created.
  *   - NULL if device cannot be created.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
 		int socket_id)
@@ -74,6 +75,7 @@ rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 static inline int
 rte_event_pmd_vdev_uninit(const char *name)
 {
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 32abeba794..523ea9ccae 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,5 +27,11 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+driver_sdk_headers += files(
+        'eventdev_pmd.h',
+        'eventdev_pmd_pci.h',
+        'eventdev_pmd_vdev.h',
+)
+
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
 deps += ['telemetry']
diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index f8c6cca87c..431d05b6ed 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -171,7 +171,6 @@ extern "C" {
 #include <stdint.h>

 #include "rte_eventdev.h"
-#include "eventdev_pmd.h"

 /**
  * Crypto event adapter mode
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index a9c496fb62..0c701888d5 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,7 +1324,7 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);

-struct rte_eventdev_ops;
+struct eventdev_ops;
 struct rte_eventdev;

 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
@@ -1342,18 +1342,21 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
 /**< @internal Dequeue burst of events from port of a device */

-typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
+					       struct rte_event ev[],
+					       uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */

-typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
-		struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
+							 struct rte_event ev[],
+							 uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device supporting
  * burst having same destination Ethernet port & Tx queue.
  */

-typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */

 #define RTE_EVENTDEV_NAME_MAX_LEN	(64)
@@ -1421,15 +1424,15 @@ struct rte_eventdev {
 	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
+	event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
 	/**< Pointer to PMD eth Tx adapter burst enqueue function with
 	 * events destined to same Eth port & Tx queue.
 	 */
-	event_tx_adapter_enqueue txa_enqueue;
+	event_tx_adapter_enqueue_t txa_enqueue;
 	/**< Pointer to PMD eth Tx adapter enqueue function. */
 	struct rte_eventdev_data *data;
 	/**< Pointer to device data */
-	struct rte_eventdev_ops *dev_ops;
+	struct eventdev_ops *dev_ops;
 	/**< Functions exported by PMD */
 	struct rte_device *dev;
 	/**< Device info. supplied by probing */
@@ -1438,7 +1441,7 @@ struct rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */

-	event_crypto_adapter_enqueue ca_enqueue;
+	event_crypto_adapter_enqueue_t ca_enqueue;
 	/**< Pointer to PMD crypto adapter enqueue function. */

 	uint64_t reserved_64s[4]; /**< Reserved for future fields */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 88625621ec..5f1fe412a4 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -55,12 +55,6 @@ DPDK_22 {
 	rte_event_eth_tx_adapter_stats_get;
 	rte_event_eth_tx_adapter_stats_reset;
 	rte_event_eth_tx_adapter_stop;
-	rte_event_pmd_allocate;
-	rte_event_pmd_pci_probe;
-	rte_event_pmd_pci_remove;
-	rte_event_pmd_release;
-	rte_event_pmd_vdev_init;
-	rte_event_pmd_vdev_uninit;
 	rte_event_port_attr_get;
 	rte_event_port_default_conf_get;
 	rte_event_port_link;
@@ -136,8 +130,6 @@ EXPERIMENTAL {

 	# changed in 20.11
 	__rte_eventdev_trace_port_setup;
-	# added in 20.11
-	rte_event_pmd_pci_probe_named;

 	#added in 21.05
 	rte_event_vector_pool_create;
@@ -150,4 +142,13 @@ INTERNAL {
 	global:

 	rte_event_pmd_selftest_seqn_dynfield_offset;
+	rte_event_pmd_allocate;
+	rte_event_pmd_get_named_dev;
+	rte_event_pmd_is_valid_dev;
+	rte_event_pmd_pci_probe;
+	rte_event_pmd_pci_probe_named;
+	rte_event_pmd_pci_remove;
+	rte_event_pmd_release;
+	rte_event_pmd_vdev_init;
+	rte_event_pmd_vdev_uninit;
 };
--
2.17.1


* [dpdk-dev] [PATCH v2 02/13] eventdev: separate internal structures
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
@ 2021-10-03  8:26   ` pbhagavatula
  2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 03/13] eventdev: allocate max space for internal arrays pbhagavatula
                     ` (12 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:26 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Create rte_eventdev_core.h and move all the internal data structures
to this file. These structures are mostly used by drivers, but they
need to be in the public header file as they are accessed by datapath
inline functions for performance reasons.
The accessibility of these data structures is not changed.
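
To illustrate the constraint (a minimal sketch, not part of the patch; the
helper name is made up and the body is simplified from the existing inline
enqueue path): the fast-path wrappers are static inline in rte_eventdev.h,
so the application's compiler must see the full layout of struct
rte_eventdev and struct rte_eventdev_data at the call site, e.g.

	static inline uint16_t
	enqueue_one(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
	{
		/* Dereferences the device and port arrays directly, so the
		 * structure definitions cannot be hidden inside the library.
		 */
		const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

		return (*dev->enqueue)(dev->data->ports[port_id], ev);
	}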

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/eventdev_pmd.h      |   3 -
 lib/eventdev/meson.build         |   3 +
 lib/eventdev/rte_eventdev.h      | 718 +++++++++++++------------------
 lib/eventdev/rte_eventdev_core.h | 138 ++++++
 4 files changed, 437 insertions(+), 425 deletions(-)
 create mode 100644 lib/eventdev/rte_eventdev_core.h

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 682b61cff0..7eb2aa0520 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -87,9 +87,6 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
-extern struct rte_eventdev *rte_eventdevs;
-/** The pool of rte_eventdev structures. */
-
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 523ea9ccae..8b51fde361 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,6 +27,9 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+indirect_headers += files(
+        'rte_eventdev_core.h',
+)
 driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 0c701888d5..1b11d4576d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,317 +1324,6 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);
 
-struct eventdev_ops;
-struct rte_eventdev;
-
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
-/**< @internal Enqueue event on port of a device */
-
-typedef uint16_t (*event_enqueue_burst_t)(void *port,
-			const struct rte_event ev[], uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-		uint64_t timeout_ticks);
-/**< @internal Dequeue event from port of a device */
-
-typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-/**< @internal Dequeue burst of events from port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
-					       struct rte_event ev[],
-					       uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
-							 struct rte_event ev[],
-							 uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device supporting
- * burst having same destination Ethernet port & Tx queue.
- */
-
-typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
-						   struct rte_event ev[],
-						   uint16_t nb_events);
-/**< @internal Enqueue burst of events on crypto adapter */
-
-#define RTE_EVENTDEV_NAME_MAX_LEN	(64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void **ports;
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
-	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue_t txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue_t ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
-static __rte_always_inline uint16_t
-__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events,
-			const event_enqueue_burst_t fn)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
-	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
-}
-
-/**
- * Enqueue a burst of events objects or an event object supplied in *rte_event*
- * structure on an  event device designated by its *dev_id* through the event
- * port specified by *port_id*. Each event object specifies the event queue on
- * which it will be enqueued.
- *
- * The *nb_events* parameter is the number of event objects to enqueue which are
- * supplied in the *ev* array of *rte_event* structure.
- *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
- *
- * The rte_event_enqueue_burst() function returns the number of
- * events objects it actually enqueued. A return value equal to *nb_events*
- * means that all event objects have been enqueued.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- */
-static inline uint16_t
-rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
- * an event device designated by its *dev_id* through the event port specified
- * by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_NEW.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_new_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
- * on an event device designated by its *dev_id* through the event port
- * specified by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_FORWARD.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_forward_burst);
-}
-
 /**
  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
  *
@@ -1665,124 +1354,27 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 					uint64_t *timeout_ticks);
 
 /**
- * Dequeue a burst of events objects or an event object from the event port
- * designated by its *event_port_id*, on an event device designated
- * by its *dev_id*.
- *
- * rte_event_dequeue_burst() does not dictate the specifics of scheduling
- * algorithm as each eventdev driver may have different criteria to schedule
- * an event. However, in general, from an application perspective scheduler may
- * use the following scheme to dispatch an event to the port.
- *
- * 1) Selection of event queue based on
- *   a) The list of event queues are linked to the event port.
- *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
- *   queue selection from list is based on event queue priority relative to
- *   other event queue supplied as *priority* in rte_event_queue_setup()
- *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
- *   queue selection from the list is based on event priority supplied as
- *   *priority* in rte_event_enqueue_burst()
- * 2) Selection of event
- *   a) The number of flows available in selected event queue.
- *   b) Schedule type method associated with the event
- *
- * The *nb_events* parameter is the maximum number of event objects to dequeue
- * which are returned in the *ev* array of *rte_event* structure.
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated service priority
+ * supplied in *priorities* on the event device designated by its *dev_id*.
  *
- * The rte_event_dequeue_burst() function returns the number of events objects
- * it actually dequeued. A return value equal to *nb_events* means that all
- * event objects have been dequeued.
+ * The link establishment shall enable the event port *port_id* from
+ * receiving events from the specified event queue(s) supplied in *queues*
  *
- * The number of events dequeued is the number of scheduler contexts held by
- * this port. These contexts are automatically released in the next
- * rte_event_dequeue_burst() invocation if the port supports implicit
- * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
- * operation can be used to release the contexts early.
+ * An event queue may link to one or more event ports.
+ * The number of links can be established from an event queue to event port is
+ * implementation defined.
  *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
  *
  * @param dev_id
  *   The identifier of the device.
+ *
  * @param port_id
- *   The identifier of the event port.
- * @param[out] ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   for output to be populated with the dequeued event objects.
- * @param nb_events
- *   The maximum number of event objects to dequeue, typically number of
- *   rte_event_port_dequeue_depth() available for this port.
- *
- * @param timeout_ticks
- *   - 0 no-wait, returns immediately if there is no event.
- *   - >0 wait for the event, if the device is configured with
- *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- *   at least one event is available or *timeout_ticks* time.
- *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
- *   then this function will wait until the event available or
- *   *dequeue_timeout_ns* ns which was previously supplied to
- *   rte_event_dev_configure()
- *
- * @return
- * The number of event objects actually dequeued from the port. The return
- * value can be less than the value of the *nb_events* parameter when the
- * event port's queue is not full.
- *
- * @see rte_event_port_dequeue_depth()
- */
-static inline uint16_t
-rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-			uint16_t nb_events, uint64_t timeout_ticks)
-{
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->dequeue)(
-			dev->data->ports[port_id], ev, timeout_ticks);
-	else
-		return (*dev->dequeue_burst)(
-			dev->data->ports[port_id], ev, nb_events,
-				timeout_ticks);
-}
-
-/**
- * Link multiple source event queues supplied in *queues* to the destination
- * event port designated by its *port_id* with associated service priority
- * supplied in *priorities* on the event device designated by its *dev_id*.
- *
- * The link establishment shall enable the event port *port_id* from
- * receiving events from the specified event queue(s) supplied in *queues*
- *
- * An event queue may link to one or more event ports.
- * The number of links can be established from an event queue to event port is
- * implementation defined.
- *
- * Event queue(s) to event port link establishment can be changed at runtime
- * without re-configuring the device to support scaling and to reduce the
- * latency of critical work by establishing the link with more event ports
- * at runtime.
- *
- * @param dev_id
- *   The identifier of the device.
- *
- * @param port_id
- *   Event port identifier to select the destination port to link.
+ *   Event port identifier to select the destination port to link.
  *
  * @param queues
  *   Points to an array of *nb_links* event queues to be linked
@@ -2148,6 +1740,288 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
 			     int socket_id);
 
+#include <rte_eventdev_core.h>
+
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			  const struct rte_event ev[], uint16_t nb_events,
+			  const event_enqueue_burst_t fn)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+	else
+		return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
+/**
+ * Enqueue a burst of events objects or an event object supplied in *rte_event*
+ * structure on an  event device designated by its *dev_id* through the event
+ * port specified by *port_id*. Each event object specifies the event queue on
+ * which it will be enqueued.
+ *
+ * The *nb_events* parameter is the number of event objects to enqueue which are
+ * supplied in the *ev* array of *rte_event* structure.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * The rte_event_enqueue_burst() function returns the number of
+ * events objects it actually enqueued. A return value equal to *nb_events*
+ * means that all event objects have been enqueued.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ */
+static inline uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), expect that
+ * application can use this API when the all objects in the burst contains
+ * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
+ * function can provide the additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has event object of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), expect that
+ * application can use this API when the all objects in the burst contains
+ * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
+ * function can provide the additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has event object of operation type != RTE_EVENT_OP_FORWARD.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+				const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_forward_burst);
+}
+
+/**
+ * Dequeue a burst of events objects or an event object from the event port
+ * designated by its *event_port_id*, on an event device designated
+ * by its *dev_id*.
+ *
+ * rte_event_dequeue_burst() does not dictate the specifics of scheduling
+ * algorithm as each eventdev driver may have different criteria to schedule
+ * an event. However, in general, from an application perspective scheduler may
+ * use the following scheme to dispatch an event to the port.
+ *
+ * 1) Selection of event queue based on
+ *   a) The list of event queues are linked to the event port.
+ *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
+ *   queue selection from list is based on event queue priority relative to
+ *   other event queue supplied as *priority* in rte_event_queue_setup()
+ *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
+ *   queue selection from the list is based on event priority supplied as
+ *   *priority* in rte_event_enqueue_burst()
+ * 2) Selection of event
+ *   a) The number of flows available in selected event queue.
+ *   b) Schedule type method associated with the event
+ *
+ * The *nb_events* parameter is the maximum number of event objects to dequeue
+ * which are returned in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_dequeue_burst() function returns the number of events objects
+ * it actually dequeued. A return value equal to *nb_events* means that all
+ * event objects have been dequeued.
+ *
+ * The number of events dequeued is the number of scheduler contexts held by
+ * this port. These contexts are automatically released in the next
+ * rte_event_dequeue_burst() invocation if the port supports implicit releases;
+ * otherwise, rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation
+ * can be used to release the contexts early.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param[out] ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   for output to be populated with the dequeued event objects.
+ * @param nb_events
+ *   The maximum number of event objects to dequeue, typically up to the value
+ *   returned by rte_event_port_dequeue_depth() for this port.
+ *
+ * @param timeout_ticks
+ *   - 0 no-wait, returns immediately if there is no event.
+ *   - >0 wait for the event. If the device is configured with
+ *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait until
+ *   at least one event is available or *timeout_ticks* time has elapsed;
+ *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT,
+ *   then this function will wait until an event is available or the
+ *   *dequeue_timeout_ns* ns previously supplied to rte_event_dev_configure()
+ *   has elapsed.
+ *
+ * @return
+ * The number of event objects actually dequeued from the port. The return
+ * value can be less than the value of the *nb_events* parameter when fewer
+ * than *nb_events* events are available on the event port.
+ *
+ * @see rte_event_port_dequeue_depth()
+ */
+static inline uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+			uint16_t nb_events, uint64_t timeout_ticks)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->dequeue)(dev->data->ports[port_id], ev,
+				       timeout_ticks);
+	else
+		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
+					     nb_events, timeout_ticks);
+}
+
 #ifdef __cplusplus
 }
 #endif
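
As a usage sketch for the enqueue/dequeue inline APIs documented in the hunk
above (not part of this patch; worker(), the quit flag and next_queue_id are
illustrative application code, and the device is assumed to be configured,
linked and started):

#include <stdbool.h>
#include <rte_common.h>
#include <rte_eventdev.h>

static volatile bool quit;      /* set elsewhere by the application */

static void
worker(uint8_t dev_id, uint8_t port_id, uint8_t next_queue_id)
{
	struct rte_event ev[32];
	uint16_t nb_rx, nb_tx, i;

	while (!quit) {
		/* 0 == no-wait poll; see the timeout_ticks notes above */
		nb_rx = rte_event_dequeue_burst(dev_id, port_id, ev,
						RTE_DIM(ev), 0);
		for (i = 0; i < nb_rx; i++) {
			/* ... process ev[i] ... */
			ev[i].op = RTE_EVENT_OP_FORWARD;
			ev[i].queue_id = next_queue_id;
		}
		/* OP_FORWARD events must go back out the same port */
		nb_tx = rte_event_enqueue_forward_burst(dev_id, port_id,
							ev, nb_rx);
		/* nb_rx - nb_tx events remain in ev[]; retry or drop them */
		(void)nb_tx;
	}
}
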
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
new file mode 100644
index 0000000000..b97cdf84fe
--- /dev/null
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(C) 2021 Marvell.
+ * Copyright 2016 NXP
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENTDEV_CORE_H_
+#define _RTE_EVENTDEV_CORE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
+/**< @internal Enqueue event on port of a device */
+
+typedef uint16_t (*event_enqueue_burst_t)(void *port,
+					  const struct rte_event ev[],
+					  uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
+				    uint64_t timeout_ticks);
+/**< @internal Dequeue event from port of a device */
+
+typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
+					  uint16_t nb_events,
+					  uint64_t timeout_ticks);
+/**< @internal Dequeue burst of events from port of a device */
+
+typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
+					       struct rte_event ev[],
+					       uint16_t nb_events);
+/**< @internal Enqueue burst of events on eth Tx adapter of a device */
+
+typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
+/**< @internal Enqueue burst of events on crypto adapter */
+
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void **ports;
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf *ports_cfg;
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf *queues_cfg;
+	/**< Array of queue configuration structures. */
+	uint16_t *links_map;
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< Pointer to PMD crypto adapter enqueue function. */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_RTE_EVENTDEV_CORE_H_*/
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread
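
To illustrate how the fast-path pointers kept in the new rte_eventdev_core.h
are meant to be populated, a hypothetical PMD setup routine could look roughly
like the sketch below; the my_* symbols are illustrative only and not taken
from any in-tree driver, which do the equivalent in their own fast-path
function-set helpers.

#include <rte_eventdev.h>   /* pulls in the new rte_eventdev_core.h */

/* Illustrative PMD datapath functions, implemented elsewhere in the PMD. */
uint16_t my_enqueue(void *port, const struct rte_event *ev);
uint16_t my_enqueue_burst(void *port, const struct rte_event ev[],
			  uint16_t nb_events);
uint16_t my_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks);
uint16_t my_dequeue_burst(void *port, struct rte_event ev[],
			  uint16_t nb_events, uint64_t timeout_ticks);
uint16_t my_txa_enqueue(void *port, struct rte_event ev[], uint16_t nb_events);
uint16_t my_ca_enqueue(void *port, struct rte_event ev[], uint16_t nb_events);

static void
my_pmd_fastpath_fns_set(struct rte_eventdev *dev)
{
	dev->enqueue = my_enqueue;
	dev->enqueue_burst = my_enqueue_burst;
	dev->enqueue_new_burst = my_enqueue_burst;     /* OP_NEW variant */
	dev->enqueue_forward_burst = my_enqueue_burst; /* OP_FORWARD variant */
	dev->dequeue = my_dequeue;
	dev->dequeue_burst = my_dequeue_burst;
	dev->txa_enqueue = my_txa_enqueue;
	dev->txa_enqueue_same_dest = my_txa_enqueue;
	dev->ca_enqueue = my_ca_enqueue;
}
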

* [dpdk-dev] [PATCH v2 03/13] eventdev: allocate max space for internal arrays
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
  2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 02/13] eventdev: separate internal structures pbhagavatula
@ 2021-10-03  8:26   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 04/13] eventdev: move inline APIs into separate structure pbhagavatula
                     ` (11 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:26 UTC (permalink / raw)
  To: jerinj, Bruce Richardson, Anatoly Burakov; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Allocate max space for internal port, port config, queue config and
link map arrays.
Introduce new macro RTE_EVENT_MAX_PORTS_PER_DEV and set it to max
possible value.
This simplifies the port and queue reconfigure scenarios and will
also allow inline functions to refer pointer to internal port data
without extra checking of current number of configured queues.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 config/rte_config.h              |   1 +
 lib/eventdev/rte_eventdev.c      | 154 +++++++------------------------
 lib/eventdev/rte_eventdev_core.h |   9 +-
 3 files changed, 38 insertions(+), 126 deletions(-)

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c07d..e0ead8b251 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -72,6 +72,7 @@
 
 /* eventdev defines */
 #define RTE_EVENT_MAX_DEVS 16
+#define RTE_EVENT_MAX_PORTS_PER_DEV 255
 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255
 #define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32
 #define RTE_EVENT_ETH_INTR_RING_SIZE 1024
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index e347d6dfd5..bfcfa31cd1 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -209,7 +209,7 @@ rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 }
 
 static inline int
-rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
+event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 {
 	uint8_t old_nb_queues = dev->data->nb_queues;
 	struct rte_event_queue_conf *queues_cfg;
@@ -218,37 +218,13 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
 			 dev->data->dev_id);
 
-	/* First time configuration */
-	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
-		/* Allocate memory to store queue configuration */
-		dev->data->queues_cfg = rte_zmalloc_socket(
-				"eventdev->data->queues_cfg",
-				sizeof(dev->data->queues_cfg[0]) * nb_queues,
-				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->queues_cfg == NULL) {
-			dev->data->nb_queues = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
-					"nb_queues %u", nb_queues);
-			return -(ENOMEM);
-		}
-	/* Re-configure */
-	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
+	if (nb_queues != 0) {
+		queues_cfg = dev->data->queues_cfg;
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
 			(*dev->dev_ops->queue_release)(dev, i);
 
-		/* Re allocate memory to store queue configuration */
-		queues_cfg = dev->data->queues_cfg;
-		queues_cfg = rte_realloc(queues_cfg,
-				sizeof(queues_cfg[0]) * nb_queues,
-				RTE_CACHE_LINE_SIZE);
-		if (queues_cfg == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
-						" nb_queues %u", nb_queues);
-			return -(ENOMEM);
-		}
-		dev->data->queues_cfg = queues_cfg;
 
 		if (nb_queues > old_nb_queues) {
 			uint8_t new_qs = nb_queues - old_nb_queues;
@@ -256,7 +232,7 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 			memset(queues_cfg + old_nb_queues, 0,
 				sizeof(queues_cfg[0]) * new_qs);
 		}
-	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
+	} else {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
@@ -270,7 +246,7 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
 
 static inline int
-rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
+event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 {
 	uint8_t old_nb_ports = dev->data->nb_ports;
 	void **ports;
@@ -281,46 +257,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
 			 dev->data->dev_id);
 
-	/* First time configuration */
-	if (dev->data->ports == NULL && nb_ports != 0) {
-		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
-				sizeof(dev->data->ports[0]) * nb_ports,
-				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->ports == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Allocate memory to store port configurations */
-		dev->data->ports_cfg =
-			rte_zmalloc_socket("eventdev->ports_cfg",
-			sizeof(dev->data->ports_cfg[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->ports_cfg == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Allocate memory to store queue to port link connection */
-		dev->data->links_map =
-			rte_zmalloc_socket("eventdev->links_map",
-			sizeof(dev->data->links_map[0]) * nb_ports *
-			RTE_EVENT_MAX_QUEUES_PER_DEV,
-			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->links_map == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
-			dev->data->links_map[i] =
-				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
-	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
+	if (nb_ports != 0) { /* re-config */
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
@@ -330,37 +267,6 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 		for (i = nb_ports; i < old_nb_ports; i++)
 			(*dev->dev_ops->port_release)(ports[i]);
 
-		/* Realloc memory for ports */
-		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
-				RTE_CACHE_LINE_SIZE);
-		if (ports == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
-						" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory for ports_cfg */
-		ports_cfg = rte_realloc(ports_cfg,
-			sizeof(ports_cfg[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE);
-		if (ports_cfg == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
-						" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory to store queue to port link connection */
-		links_map = rte_realloc(links_map,
-			sizeof(dev->data->links_map[0]) * nb_ports *
-			RTE_EVENT_MAX_QUEUES_PER_DEV,
-			RTE_CACHE_LINE_SIZE);
-		if (links_map == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
 		if (nb_ports > old_nb_ports) {
 			uint8_t new_ps = nb_ports - old_nb_ports;
 			unsigned int old_links_map_end =
@@ -376,16 +282,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 				links_map[i] =
 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 		}
-
-		dev->data->ports = ports;
-		dev->data->ports_cfg = ports_cfg;
-		dev->data->links_map = links_map;
-	} else if (dev->data->ports != NULL && nb_ports == 0) {
+	} else {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
-		for (i = nb_ports; i < old_nb_ports; i++)
+		for (i = nb_ports; i < old_nb_ports; i++) {
 			(*dev->dev_ops->port_release)(ports[i]);
+			ports[i] = NULL;
+		}
 	}
 
 	dev->data->nb_ports = nb_ports;
@@ -550,19 +454,19 @@ rte_event_dev_configure(uint8_t dev_id,
 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
 
 	/* Setup new number of queues and reconfigure device. */
-	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
+	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
 	if (diag != 0) {
-		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
-				dev_id, diag);
+		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
+				 diag);
 		return diag;
 	}
 
 	/* Setup new number of ports and reconfigure device. */
-	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
+	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
 	if (diag != 0) {
-		rte_event_dev_queue_config(dev, 0);
-		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
-				dev_id, diag);
+		event_dev_queue_config(dev, 0);
+		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
+				 diag);
 		return diag;
 	}
 
@@ -570,8 +474,8 @@ rte_event_dev_configure(uint8_t dev_id,
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
-		rte_event_dev_queue_config(dev, 0);
-		rte_event_dev_port_config(dev, 0);
+		event_dev_queue_config(dev, 0);
+		event_dev_port_config(dev, 0);
 	}
 
 	dev->data->event_dev_cap = info.event_dev_cap;
@@ -1403,8 +1307,8 @@ rte_event_dev_close(uint8_t dev_id)
 }
 
 static inline int
-rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
-		int socket_id)
+eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+		    int socket_id)
 {
 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
 	const struct rte_memzone *mz;
@@ -1426,14 +1330,20 @@ rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
 		return -ENOMEM;
 
 	*data = mz->addr;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		memset(*data, 0, sizeof(struct rte_eventdev_data));
+		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
+					RTE_EVENT_MAX_QUEUES_PER_DEV;
+		     n++)
+			(*data)->links_map[n] =
+				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+	}
 
 	return 0;
 }
 
 static inline uint8_t
-rte_eventdev_find_free_device_index(void)
+eventdev_find_free_device_index(void)
 {
 	uint8_t dev_id;
 
@@ -1475,7 +1385,7 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 		return NULL;
 	}
 
-	dev_id = rte_eventdev_find_free_device_index();
+	dev_id = eventdev_find_free_device_index();
 	if (dev_id == RTE_EVENT_MAX_DEVS) {
 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
 		return NULL;
@@ -1490,8 +1400,8 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
-		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
-				socket_id);
+		int retval =
+			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
 
 		if (retval < 0 || eventdev_data == NULL)
 			return NULL;
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index b97cdf84fe..115b97e431 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -58,13 +58,14 @@ struct rte_eventdev_data {
 	/**< Number of event queues. */
 	uint8_t nb_ports;
 	/**< Number of event ports. */
-	void **ports;
+	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
 	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
+	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
 	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
+	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
+	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
+			   RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/**< Memory to store queues to port connections. */
 	void *dev_private;
 	/**< PMD-specific private data */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread
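
A rough, compile-time illustration of the memory impact of the fixed maxima
introduced above (a sketch only; at this point in the series
struct rte_eventdev_data is still reachable through the public headers, it is
hidden from applications later in the series):

#include <stdio.h>
#include <rte_eventdev.h>

int
main(void)
{
	/* links_map alone: 255 ports * 255 queues * sizeof(uint16_t)
	 * = 130050 bytes (~127 KiB), reserved once in the per-device
	 * memzone instead of being rte_realloc()'d on every reconfigure.
	 */
	printf("rte_eventdev_data: %zu bytes\n",
	       sizeof(struct rte_eventdev_data));
	printf("links_map: %zu bytes\n",
	       sizeof(((struct rte_eventdev_data *)0)->links_map));
	return 0;
}

The trade-off is a larger, fixed-size per-device memzone in exchange for
dropping all the rte_realloc() reconfigure paths removed in the diff above.
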

* [dpdk-dev] [PATCH v2 04/13] eventdev: move inline APIs into separate structure
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
  2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 02/13] eventdev: separate internal structures pbhagavatula
  2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 03/13] eventdev: allocate max space for internal arrays pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 05/13] eventdev: use new API for inline functions pbhagavatula
                     ` (10 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move fastpath inline function pointers from rte_eventdev into a
separate structure accessed via a flat array.
The intention is to make rte_eventdev and related structures private
to avoid future API/ABI breakages.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/eventdev_pmd.h      |  25 +++++++
 lib/eventdev/eventdev_pmd_pci.h  |   5 +-
 lib/eventdev/eventdev_private.c  | 112 +++++++++++++++++++++++++++++++
 lib/eventdev/meson.build         |   1 +
 lib/eventdev/rte_eventdev.c      |  12 +++-
 lib/eventdev/rte_eventdev_core.h |  28 ++++++++
 lib/eventdev/version.map         |   5 ++
 7 files changed, 186 insertions(+), 2 deletions(-)
 create mode 100644 lib/eventdev/eventdev_private.c

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 7eb2aa0520..2f88dbd6d8 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -1189,4 +1189,29 @@ __rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);
 
+/**
+ * Reset event device fastpath APIs to dummy values.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to reset.
+ */
+__rte_internal
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op);
+
+/**
+ * Set event device fastpath APIs to the event device's values.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to set.
+ */
+__rte_internal
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_ops,
+		     const struct rte_eventdev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _RTE_EVENTDEV_PMD_H_ */
diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index 2f12a5eb24..563b579a77 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -67,8 +67,11 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 
 	/* Invoke PMD device initialization function */
 	retval = devinit(eventdev);
-	if (retval == 0)
+	if (retval == 0) {
+		event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
+				     eventdev);
 		return 0;
+	}
 
 	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
 			" failed", pci_drv->driver.name,
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
new file mode 100644
index 0000000000..9084833847
--- /dev/null
+++ b/lib/eventdev/eventdev_private.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "eventdev_pmd.h"
+#include "rte_eventdev.h"
+
+static uint16_t
+dummy_event_enqueue(__rte_unused void *port,
+		    __rte_unused const struct rte_event *ev)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_enqueue_burst(__rte_unused void *port,
+			  __rte_unused const struct rte_event ev[],
+			  __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
+		    __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue_burst(__rte_unused void *port,
+			  __rte_unused struct rte_event ev[],
+			  __rte_unused uint16_t nb_events,
+			  __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue(__rte_unused void *port,
+			       __rte_unused struct rte_event ev[],
+			       __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue_same_dest(__rte_unused void *port,
+					 __rte_unused struct rte_event ev[],
+					 __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue same destination requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_crypto_adapter_enqueue(__rte_unused void *port,
+				   __rte_unused struct rte_event ev[],
+				   __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event crypto adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
+{
+	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+	static const struct rte_event_fp_ops dummy = {
+		.enqueue = dummy_event_enqueue,
+		.enqueue_burst = dummy_event_enqueue_burst,
+		.enqueue_new_burst = dummy_event_enqueue_burst,
+		.enqueue_forward_burst = dummy_event_enqueue_burst,
+		.dequeue = dummy_event_dequeue,
+		.dequeue_burst = dummy_event_dequeue_burst,
+		.txa_enqueue = dummy_event_tx_adapter_enqueue,
+		.txa_enqueue_same_dest =
+			dummy_event_tx_adapter_enqueue_same_dest,
+		.ca_enqueue = dummy_event_crypto_adapter_enqueue,
+		.data = dummy_data,
+	};
+
+	*fp_op = dummy;
+}
+
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
+		     const struct rte_eventdev *dev)
+{
+	fp_op->enqueue = dev->enqueue;
+	fp_op->enqueue_burst = dev->enqueue_burst;
+	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
+	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
+	fp_op->dequeue = dev->dequeue;
+	fp_op->dequeue_burst = dev->dequeue_burst;
+	fp_op->txa_enqueue = dev->txa_enqueue;
+	fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
+	fp_op->ca_enqueue = dev->ca_enqueue;
+	fp_op->data = dev->data->ports;
+}
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 8b51fde361..9051ff04b7 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -8,6 +8,7 @@ else
 endif
 
 sources = files(
+        'eventdev_private.c',
         'rte_eventdev.c',
         'rte_event_ring.c',
         'eventdev_trace_points.c',
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index bfcfa31cd1..f14a887340 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -46,6 +46,9 @@ static struct rte_eventdev_global eventdev_globals = {
 	.nb_devs		= 0
 };
 
+/* Public fastpath APIs. */
+struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 /* Event dev north bound API implementation */
 
 uint8_t
@@ -300,8 +303,8 @@ int
 rte_event_dev_configure(uint8_t dev_id,
 			const struct rte_event_dev_config *dev_conf)
 {
-	struct rte_eventdev *dev;
 	struct rte_event_dev_info info;
+	struct rte_eventdev *dev;
 	int diag;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -470,10 +473,13 @@ rte_event_dev_configure(uint8_t dev_id,
 		return diag;
 	}
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
+
 	/* Configure the device */
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 		event_dev_queue_config(dev, 0);
 		event_dev_port_config(dev, 0);
 	}
@@ -1244,6 +1250,8 @@ rte_event_dev_start(uint8_t dev_id)
 	else
 		return diag;
 
+	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
+
 	return 0;
 }
 
@@ -1284,6 +1292,7 @@ rte_event_dev_stop(uint8_t dev_id)
 	dev->data->dev_started = 0;
 	(*dev->dev_ops->dev_stop)(dev);
 	rte_eventdev_trace_stop(dev_id);
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 }
 
 int
@@ -1302,6 +1311,7 @@ rte_event_dev_close(uint8_t dev_id)
 		return -EBUSY;
 	}
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 	rte_eventdev_trace_close(dev_id);
 	return (*dev->dev_ops->dev_close)(dev);
 }
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 115b97e431..4461073101 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -39,6 +39,34 @@ typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
 						   uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */
 
+struct rte_event_fp_ops {
+	event_enqueue_t enqueue;
+	/**< PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< PMD enqueue burst new function. */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< PMD enqueue burst fwd function. */
+	event_dequeue_t dequeue;
+	/**< PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< PMD Tx adapter enqueue function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< PMD Tx adapter enqueue same destination function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< PMD Crypto adapter enqueue function. */
+	uintptr_t reserved[2];
+
+	void **data;
+	/**< points to array of internal port data pointers */
+	uintptr_t reserved2[4];
+} __rte_cache_aligned;
+
+extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
 /**< @internal Max length of name of event PMD */
 
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 5f1fe412a4..33ab447d4b 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -85,6 +85,9 @@ DPDK_22 {
 	rte_event_timer_cancel_burst;
 	rte_eventdevs;
 
+	#added in 21.11
+	rte_event_fp_ops;
+
 	local: *;
 };
 
@@ -141,6 +144,8 @@ EXPERIMENTAL {
 INTERNAL {
 	global:
 
+	event_dev_fp_ops_reset;
+	event_dev_fp_ops_set;
 	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread
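
A sketch of the resulting fast-path ops lifecycle from the application's point
of view (device 0 and port 0 are illustrative and assumed to be set up; the
effect on the public inline functions only materialises once they are switched
over to rte_event_fp_ops in the next patch):

#include <rte_eventdev.h>

static void
fp_ops_lifecycle_sketch(void)
{
	struct rte_event ev;
	uint16_t n;

	/* after rte_event_dev_configure() + queue/port setup + linking: */
	n = rte_event_dequeue_burst(0, 0, &ev, 1, 0);
	/* configure reset fp_ops, so this hits the dummy dequeue callback,
	 * which logs an error and returns 0
	 */

	rte_event_dev_start(0);  /* event_dev_fp_ops_set() installs PMD ops */
	n = rte_event_dequeue_burst(0, 0, &ev, 1, 0);  /* real PMD dequeue */

	rte_event_dev_stop(0);   /* fp_ops reset back to the dummy callbacks */
	n = rte_event_dequeue_burst(0, 0, &ev, 1, 0);  /* returns 0 again */
	(void)n;
}
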

* [dpdk-dev] [PATCH v2 05/13] eventdev: use new API for inline functions
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (2 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 04/13] eventdev: move inline APIs into separate structure pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 06/13] eventdev: hide event device related structures pbhagavatula
                     ` (9 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Use new driver interface for the fastpath enqueue/dequeue inline
functions.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 lib/eventdev/rte_event_crypto_adapter.h | 15 +++++---
 lib/eventdev/rte_event_eth_tx_adapter.h | 15 ++++----
 lib/eventdev/rte_eventdev.h             | 46 +++++++++++++++----------
 3 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index 431d05b6ed..eb82818d05 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -568,12 +568,19 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 				struct rte_event ev[],
 				uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
+		rte_errno = EINVAL;
+		return 0;
+	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -581,7 +588,7 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
 		nb_events);
 
-	return dev->ca_enqueue(dev->data->ports[port_id], ev, nb_events);
+	return fp_ops->ca_enqueue(port, ev, nb_events);
 }
 
 #ifdef __cplusplus
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
index 8c59547165..3908c2ded5 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/eventdev/rte_event_eth_tx_adapter.h
@@ -355,16 +355,19 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 				uint16_t nb_events,
 				const uint8_t flags)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
-		!rte_eventdevs[dev_id].attached) {
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -372,11 +375,9 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
 		nb_events, flags);
 	if (flags)
-		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
-						  ev, nb_events);
+		return fp_ops->txa_enqueue_same_dest(port, ev, nb_events);
 	else
-		return dev->txa_enqueue(dev->data->ports[port_id], ev,
-					nb_events);
+		return fp_ops->txa_enqueue(port, ev, nb_events);
 }
 
 /**
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 1b11d4576d..31fa9ac4b8 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1747,15 +1747,19 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			  const struct rte_event ev[], uint16_t nb_events,
 			  const event_enqueue_burst_t fn)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -1766,9 +1770,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+		return (fp_ops->enqueue)(port, ev);
 	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
+		return fn(port, ev, nb_events);
 }
 
 /**
@@ -1818,10 +1822,11 @@ static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_burst);
+					 fp_ops->enqueue_burst);
 }
 
 /**
@@ -1869,10 +1874,11 @@ static inline uint16_t
 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
 			    const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_new_burst);
+					 fp_ops->enqueue_new_burst);
 }
 
 /**
@@ -1920,10 +1926,11 @@ static inline uint16_t
 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
 				const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_forward_burst);
+					 fp_ops->enqueue_forward_burst);
 }
 
 /**
@@ -1996,15 +2003,19 @@ static inline uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 			uint16_t nb_events, uint64_t timeout_ticks)
 {
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -2015,11 +2026,10 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->dequeue)(dev->data->ports[port_id], ev,
-				       timeout_ticks);
+		return (fp_ops->dequeue)(port, ev, timeout_ticks);
 	else
-		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
-					     nb_events, timeout_ticks);
+		return (fp_ops->dequeue_burst)(port, ev, nb_events,
+					       timeout_ticks);
 }
 
 #ifdef __cplusplus
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread
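
With RTE_LIBRTE_EVENTDEV_DEBUG enabled, the reworked inlines above validate
dev_id/port_id against the compile-time maxima and the per-port data pointer,
setting rte_errno to EINVAL; a caller-side sketch (send_one() is illustrative)
that separates argument errors from backpressure could be:

#include <errno.h>
#include <rte_errno.h>
#include <rte_eventdev.h>

static int
send_one(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
{
	uint16_t n;

	rte_errno = 0;
	n = rte_event_enqueue_burst(dev_id, port_id, ev, 1);
	if (n == 1)
		return 0;
	if (rte_errno == EINVAL)  /* bad dev/port/queue id (debug build) */
		return -EINVAL;
	return -EAGAIN;           /* backpressure: retry later */
}
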

* [dpdk-dev] [PATCH v2 06/13] eventdev: hide event device related structures
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (3 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 05/13] eventdev: use new API for inline functions pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v 07/13] eventdev: hide timer adapter PMD file pbhagavatula
                     ` (8 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Timothy McDaniel, Mattias Rönnblom, Pavan Nikhilesh,
	Harman Kalra
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move rte_eventdev, rte_eventdev_data structures to eventdev_pmd.h.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/dlb2/dlb2_inline_fns.h   |  2 +
 drivers/event/dsw/dsw_evdev.h          |  2 +
 drivers/event/octeontx/timvf_worker.h  |  2 +
 drivers/net/octeontx/octeontx_ethdev.c |  3 +-
 lib/eventdev/eventdev_pmd.h            | 92 +++++++++++++++++++++++++
 lib/eventdev/rte_eventdev.c            | 22 ------
 lib/eventdev/rte_eventdev_core.h       | 93 --------------------------
 7 files changed, 100 insertions(+), 116 deletions(-)

diff --git a/drivers/event/dlb2/dlb2_inline_fns.h b/drivers/event/dlb2/dlb2_inline_fns.h
index ac8d01aa98..1429281cfd 100644
--- a/drivers/event/dlb2/dlb2_inline_fns.h
+++ b/drivers/event/dlb2/dlb2_inline_fns.h
@@ -5,6 +5,8 @@
 #ifndef _DLB2_INLINE_FNS_H_
 #define _DLB2_INLINE_FNS_H_
 
+#include <eventdev_pmd.h>
+
 /* Inline functions required in more than one source file. */
 
 static inline struct dlb2_eventdev *
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 08889a0990..631daea55c 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -5,6 +5,8 @@
 #ifndef _DSW_EVDEV_H_
 #define _DSW_EVDEV_H_
 
+#include <eventdev_pmd.h>
+
 #include <rte_event_ring.h>
 #include <rte_eventdev.h>
 
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index dede1a4a4f..3f1e77f1d1 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -2,6 +2,8 @@
  * Copyright(c) 2017 Cavium, Inc
  */
 
+#include <eventdev_pmd.h>
+
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 9f4c0503b4..c55304839e 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -9,13 +9,14 @@
 #include <string.h>
 #include <unistd.h>
 
+#include <eventdev_pmd.h>
 #include <rte_alarm.h>
 #include <rte_branch_prediction.h>
 #include <rte_bus_vdev.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
-#include <rte_devargs.h>
 #include <rte_dev.h>
+#include <rte_devargs.h>
 #include <rte_kvargs.h>
 #include <rte_malloc.h>
 #include <rte_mbuf_pool_ops.h>
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 2f88dbd6d8..764555d54c 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -80,6 +80,9 @@
 #define RTE_EVENTDEV_DETACHED  (0)
 #define RTE_EVENTDEV_ATTACHED  (1)
 
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
 struct rte_eth_dev;
 
 /** Global structure used for maintaining state of allocated event devices */
@@ -87,6 +90,95 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
+	/**< Array of queue configuration structures. */
+	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
+			   RTE_EVENT_MAX_QUEUES_PER_DEV];
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index f14a887340..7a70c7a963 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -1365,24 +1365,6 @@ eventdev_find_free_device_index(void)
 	return RTE_EVENT_MAX_DEVS;
 }
 
-static uint16_t
-rte_event_tx_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
-static uint16_t
-rte_event_crypto_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id)
 {
@@ -1403,10 +1385,6 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 
 	eventdev = &rte_eventdevs[dev_id];
 
-	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
-	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
-	eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
-
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 4461073101..0da724fa86 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -67,99 +67,6 @@ struct rte_event_fp_ops {
 
 extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
 
-#define RTE_EVENTDEV_NAME_MAX_LEN (64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
-	/**< Array of queue configuration structures. */
-	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
-			   RTE_EVENT_MAX_QUEUES_PER_DEV];
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue_t txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue_t ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
 #ifdef __cplusplus
 }
 #endif
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread
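
Since struct rte_eventdev and rte_eventdev_data now live in eventdev_pmd.h,
any driver code touching them needs the explicit include added in the hunks
above; a minimal hypothetical PMD helper (my_evdev is illustrative) would look
like:

#include <eventdev_pmd.h>  /* struct rte_eventdev / rte_eventdev_data */

struct my_evdev {          /* hypothetical PMD private data */
	uint32_t nb_flows;
};

static inline struct my_evdev *
my_evdev_priv(const struct rte_eventdev *dev)
{
	/* dev_private is kept in the shared rte_eventdev_data */
	return dev->data->dev_private;
}
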

* [dpdk-dev] [PATCH v 07/13] eventdev: hide timer adapter PMD file
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (4 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 06/13] eventdev: hide event device related structures pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 " pbhagavatula
                     ` (7 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Mattias Rönnblom,
	Harry van Haaren, Erik Gabriel Carrillo
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Hide rte_event_timer_adapter_pmd.h file as it is an internal file.
Remove rte_ prefix from rte_event_timer_adapter_ops structure.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cnxk_tim_evdev.c           |  5 ++--
 drivers/event/cnxk/cnxk_tim_evdev.h           |  2 +-
 drivers/event/dsw/dsw_evdev.c                 |  4 +--
 drivers/event/octeontx/ssovf_evdev.c          |  2 +-
 drivers/event/octeontx/timvf_evdev.c          | 17 ++++++-----
 drivers/event/octeontx/timvf_evdev.h          |  9 +++---
 drivers/event/octeontx2/otx2_tim_evdev.c      |  5 ++--
 drivers/event/octeontx2/otx2_tim_evdev.h      |  4 +--
 drivers/event/sw/sw_evdev.c                   |  5 ++--
 ...dapter_pmd.h => event_timer_adapter_pmd.h} |  8 ++---
 lib/eventdev/eventdev_pmd.h                   |  8 ++---
 lib/eventdev/meson.build                      |  2 +-
 lib/eventdev/rte_event_timer_adapter.c        | 30 +++++++++----------
 lib/eventdev/rte_event_timer_adapter.h        |  2 +-
 lib/eventdev/rte_eventdev.c                   |  2 +-
 15 files changed, 51 insertions(+), 54 deletions(-)
 rename lib/eventdev/{rte_event_timer_adapter_pmd.h => event_timer_adapter_pmd.h} (95%)

diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index 9d40e336d7..10634c31e3 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -5,7 +5,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_tim_evdev.h"

-static struct rte_event_timer_adapter_ops cnxk_tim_ops;
+static struct event_timer_adapter_ops cnxk_tim_ops;

 static int
 cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
@@ -353,8 +353,7 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)

 int
 cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();

diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index c369f6f472..91e163eb5a 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -267,7 +267,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,

 int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);

 void cnxk_tim_init(struct roc_sso *sso);
 void cnxk_tim_fini(void);
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 01f060fff3..e9d33ad36b 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -381,8 +381,8 @@ dsw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,

 static int
 dsw_timer_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
-			   uint64_t flags  __rte_unused, uint32_t *caps,
-			   const struct rte_event_timer_adapter_ops **ops)
+			   uint64_t flags __rte_unused, uint32_t *caps,
+			   const struct event_timer_adapter_ops **ops)
 {
 	*caps = 0;
 	*ops = NULL;
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 4a8c6a13a5..e7aecd4139 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -721,7 +721,7 @@ ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque)

 static int
 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+		     uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
 			timvf_enable_stats);
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index 688e9daa66..1f1cda3f7f 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -407,18 +407,19 @@ timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
 	return 0;
 }

-static struct rte_event_timer_adapter_ops timvf_ops = {
-	.init		= timvf_ring_create,
-	.uninit		= timvf_ring_free,
-	.start		= timvf_ring_start,
-	.stop		= timvf_ring_stop,
-	.get_info	= timvf_ring_info_get,
+static struct event_timer_adapter_ops timvf_ops = {
+	.init = timvf_ring_create,
+	.uninit = timvf_ring_free,
+	.start = timvf_ring_start,
+	.stop = timvf_ring_stop,
+	.get_info = timvf_ring_info_get,
 };

 int
 timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats)
+			     uint32_t *caps,
+			     const struct event_timer_adapter_ops **ops,
+			     uint8_t enable_stats)
 {
 	RTE_SET_USED(dev);

diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 2977063d66..cef02cd7f9 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -5,13 +5,13 @@
 #ifndef __TIMVF_EVDEV_H__
 #define __TIMVF_EVDEV_H__

+#include <event_timer_adapter_pmd.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
 #include <rte_eal.h>
-#include <rte_eventdev.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
+#include <rte_eventdev.h>
 #include <rte_io.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
@@ -196,8 +196,9 @@ uint8_t timvf_get_ring(void);
 void timvf_release_ring(uint8_t vfid);
 void *timvf_bar(uint8_t id, uint8_t bar);
 int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats);
+				 uint32_t *caps,
+				 const struct event_timer_adapter_ops **ops,
+				 uint8_t enable_stats);
 uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint16_t nb_timers);
 uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c
index de50c4c76e..7dcb291043 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.c
+++ b/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -9,7 +9,7 @@
 #include "otx2_evdev.h"
 #include "otx2_tim_evdev.h"

-static struct rte_event_timer_adapter_ops otx2_tim_ops;
+static struct event_timer_adapter_ops otx2_tim_ops;

 static inline int
 tim_get_msix_offsets(void)
@@ -497,8 +497,7 @@ otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)

 int
 otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct otx2_tim_evdev *dev = tim_priv_get();

diff --git a/drivers/event/octeontx2/otx2_tim_evdev.h b/drivers/event/octeontx2/otx2_tim_evdev.h
index caa6ad3b3c..dac642e0e1 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.h
+++ b/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -5,8 +5,8 @@
 #ifndef __OTX2_TIM_EVDEV_H__
 #define __OTX2_TIM_EVDEV_H__

+#include <event_timer_adapter_pmd.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
 #include <rte_reciprocal.h>

 #include "otx2_dev.h"
@@ -244,7 +244,7 @@ uint16_t otx2_tim_timer_cancel_burst(

 int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);

 void otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev);
 void otx2_tim_fini(void);
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 9b72073322..34815b30b2 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -561,10 +561,9 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 }

 static int
-sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
-			  uint64_t flags,
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 			  uint32_t *caps,
-			  const struct rte_event_timer_adapter_ops **ops)
+			  const struct event_timer_adapter_ops **ops)
 {
 	RTE_SET_USED(dev);
 	RTE_SET_USED(flags);
diff --git a/lib/eventdev/rte_event_timer_adapter_pmd.h b/lib/eventdev/event_timer_adapter_pmd.h
similarity index 95%
rename from lib/eventdev/rte_event_timer_adapter_pmd.h
rename to lib/eventdev/event_timer_adapter_pmd.h
index cf3509dc6f..189017b5c1 100644
--- a/lib/eventdev/rte_event_timer_adapter_pmd.h
+++ b/lib/eventdev/event_timer_adapter_pmd.h
@@ -3,8 +3,8 @@
  * All rights reserved.
  */

-#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
-#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#ifndef __EVENT_TIMER_ADAPTER_PMD_H__
+#define __EVENT_TIMER_ADAPTER_PMD_H__

 /**
  * @file
@@ -57,7 +57,7 @@ typedef int (*rte_event_timer_adapter_stats_reset_t)(
  * @internal Structure containing the functions exported by an event timer
  * adapter implementation.
  */
-struct rte_event_timer_adapter_ops {
+struct event_timer_adapter_ops {
 	rte_event_timer_adapter_init_t		init;  /**< Set up adapter */
 	rte_event_timer_adapter_uninit_t	uninit;/**< Tear down adapter */
 	rte_event_timer_adapter_start_t		start; /**< Start adapter */
@@ -111,4 +111,4 @@ struct rte_event_timer_adapter_data {
 }
 #endif

-#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
+#endif /* __EVENT_TIMER_ADAPTER_PMD_H__ */
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 764555d54c..6646db4918 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -24,8 +24,8 @@
 #include <rte_mbuf.h>
 #include <rte_mbuf_dyn.h>

+#include "event_timer_adapter_pmd.h"
 #include "rte_eventdev.h"
-#include "rte_event_timer_adapter_pmd.h"

 /* Logging Macros */
 #define RTE_EDEV_LOG_ERR(...) \
@@ -591,10 +591,8 @@ struct rte_event_eth_rx_adapter_queue_conf;
  *
  */
 typedef int (*eventdev_timer_adapter_caps_get_t)(
-				const struct rte_eventdev *dev,
-				uint64_t flags,
-				uint32_t *caps,
-				const struct rte_event_timer_adapter_ops **ops);
+	const struct rte_eventdev *dev, uint64_t flags, uint32_t *caps,
+	const struct event_timer_adapter_ops **ops);

 /**
  * Add ethernet Rx queues to event device. This callback is invoked if
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 9051ff04b7..f19b831edd 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -24,7 +24,6 @@ headers = files(
         'rte_event_ring.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_timer_adapter.h',
-        'rte_event_timer_adapter_pmd.h',
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
@@ -35,6 +34,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'event_timer_adapter_pmd.h',
 )

 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ee20b39f4b..ae55407042 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -20,11 +20,11 @@
 #include <rte_service_component.h>
 #include <rte_cycles.h>

-#include "rte_eventdev.h"
+#include "event_timer_adapter_pmd.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
 #include "rte_event_timer_adapter.h"
-#include "rte_event_timer_adapter_pmd.h"
+#include "rte_eventdev.h"
+#include "rte_eventdev_trace.h"

 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
@@ -35,7 +35,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];

-static const struct rte_event_timer_adapter_ops swtim_ops;
+static const struct event_timer_adapter_ops swtim_ops;

 #define EVTIM_LOG(level, logtype, ...) \
 	rte_log(RTE_LOG_ ## level, logtype, \
@@ -1207,15 +1207,15 @@ swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
 	return __swtim_arm_burst(adapter, evtims, nb_evtims);
 }

-static const struct rte_event_timer_adapter_ops swtim_ops = {
-	.init			= swtim_init,
-	.uninit			= swtim_uninit,
-	.start			= swtim_start,
-	.stop			= swtim_stop,
-	.get_info		= swtim_get_info,
-	.stats_get		= swtim_stats_get,
-	.stats_reset		= swtim_stats_reset,
-	.arm_burst		= swtim_arm_burst,
-	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
-	.cancel_burst		= swtim_cancel_burst,
+static const struct event_timer_adapter_ops swtim_ops = {
+	.init = swtim_init,
+	.uninit = swtim_uninit,
+	.start = swtim_start,
+	.stop = swtim_stop,
+	.get_info = swtim_get_info,
+	.stats_get = swtim_stats_get,
+	.stats_reset = swtim_stats_reset,
+	.arm_burst = swtim_arm_burst,
+	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
+	.cancel_burst = swtim_cancel_burst,
 };
diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index 4e0d2a819b..cad6d3b4c5 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -523,7 +523,7 @@ struct rte_event_timer_adapter {
 	/**< Pointer to driver cancel function. */
 	struct rte_event_timer_adapter_data *data;
 	/**< Pointer to shared adapter data */
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 	/**< Functions exported by adapter driver */

 	RTE_STD_C11
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 7a70c7a963..8de5f8b47f 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -142,7 +142,7 @@ int
 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
 {
 	struct rte_eventdev *dev;
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;

 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

--
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v2 07/13] eventdev: hide timer adapter PMD file
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (5 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v 07/13] eventdev: hide timer adapter PMD file pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 08/13] eventdev: remove rte prefix for internal structs pbhagavatula
                     ` (6 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Mattias Rönnblom,
	Harry van Haaren, Erik Gabriel Carrillo
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Hide the rte_event_timer_adapter_pmd.h header, which is internal to the
library, by renaming it to event_timer_adapter_pmd.h and moving it from
the exported headers to the driver SDK headers.
Remove the rte_ prefix from the rte_event_timer_adapter_ops structure.
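
For illustration only (this snippet is not part of the patch), a driver
implementing the timer adapter ops would now include the internal header
and instantiate the un-prefixed structure; the dummy_* callbacks below are
placeholders whose signatures follow the rte_event_timer_adapter_init_t /
rte_event_timer_adapter_uninit_t typedefs from event_timer_adapter_pmd.h:

#include <event_timer_adapter_pmd.h>
#include <rte_common.h>
#include <rte_event_timer_adapter.h>

/* Placeholder init callback: a real PMD would set up its ring here. */
static int
dummy_tim_init(struct rte_event_timer_adapter *adapter)
{
	RTE_SET_USED(adapter);
	return 0;
}

/* Placeholder uninit callback: a real PMD would tear down its ring here. */
static int
dummy_tim_uninit(struct rte_event_timer_adapter *adapter)
{
	RTE_SET_USED(adapter);
	return 0;
}

/* After this patch the ops structure is named without the rte_ prefix. */
static struct event_timer_adapter_ops dummy_tim_ops = {
	.init = dummy_tim_init,
	.uninit = dummy_tim_uninit,
};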

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cnxk_tim_evdev.c           |  5 ++--
 drivers/event/cnxk/cnxk_tim_evdev.h           |  2 +-
 drivers/event/dsw/dsw_evdev.c                 |  4 +--
 drivers/event/octeontx/ssovf_evdev.c          |  2 +-
 drivers/event/octeontx/timvf_evdev.c          | 17 ++++++-----
 drivers/event/octeontx/timvf_evdev.h          |  9 +++---
 drivers/event/octeontx2/otx2_tim_evdev.c      |  5 ++--
 drivers/event/octeontx2/otx2_tim_evdev.h      |  4 +--
 drivers/event/sw/sw_evdev.c                   |  5 ++--
 ...dapter_pmd.h => event_timer_adapter_pmd.h} |  8 ++---
 lib/eventdev/eventdev_pmd.h                   |  8 ++---
 lib/eventdev/meson.build                      |  2 +-
 lib/eventdev/rte_event_timer_adapter.c        | 30 +++++++++----------
 lib/eventdev/rte_event_timer_adapter.h        |  2 +-
 lib/eventdev/rte_eventdev.c                   |  2 +-
 15 files changed, 51 insertions(+), 54 deletions(-)
 rename lib/eventdev/{rte_event_timer_adapter_pmd.h => event_timer_adapter_pmd.h} (95%)

diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index 9d40e336d7..10634c31e3 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -5,7 +5,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops cnxk_tim_ops;
+static struct event_timer_adapter_ops cnxk_tim_ops;
 
 static int
 cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
@@ -353,8 +353,7 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index c369f6f472..91e163eb5a 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -267,7 +267,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 
 int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void cnxk_tim_init(struct roc_sso *sso);
 void cnxk_tim_fini(void);
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 01f060fff3..e9d33ad36b 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -381,8 +381,8 @@ dsw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
 
 static int
 dsw_timer_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
-			   uint64_t flags  __rte_unused, uint32_t *caps,
-			   const struct rte_event_timer_adapter_ops **ops)
+			   uint64_t flags __rte_unused, uint32_t *caps,
+			   const struct event_timer_adapter_ops **ops)
 {
 	*caps = 0;
 	*ops = NULL;
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 4a8c6a13a5..e7aecd4139 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -721,7 +721,7 @@ ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque)
 
 static int
 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+		     uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
 			timvf_enable_stats);
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index 688e9daa66..1f1cda3f7f 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -407,18 +407,19 @@ timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
 	return 0;
 }
 
-static struct rte_event_timer_adapter_ops timvf_ops = {
-	.init		= timvf_ring_create,
-	.uninit		= timvf_ring_free,
-	.start		= timvf_ring_start,
-	.stop		= timvf_ring_stop,
-	.get_info	= timvf_ring_info_get,
+static struct event_timer_adapter_ops timvf_ops = {
+	.init = timvf_ring_create,
+	.uninit = timvf_ring_free,
+	.start = timvf_ring_start,
+	.stop = timvf_ring_stop,
+	.get_info = timvf_ring_info_get,
 };
 
 int
 timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats)
+			     uint32_t *caps,
+			     const struct event_timer_adapter_ops **ops,
+			     uint8_t enable_stats)
 {
 	RTE_SET_USED(dev);
 
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 2977063d66..cef02cd7f9 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -5,13 +5,13 @@
 #ifndef __TIMVF_EVDEV_H__
 #define __TIMVF_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
 #include <rte_eal.h>
-#include <rte_eventdev.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
+#include <rte_eventdev.h>
 #include <rte_io.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
@@ -196,8 +196,9 @@ uint8_t timvf_get_ring(void);
 void timvf_release_ring(uint8_t vfid);
 void *timvf_bar(uint8_t id, uint8_t bar);
 int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats);
+				 uint32_t *caps,
+				 const struct event_timer_adapter_ops **ops,
+				 uint8_t enable_stats);
 uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint16_t nb_timers);
 uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c
index de50c4c76e..7dcb291043 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.c
+++ b/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -9,7 +9,7 @@
 #include "otx2_evdev.h"
 #include "otx2_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops otx2_tim_ops;
+static struct event_timer_adapter_ops otx2_tim_ops;
 
 static inline int
 tim_get_msix_offsets(void)
@@ -497,8 +497,7 @@ otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct otx2_tim_evdev *dev = tim_priv_get();
 
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.h b/drivers/event/octeontx2/otx2_tim_evdev.h
index caa6ad3b3c..dac642e0e1 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.h
+++ b/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -5,8 +5,8 @@
 #ifndef __OTX2_TIM_EVDEV_H__
 #define __OTX2_TIM_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
 #include <rte_reciprocal.h>
 
 #include "otx2_dev.h"
@@ -244,7 +244,7 @@ uint16_t otx2_tim_timer_cancel_burst(
 
 int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev);
 void otx2_tim_fini(void);
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 9b72073322..34815b30b2 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -561,10 +561,9 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 }
 
 static int
-sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
-			  uint64_t flags,
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 			  uint32_t *caps,
-			  const struct rte_event_timer_adapter_ops **ops)
+			  const struct event_timer_adapter_ops **ops)
 {
 	RTE_SET_USED(dev);
 	RTE_SET_USED(flags);
diff --git a/lib/eventdev/rte_event_timer_adapter_pmd.h b/lib/eventdev/event_timer_adapter_pmd.h
similarity index 95%
rename from lib/eventdev/rte_event_timer_adapter_pmd.h
rename to lib/eventdev/event_timer_adapter_pmd.h
index cf3509dc6f..189017b5c1 100644
--- a/lib/eventdev/rte_event_timer_adapter_pmd.h
+++ b/lib/eventdev/event_timer_adapter_pmd.h
@@ -3,8 +3,8 @@
  * All rights reserved.
  */
 
-#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
-#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#ifndef __EVENT_TIMER_ADAPTER_PMD_H__
+#define __EVENT_TIMER_ADAPTER_PMD_H__
 
 /**
  * @file
@@ -57,7 +57,7 @@ typedef int (*rte_event_timer_adapter_stats_reset_t)(
  * @internal Structure containing the functions exported by an event timer
  * adapter implementation.
  */
-struct rte_event_timer_adapter_ops {
+struct event_timer_adapter_ops {
 	rte_event_timer_adapter_init_t		init;  /**< Set up adapter */
 	rte_event_timer_adapter_uninit_t	uninit;/**< Tear down adapter */
 	rte_event_timer_adapter_start_t		start; /**< Start adapter */
@@ -111,4 +111,4 @@ struct rte_event_timer_adapter_data {
 }
 #endif
 
-#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
+#endif /* __EVENT_TIMER_ADAPTER_PMD_H__ */
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 764555d54c..6646db4918 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -24,8 +24,8 @@
 #include <rte_mbuf.h>
 #include <rte_mbuf_dyn.h>
 
+#include "event_timer_adapter_pmd.h"
 #include "rte_eventdev.h"
-#include "rte_event_timer_adapter_pmd.h"
 
 /* Logging Macros */
 #define RTE_EDEV_LOG_ERR(...) \
@@ -591,10 +591,8 @@ struct rte_event_eth_rx_adapter_queue_conf;
  *
  */
 typedef int (*eventdev_timer_adapter_caps_get_t)(
-				const struct rte_eventdev *dev,
-				uint64_t flags,
-				uint32_t *caps,
-				const struct rte_event_timer_adapter_ops **ops);
+	const struct rte_eventdev *dev, uint64_t flags, uint32_t *caps,
+	const struct event_timer_adapter_ops **ops);
 
 /**
  * Add ethernet Rx queues to event device. This callback is invoked if
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 9051ff04b7..f19b831edd 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -24,7 +24,6 @@ headers = files(
         'rte_event_ring.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_timer_adapter.h',
-        'rte_event_timer_adapter_pmd.h',
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
@@ -35,6 +34,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'event_timer_adapter_pmd.h',
 )
 
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ee20b39f4b..ae55407042 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -20,11 +20,11 @@
 #include <rte_service_component.h>
 #include <rte_cycles.h>
 
-#include "rte_eventdev.h"
+#include "event_timer_adapter_pmd.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
 #include "rte_event_timer_adapter.h"
-#include "rte_event_timer_adapter_pmd.h"
+#include "rte_eventdev.h"
+#include "rte_eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
@@ -35,7 +35,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
 
-static const struct rte_event_timer_adapter_ops swtim_ops;
+static const struct event_timer_adapter_ops swtim_ops;
 
 #define EVTIM_LOG(level, logtype, ...) \
 	rte_log(RTE_LOG_ ## level, logtype, \
@@ -1207,15 +1207,15 @@ swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
 	return __swtim_arm_burst(adapter, evtims, nb_evtims);
 }
 
-static const struct rte_event_timer_adapter_ops swtim_ops = {
-	.init			= swtim_init,
-	.uninit			= swtim_uninit,
-	.start			= swtim_start,
-	.stop			= swtim_stop,
-	.get_info		= swtim_get_info,
-	.stats_get		= swtim_stats_get,
-	.stats_reset		= swtim_stats_reset,
-	.arm_burst		= swtim_arm_burst,
-	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
-	.cancel_burst		= swtim_cancel_burst,
+static const struct event_timer_adapter_ops swtim_ops = {
+	.init = swtim_init,
+	.uninit = swtim_uninit,
+	.start = swtim_start,
+	.stop = swtim_stop,
+	.get_info = swtim_get_info,
+	.stats_get = swtim_stats_get,
+	.stats_reset = swtim_stats_reset,
+	.arm_burst = swtim_arm_burst,
+	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
+	.cancel_burst = swtim_cancel_burst,
 };
diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index 4e0d2a819b..cad6d3b4c5 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -523,7 +523,7 @@ struct rte_event_timer_adapter {
 	/**< Pointer to driver cancel function. */
 	struct rte_event_timer_adapter_data *data;
 	/**< Pointer to shared adapter data */
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 	/**< Functions exported by adapter driver */
 
 	RTE_STD_C11
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 7a70c7a963..8de5f8b47f 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -142,7 +142,7 @@ int
 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
 {
 	struct rte_eventdev *dev;
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v2 08/13] eventdev: remove rte prefix for internal structs
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (6 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 " pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 09/13] eventdev: rearrange fields in timer object pbhagavatula
                     ` (5 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Remove the rte_ prefix from rte_eth_event_enqueue_buffer,
rte_event_eth_rx_adapter and rte_event_crypto_adapter, as these
structures are internal and only used within rte_event_eth_rx_adapter.c
and rte_event_crypto_adapter.c.
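
As a purely hypothetical illustration of the naming convention being
applied here (the names below are placeholders, not DPDK symbols):
structures exposed through public headers keep the rte_ namespace prefix,
while structures private to a single source file do not need it.

#include <stdint.h>

/* Declared in a public, exported header: keep the rte_ prefix. */
struct rte_example_adapter_conf {
	uint32_t nb_queues;
};

/* Defined only inside one .c file, never exported: no rte_ prefix. */
struct example_adapter {
	struct rte_example_adapter_conf conf;
	uint32_t in_flight;
};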

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 lib/eventdev/rte_event_crypto_adapter.c |  66 +++----
 lib/eventdev/rte_event_eth_rx_adapter.c | 249 ++++++++++--------------
 lib/eventdev/rte_eventdev.h             |   2 +-
 3 files changed, 141 insertions(+), 176 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index ebfc8326a8..e9e660a3d2 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -30,7 +30,7 @@
  */
 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
 
-struct rte_event_crypto_adapter {
+struct event_crypto_adapter {
 	/* Event device identifier */
 	uint8_t eventdev_id;
 	/* Event port identifier */
@@ -99,7 +99,7 @@ struct crypto_queue_pair_info {
 	uint8_t len;
 } __rte_cache_aligned;
 
-static struct rte_event_crypto_adapter **event_crypto_adapter;
+static struct event_crypto_adapter **event_crypto_adapter;
 
 /* Macros to check for valid adapter */
 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -141,7 +141,7 @@ eca_init(void)
 	return 0;
 }
 
-static inline struct rte_event_crypto_adapter *
+static inline struct event_crypto_adapter *
 eca_id_to_adapter(uint8_t id)
 {
 	return event_crypto_adapter ?
@@ -158,7 +158,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	int ret;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
 
 	if (adapter == NULL)
 		return -EINVAL;
@@ -202,7 +202,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				enum rte_event_crypto_adapter_mode mode,
 				void *conf_arg)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
 	struct rte_event_dev_info dev_info;
 	int socket_id;
@@ -304,7 +304,7 @@ rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_crypto_adapter_free(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -329,8 +329,8 @@ rte_event_crypto_adapter_free(uint8_t id)
 }
 
 static inline unsigned int
-eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
-		 struct rte_event *ev, unsigned int cnt)
+eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+		     unsigned int cnt)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -420,7 +420,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
 }
 
 static unsigned int
-eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -466,8 +466,8 @@ eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
 }
 
 static int
-eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_enq)
+eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_enq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct rte_event ev[BATCH_SIZE];
@@ -500,8 +500,8 @@ eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline void
-eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
-		  struct rte_crypto_op **ops, uint16_t num)
+eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
+		      struct rte_crypto_op **ops, uint16_t num)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -564,8 +564,8 @@ eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline unsigned int
-eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_deq)
+eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_deq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -627,8 +627,8 @@ eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static void
-eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_ops)
+eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
+		       unsigned int max_ops)
 {
 	while (max_ops) {
 		unsigned int e_cnt, d_cnt;
@@ -648,7 +648,7 @@ eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
 static int
 eca_service_func(void *args)
 {
-	struct rte_event_crypto_adapter *adapter = args;
+	struct event_crypto_adapter *adapter = args;
 
 	if (rte_spinlock_trylock(&adapter->lock) == 0)
 		return 0;
@@ -659,7 +659,7 @@ eca_service_func(void *args)
 }
 
 static int
-eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
 {
 	struct rte_event_crypto_adapter_conf adapter_conf;
 	struct rte_service_spec service;
@@ -699,10 +699,9 @@ eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
 }
 
 static void
-eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
-			struct crypto_device_info *dev_info,
-			int32_t queue_pair_id,
-			uint8_t add)
+eca_update_qp_info(struct event_crypto_adapter *adapter,
+		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
+		   uint8_t add)
 {
 	struct crypto_queue_pair_info *qp_info;
 	int enabled;
@@ -729,9 +728,8 @@ eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
 }
 
 static int
-eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
-		uint8_t cdev_id,
-		int queue_pair_id)
+eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
+		   int queue_pair_id)
 {
 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
 	struct crypto_queue_pair_info *qpairs;
@@ -773,7 +771,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			int32_t queue_pair_id,
 			const struct rte_event *event)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
 	uint32_t cap;
@@ -889,7 +887,7 @@ int
 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 					int32_t queue_pair_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	int ret;
@@ -975,7 +973,7 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 static int
 eca_adapter_ctrl(uint8_t id, int start)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1017,7 +1015,7 @@ eca_adapter_ctrl(uint8_t id, int start)
 int
 rte_event_crypto_adapter_start(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 	adapter = eca_id_to_adapter(id);
@@ -1039,7 +1037,7 @@ int
 rte_event_crypto_adapter_stats_get(uint8_t id,
 				struct rte_event_crypto_adapter_stats *stats)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_crypto_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -1083,7 +1081,7 @@ rte_event_crypto_adapter_stats_get(uint8_t id,
 int
 rte_event_crypto_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1111,7 +1109,7 @@ rte_event_crypto_adapter_stats_reset(uint8_t id)
 int
 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -1128,7 +1126,7 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 13dfb28401..f8225ebd3d 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -78,14 +78,14 @@ struct eth_rx_vector_data {
 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
 
 /* Instance per adapter */
-struct rte_eth_event_enqueue_buffer {
+struct eth_event_enqueue_buffer {
 	/* Count of events in this buffer */
 	uint16_t count;
 	/* Array of events in this buffer */
 	struct rte_event events[ETH_EVENT_BUFFER_SIZE];
 };
 
-struct rte_event_eth_rx_adapter {
+struct event_eth_rx_adapter {
 	/* RSS key */
 	uint8_t rss_key_be[RSS_KEY_SIZE];
 	/* Event device identifier */
@@ -109,7 +109,7 @@ struct rte_event_eth_rx_adapter {
 	/* Next entry in wrr[] to begin polling */
 	uint32_t wrr_pos;
 	/* Event burst buffer */
-	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+	struct eth_event_enqueue_buffer event_enqueue_buffer;
 	/* Vector enable flag */
 	uint8_t ena_vector;
 	/* Timestamp of previous vector expiry list traversal */
@@ -231,7 +231,7 @@ struct eth_rx_queue_info {
 	struct eth_rx_vector_data vector_data;
 };
 
-static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+static struct event_eth_rx_adapter **event_eth_rx_adapter;
 
 static inline int
 rxa_validate_id(uint8_t id)
@@ -247,7 +247,7 @@ rxa_validate_id(uint8_t id)
 } while (0)
 
 static inline int
-rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
 {
 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
 }
@@ -265,10 +265,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
  */
 static int
-rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
-	 unsigned int n, int *cw,
-	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
-	 uint16_t gcd, int prev)
+rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
+	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+	     uint16_t gcd, int prev)
 {
 	int i = prev;
 	uint16_t w;
@@ -373,10 +372,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
  */
 static void
-rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_intr)
+rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_intr)
 {
 	uint32_t intr_diff;
 
@@ -392,12 +390,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * interrupt queues could currently be poll mode Rx queues
  */
 static void
-rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+			  uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -424,11 +420,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
  * after deleting poll mode rx queues
  */
 static void
-rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
 {
 	uint32_t poll_diff;
 	uint32_t wrr_len_diff;
@@ -449,13 +443,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
 /* Calculate nb_rx_* after adding poll mode rx queues
  */
 static void
-rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint16_t wt,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint16_t wt, uint32_t *nb_rx_poll,
+			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -482,13 +473,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after adding rx_queue_id */
 static void
-rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint16_t wt,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	if (wt != 0)
 		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
@@ -500,12 +488,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after deleting rx_queue_id */
 static void
-rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
 				nb_wrr);
@@ -517,8 +503,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the rx_poll array
  */
 static struct eth_rx_poll_entry *
-rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint32_t num_rx_polled)
+rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
 {
 	size_t len;
 
@@ -534,7 +519,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the WRR array
  */
 static uint32_t *
-rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
 {
 	size_t len;
 
@@ -547,11 +532,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
 }
 
 static int
-rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint32_t nb_poll,
-		uint32_t nb_wrr,
-		struct eth_rx_poll_entry **rx_poll,
-		uint32_t **wrr_sched)
+rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
+		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
+		      uint32_t **wrr_sched)
 {
 
 	if (nb_poll == 0) {
@@ -576,9 +559,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Precalculate WRR polling sequence for all queues in rx_adapter */
 static void
-rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_rx_poll_entry *rx_poll,
-		uint32_t *rx_wrr)
+rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
+		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
 {
 	uint16_t d;
 	uint16_t q;
@@ -705,13 +687,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 }
 
 static inline int
-rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
 {
 	return !!rx_adapter->enq_block_count;
 }
 
 static inline void
-rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->rx_enq_block_start_ts)
 		return;
@@ -724,8 +706,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
-		    struct rte_event_eth_rx_adapter_stats *stats)
+rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
+		     struct rte_event_eth_rx_adapter_stats *stats)
 {
 	if (unlikely(!stats->rx_enq_start_ts))
 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
@@ -744,10 +726,10 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter)
 {
-	struct rte_eth_event_enqueue_buffer *buf =
-	    &rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 
 	if (!buf->count)
@@ -774,7 +756,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
 		struct eth_rx_vector_data *vec)
 {
 	vec->vector_ev->nb_elem = 0;
@@ -785,9 +767,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline uint16_t
-rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 			struct eth_rx_queue_info *queue_info,
-			struct rte_eth_event_enqueue_buffer *buf,
+			struct eth_event_enqueue_buffer *buf,
 			struct rte_mbuf **mbufs, uint16_t num)
 {
 	struct rte_event *ev = &buf->events[buf->count];
@@ -845,19 +827,16 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		uint16_t rx_queue_id,
-		struct rte_mbuf **mbufs,
-		uint16_t num)
+rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
 					&rx_adapter->eth_devices[eth_dev_id];
 	struct eth_rx_queue_info *eth_rx_queue_info =
 					&dev_info->rx_queue[rx_queue_id];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev = &buf->events[buf->count];
 	uint64_t event = eth_rx_queue_info->event;
 	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
@@ -909,16 +888,13 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue packets from  <port, q>  to event buffer */
 static inline uint32_t
-rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint16_t port_id,
-	uint16_t queue_id,
-	uint32_t rx_count,
-	uint32_t max_rx,
-	int *rxq_empty)
+rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
+	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
+	   int *rxq_empty)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats =
 					&rx_adapter->stats;
 	uint16_t n;
@@ -953,8 +929,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
-		void *data)
+rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
 {
 	uint16_t port_id;
 	uint16_t queue;
@@ -994,8 +969,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
-			uint32_t num_intr_vec)
+rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
+			  uint32_t num_intr_vec)
 {
 	if (rx_adapter->num_intr_vec + num_intr_vec >
 				RTE_EVENT_ETH_INTR_RING_SIZE) {
@@ -1010,9 +985,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Delete entries for (dev, queue) from the interrupt ring */
 static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			uint16_t rx_queue_id)
+rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info,
+			  uint16_t rx_queue_id)
 {
 	int i, n;
 	union queue_data qd;
@@ -1045,7 +1020,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
 static void *
 rxa_intr_thread(void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
+	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
 	int n, i;
 
@@ -1068,12 +1043,12 @@ rxa_intr_thread(void *arg)
  * mbufs to eventdev
  */
 static inline uint32_t
-rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
@@ -1188,11 +1163,11 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
  * it.
  */
 static inline uint32_t
-rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1233,8 +1208,8 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
-	struct rte_eth_event_enqueue_buffer *buf =
+	struct event_eth_rx_adapter *rx_adapter = arg;
+	struct eth_event_enqueue_buffer *buf =
 		&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev;
 
@@ -1257,7 +1232,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 static int
 rxa_service_func(void *args)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = args;
+	struct event_eth_rx_adapter *rx_adapter = args;
 	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
@@ -1318,7 +1293,7 @@ rte_event_eth_rx_adapter_init(void)
 	return 0;
 }
 
-static inline struct rte_event_eth_rx_adapter *
+static inline struct event_eth_rx_adapter *
 rxa_id_to_adapter(uint8_t id)
 {
 	return event_eth_rx_adapter ?
@@ -1335,7 +1310,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	uint8_t port_id;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	dev_conf = dev->data->dev_conf;
@@ -1384,7 +1359,7 @@ rxa_epoll_create1(void)
 }
 
 static int
-rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->epd != INIT_FD)
 		return 0;
@@ -1401,7 +1376,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1445,7 +1420,7 @@ rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 
@@ -1466,7 +1441,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
 {
 	int ret;
 
@@ -1484,9 +1459,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1514,9 +1488,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id)
+rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 {
 	int err;
 	int i;
@@ -1573,9 +1546,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
+		struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err, err1;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1663,9 +1635,8 @@ rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int rx_queue_id)
+rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 
 {
 	int i, j, err;
@@ -1713,9 +1684,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	return err;
 }
 
-
 static int
-rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
 {
 	int ret;
 	struct rte_service_spec service;
@@ -1758,10 +1728,9 @@ rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
 }
 
 static void
-rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int32_t rx_queue_id,
-		uint8_t add)
+rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, int32_t rx_queue_id,
+		 uint8_t add)
 {
 	struct eth_rx_queue_info *queue_info;
 	int enabled;
@@ -1811,9 +1780,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
 }
 
 static void
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id)
+rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
+	   struct eth_device_info *dev_info, int32_t rx_queue_id)
 {
 	struct eth_rx_vector_data *vec;
 	int pollq;
@@ -1854,10 +1822,9 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static void
-rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id,
-	const struct rte_event_eth_rx_adapter_queue_conf *conf)
+rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
+	      struct eth_device_info *dev_info, int32_t rx_queue_id,
+	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
 {
 	struct eth_rx_queue_info *queue_info;
 	const struct rte_event *ev = &conf->ev;
@@ -1922,7 +1889,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 
 static void
 rxa_sw_event_vector_configure(
-	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 	int rx_queue_id,
 	const struct rte_event_eth_rx_adapter_event_vector_config *config)
 {
@@ -1956,10 +1923,10 @@ rxa_sw_event_vector_configure(
 			      config->vector_timeout_ns >> 1;
 }
 
-static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		int rx_queue_id,
-		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+static int
+rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	   int rx_queue_id,
+	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
@@ -2088,7 +2055,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 static int
 rxa_ctrl(uint8_t id, int start)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2135,7 +2102,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				rte_event_eth_rx_adapter_conf_cb conf_cb,
 				void *conf_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	int ret;
 	int socket_id;
 	uint16_t i;
@@ -2235,7 +2202,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_eth_rx_adapter_free(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2267,7 +2234,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
 {
 	int ret;
 	uint32_t cap;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 
@@ -2385,7 +2352,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 {
 	int ret = 0;
 	struct rte_eventdev *dev;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	uint32_t nb_rx_poll = 0;
@@ -2505,7 +2472,7 @@ rte_event_eth_rx_adapter_queue_event_vector_config(
 	struct rte_event_eth_rx_adapter_event_vector_config *config)
 {
 	struct rte_event_eth_rx_adapter_vector_limits limits;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	uint32_t cap;
 	int ret;
@@ -2632,7 +2599,7 @@ int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -2673,7 +2640,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 int
 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2701,7 +2668,7 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 int
 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2721,7 +2688,7 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
 					rte_event_eth_rx_adapter_cb_fn cb_fn,
 					void *cb_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	int ret;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 31fa9ac4b8..f1fcd6ce3d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1193,7 +1193,7 @@ struct rte_event {
 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
 /**< The application can override the adapter generated flow ID in the
  * event. This flow ID can be specified when adding an ethdev Rx queue
- * to the adapter using the ev member of struct rte_event_eth_rx_adapter
+ * to the adapter using the ev.flow_id member.
  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
  */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v2 09/13] eventdev: rearrange fields in timer object
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (7 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 08/13] eventdev: remove rte prefix for internal structs pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 10/13] eventdev: move timer adapters memory to hugepage pbhagavatula
                     ` (4 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Rearrange the fields in the rte_event_timer data structure to remove the
padding holes. Also, remove the use of volatile from rte_event_timer.
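
A simplified, self-contained sketch of the effect (the structures below
are illustrative stand-ins, not the real rte_event_timer definition): a
4-byte enum placed between 8-byte members forces 4 bytes of padding,
whereas placing it after the last 8-byte member lets the trailing
user_meta[] area start earlier without growing the structure.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum timer_state { TIMER_NOT_ARMED, TIMER_ARMED, TIMER_CANCELED };

struct timer_before {
	enum timer_state state;   /* 4 bytes + 4 bytes of padding here */
	uint64_t timeout_ticks;
	uint64_t impl_opaque[2];
	uint8_t user_meta[];
};

struct timer_after {
	uint64_t timeout_ticks;
	uint64_t impl_opaque[2];
	enum timer_state state;   /* no hole in the middle any more */
	uint8_t user_meta[];
};

int
main(void)
{
	/* Print where user_meta[] begins and the total structure size. */
	printf("before: user_meta offset %zu, sizeof %zu\n",
	       offsetof(struct timer_before, user_meta),
	       sizeof(struct timer_before));
	printf("after:  user_meta offset %zu, sizeof %zu\n",
	       offsetof(struct timer_after, user_meta),
	       sizeof(struct timer_after));
	return 0;
}

On a typical LP64 ABI this prints offsets 32 and 28 with the same overall
size, i.e. the reordered layout gains 4 bytes of usable user metadata
space within the same footprint.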

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_timer_adapter.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index cad6d3b4c5..1551741820 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -475,8 +475,6 @@ struct rte_event_timer {
 	 *  - op: RTE_EVENT_OP_NEW
 	 *  - event_type: RTE_EVENT_TYPE_TIMER
 	 */
-	volatile enum rte_event_timer_state state;
-	/**< State of the event timer. */
 	uint64_t timeout_ticks;
 	/**< Expiry timer ticks expressed in number of *timer_ticks_ns* from
 	 * now.
@@ -488,6 +486,8 @@ struct rte_event_timer {
 	 * implementation specific values to share between the arm and cancel
 	 * operations.  The application should not modify this field.
 	 */
+	enum rte_event_timer_state state;
+	/**< State of the event timer. */
 	uint8_t user_meta[0];
 	/**< Memory to store user specific metadata.
 	 * The event timer adapter implementation should not modify this area.
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v2 10/13] eventdev: move timer adapters memory to hugepage
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (8 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 09/13] eventdev: rearrange fields in timer object pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 11/13] eventdev: promote event vector API to stable pbhagavatula
                     ` (3 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the memory used by timer adapters to hugepages.
Allocate the memory on the first adapter create or lookup to cover both
primary and secondary process use cases.
This prevents potential TLB misses and matches the memory layout used by
other subsystems.
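
For illustration only (not part of the patch): a sketch of the
allocate-on-first-use pattern that the diff below adds to both the create
and lookup paths. The helper name timer_adapters_alloc() is made up here;
the patch open-codes the allocation in both functions instead.

#include <errno.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_event_timer_adapter.h>

static struct rte_event_timer_adapter *adapters;

/* Allocate the adapter array from DPDK (typically hugepage-backed) memory
 * the first time any adapter is created or looked up, so primary and
 * secondary processes both trigger it on demand.
 */
static int
timer_adapters_alloc(void)
{
        if (adapters != NULL)
                return 0;

        adapters = rte_zmalloc("Eventdev",
                               sizeof(*adapters) *
                               RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
                               RTE_CACHE_LINE_SIZE);
        if (adapters == NULL) {
                rte_errno = ENOMEM;
                return -ENOMEM;
        }
        return 0;
}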

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_timer_adapter.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ae55407042..c4dc7a5fd4 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
-static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
+static struct rte_event_timer_adapter *adapters;
 
 static const struct event_timer_adapter_ops swtim_ops;
 
@@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
 	int n, ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (conf == NULL) {
 		rte_errno = EINVAL;
 		return NULL;
@@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
 	int ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (adapters[adapter_id].allocated)
 		return &adapters[adapter_id]; /* Adapter is already loaded */
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v2 11/13] eventdev: promote event vector API to stable
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (9 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 10/13] eventdev: move timer adapters memory to hugepage pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 12/13] eventdev: make trace APIs internal pbhagavatula
                     ` (2 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Jay Jayatheerthan, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Promote event vector configuration APIs to stable.
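
For illustration only (not part of the patch): a small usage sketch of the
now-stable APIs. The device/port ids, pool size and vector length below are
arbitrary example values; real code should keep nb_elem within the bounds
reported by rte_event_eth_rx_adapter_vector_limits_get().

#include <stdio.h>

#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static struct rte_mempool *
create_rx_vector_pool(uint8_t dev_id, uint16_t eth_port_id)
{
        struct rte_event_eth_rx_adapter_vector_limits limits;

        if (rte_event_eth_rx_adapter_vector_limits_get(dev_id, eth_port_id,
                                                       &limits) < 0) {
                printf("vector limits query failed\n");
                return NULL;
        }

        /* 8K vectors holding up to 64 mbuf pointers each. */
        return rte_event_vector_pool_create("rxa_vec_pool", 8192, 0, 64,
                                            rte_socket_id());
}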

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/rte_event_eth_rx_adapter.h | 2 --
 lib/eventdev/rte_eventdev.h             | 1 -
 lib/eventdev/version.map                | 6 +++---
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index 182dd2e5dd..d13d817025 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -543,7 +543,6 @@ int rte_event_eth_rx_adapter_cb_register(uint8_t id, uint16_t eth_dev_id,
  *  - 0: Success.
  *  - <0: Error code on failure.
  */
-__rte_experimental
 int rte_event_eth_rx_adapter_vector_limits_get(
 	uint8_t dev_id, uint16_t eth_port_id,
 	struct rte_event_eth_rx_adapter_vector_limits *limits);
@@ -570,7 +569,6 @@ int rte_event_eth_rx_adapter_vector_limits_get(
  *  - 0: Success, Receive queue configured correctly.
  *  - <0: Error code on failure.
  */
-__rte_experimental
 int rte_event_eth_rx_adapter_queue_event_vector_config(
 	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
 	struct rte_event_eth_rx_adapter_event_vector_config *config);
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index f1fcd6ce3d..14d4d9ec81 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1734,7 +1734,6 @@ int rte_event_dev_selftest(uint8_t dev_id);
  *    - ENOMEM - no appropriate memory area found in which to create memzone
  *    - ENAMETOOLONG - mempool name requested is too long.
  */
-__rte_experimental
 struct rte_mempool *
 rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 33ab447d4b..9c040fea0a 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -38,10 +38,12 @@ DPDK_22 {
 	rte_event_eth_rx_adapter_free;
 	rte_event_eth_rx_adapter_queue_add;
 	rte_event_eth_rx_adapter_queue_del;
+	rte_event_eth_rx_adapter_queue_event_vector_config;
 	rte_event_eth_rx_adapter_service_id_get;
 	rte_event_eth_rx_adapter_start;
 	rte_event_eth_rx_adapter_stats_get;
 	rte_event_eth_rx_adapter_stats_reset;
+	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_rx_adapter_stop;
 	rte_event_eth_tx_adapter_caps_get;
 	rte_event_eth_tx_adapter_create;
@@ -83,6 +85,7 @@ DPDK_22 {
 	rte_event_timer_arm_burst;
 	rte_event_timer_arm_tmo_tick_burst;
 	rte_event_timer_cancel_burst;
+	rte_event_vector_pool_create;
 	rte_eventdevs;
 
 	#added in 21.11
@@ -135,9 +138,6 @@ EXPERIMENTAL {
 	__rte_eventdev_trace_port_setup;
 
 	#added in 21.05
-	rte_event_vector_pool_create;
-	rte_event_eth_rx_adapter_vector_limits_get;
-	rte_event_eth_rx_adapter_queue_event_vector_config;
 	__rte_eventdev_trace_crypto_adapter_enqueue;
 };
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v2 12/13] eventdev: make trace APIs internal
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (10 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 11/13] eventdev: promote event vector API to stable pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 13/13] eventdev: mark trace variables as internal pbhagavatula
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan, Erik Gabriel Carrillo
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Slow-path trace APIs are only used in rte_eventdev.c, so make them
internal.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} | 0
 lib/eventdev/eventdev_trace_points.c                    | 2 +-
 lib/eventdev/meson.build                                | 2 +-
 lib/eventdev/rte_event_crypto_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_rx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_tx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_timer_adapter.c                  | 2 +-
 lib/eventdev/rte_eventdev.c                             | 2 +-
 8 files changed, 7 insertions(+), 7 deletions(-)
 rename lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} (100%)

diff --git a/lib/eventdev/rte_eventdev_trace.h b/lib/eventdev/eventdev_trace.h
similarity index 100%
rename from lib/eventdev/rte_eventdev_trace.h
rename to lib/eventdev/eventdev_trace.h
diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
index 3867ec8008..237d9383fd 100644
--- a/lib/eventdev/eventdev_trace_points.c
+++ b/lib/eventdev/eventdev_trace_points.c
@@ -4,7 +4,7 @@
 
 #include <rte_trace_point_register.h>
 
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 /* Eventdev trace points */
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_configure,
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index f19b831edd..c750e0214f 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -19,7 +19,6 @@ sources = files(
 )
 headers = files(
         'rte_eventdev.h',
-        'rte_eventdev_trace.h',
         'rte_eventdev_trace_fp.h',
         'rte_event_ring.h',
         'rte_event_eth_rx_adapter.h',
@@ -34,6 +33,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'eventdev_trace.h',
         'event_timer_adapter_pmd.h',
 )
 
diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index e9e660a3d2..ae1151fb75 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -16,7 +16,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_crypto_adapter.h"
 
 #define BATCH_SIZE 32
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index f8225ebd3d..7e97fbd21d 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -20,7 +20,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_rx_adapter.h"
 
 #define BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
index 18c0359db7..ee3631bced 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.c
+++ b/lib/eventdev/rte_event_eth_tx_adapter.c
@@ -6,7 +6,7 @@
 #include <rte_ethdev.h>
 
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_tx_adapter.h"
 
 #define TXA_BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index c4dc7a5fd4..7404b0cbb2 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -24,7 +24,7 @@
 #include "eventdev_pmd.h"
 #include "rte_event_timer_adapter.h"
 #include "rte_eventdev.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 8de5f8b47f..7deaa5333c 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -36,7 +36,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v2 13/13] eventdev: mark trace variables as internal
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (11 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 12/13] eventdev: make trace APIs internal pbhagavatula
@ 2021-10-03  8:27   ` pbhagavatula
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-03  8:27 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark the rte_trace global variables as internal, i.e. remove them from the
experimental section of the version map.
Some of them are used in inline APIs; mark those as global.
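
For illustration only (not part of the patch): why the trace points used by
the inline fast-path APIs have to stay exported. When fast-path tracing is
compiled in, a translation unit like the sketch below expands the inline
dequeue helper from rte_eventdev.h, and that expansion references
__rte_eventdev_trace_deq_burst, so the symbol must remain visible (global)
to applications.

#include <rte_eventdev.h>

uint16_t
poll_one(uint8_t dev_id, uint8_t port_id, struct rte_event *ev)
{
        /* The inline body of rte_event_dequeue_burst() invokes the
         * rte_eventdev_trace_deq_burst tracepoint, which resolves to
         * __rte_eventdev_trace_deq_burst in librte_eventdev at link time.
         */
        return rte_event_dequeue_burst(dev_id, port_id, ev, 1, 0);
}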

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/version.map | 77 ++++++++++++++++++----------------------
 1 file changed, 35 insertions(+), 42 deletions(-)

diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 9c040fea0a..d21adedf14 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -88,57 +88,19 @@ DPDK_22 {
 	rte_event_vector_pool_create;
 	rte_eventdevs;
 
-	#added in 21.11
-	rte_event_fp_ops;
-
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
 	# added in 20.05
-	__rte_eventdev_trace_configure;
-	__rte_eventdev_trace_queue_setup;
-	__rte_eventdev_trace_port_link;
-	__rte_eventdev_trace_port_unlink;
-	__rte_eventdev_trace_start;
-	__rte_eventdev_trace_stop;
-	__rte_eventdev_trace_close;
+	__rte_eventdev_trace_crypto_adapter_enqueue;
 	__rte_eventdev_trace_deq_burst;
 	__rte_eventdev_trace_enq_burst;
-	__rte_eventdev_trace_eth_rx_adapter_create;
-	__rte_eventdev_trace_eth_rx_adapter_free;
-	__rte_eventdev_trace_eth_rx_adapter_queue_add;
-	__rte_eventdev_trace_eth_rx_adapter_queue_del;
-	__rte_eventdev_trace_eth_rx_adapter_start;
-	__rte_eventdev_trace_eth_rx_adapter_stop;
-	__rte_eventdev_trace_eth_tx_adapter_create;
-	__rte_eventdev_trace_eth_tx_adapter_free;
-	__rte_eventdev_trace_eth_tx_adapter_queue_add;
-	__rte_eventdev_trace_eth_tx_adapter_queue_del;
-	__rte_eventdev_trace_eth_tx_adapter_start;
-	__rte_eventdev_trace_eth_tx_adapter_stop;
 	__rte_eventdev_trace_eth_tx_adapter_enqueue;
-	__rte_eventdev_trace_timer_adapter_create;
-	__rte_eventdev_trace_timer_adapter_start;
-	__rte_eventdev_trace_timer_adapter_stop;
-	__rte_eventdev_trace_timer_adapter_free;
 	__rte_eventdev_trace_timer_arm_burst;
 	__rte_eventdev_trace_timer_arm_tmo_tick_burst;
 	__rte_eventdev_trace_timer_cancel_burst;
-	__rte_eventdev_trace_crypto_adapter_create;
-	__rte_eventdev_trace_crypto_adapter_free;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
-	__rte_eventdev_trace_crypto_adapter_start;
-	__rte_eventdev_trace_crypto_adapter_stop;
 
-	# changed in 20.11
-	__rte_eventdev_trace_port_setup;
+	#added in 21.11
+	rte_event_fp_ops;
 
-	#added in 21.05
-	__rte_eventdev_trace_crypto_adapter_enqueue;
+	local: *;
 };
 
 INTERNAL {
@@ -156,4 +118,35 @@ INTERNAL {
 	rte_event_pmd_release;
 	rte_event_pmd_vdev_init;
 	rte_event_pmd_vdev_uninit;
+
+	__rte_eventdev_trace_close;
+	__rte_eventdev_trace_configure;
+	__rte_eventdev_trace_crypto_adapter_create;
+	__rte_eventdev_trace_crypto_adapter_free;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
+	__rte_eventdev_trace_crypto_adapter_start;
+	__rte_eventdev_trace_crypto_adapter_stop;
+	__rte_eventdev_trace_eth_rx_adapter_create;
+	__rte_eventdev_trace_eth_rx_adapter_free;
+	__rte_eventdev_trace_eth_rx_adapter_queue_add;
+	__rte_eventdev_trace_eth_rx_adapter_queue_del;
+	__rte_eventdev_trace_eth_rx_adapter_start;
+	__rte_eventdev_trace_eth_rx_adapter_stop;
+	__rte_eventdev_trace_eth_tx_adapter_create;
+	__rte_eventdev_trace_eth_tx_adapter_free;
+	__rte_eventdev_trace_eth_tx_adapter_queue_add;
+	__rte_eventdev_trace_eth_tx_adapter_queue_del;
+	__rte_eventdev_trace_eth_tx_adapter_start;
+	__rte_eventdev_trace_eth_tx_adapter_stop;
+	__rte_eventdev_trace_port_link;
+	__rte_eventdev_trace_port_setup;
+	__rte_eventdev_trace_port_unlink;
+	__rte_eventdev_trace_queue_setup;
+	__rte_eventdev_trace_start;
+	__rte_eventdev_trace_stop;
+	__rte_eventdev_trace_timer_adapter_create;
+	__rte_eventdev_trace_timer_adapter_free;
+	__rte_eventdev_trace_timer_adapter_start;
+	__rte_eventdev_trace_timer_adapter_stop;
 };
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface as internal
  2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
                     ` (12 preceding siblings ...)
  2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 13/13] eventdev: mark trace variables as internal pbhagavatula
@ 2021-10-06  6:49   ` pbhagavatula
  2021-10-06  6:49     ` [dpdk-dev] [PATCH v3 02/14] eventdev: separate internal structures pbhagavatula
                       ` (15 more replies)
  13 siblings, 16 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:49 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Mattias Rönnblom, Liang Ma,
	Peter Mccarthy, Harry van Haaren, Abhinandan Gujjar,
	Ray Kinsella
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark all the driver specific functions as internal, remove
`rte` prefix from `struct rte_eventdev_ops`.
Remove experimental tag from internal functions.
Remove `eventdev_pmd.h` from non-internal header files.
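
For illustration only (not part of the patch): what the rename looks like on
the PMD side. The driver-facing ops type loses the rte_ prefix while the
callback signatures stay the same; the my_* names are placeholders and the
include path is as seen from an in-tree driver.

#include <rte_common.h>
#include <eventdev_pmd.h>

static void
my_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(info);
}

/* Was "static struct rte_eventdev_ops ..." before this patch. */
static struct eventdev_ops my_ops = {
        .dev_infos_get = my_info_get,
};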

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 v3 Changes:
 - Reset fp_ops when the device is torn down.
 - Add `event_dev_probing_finish()`; this function is used for
   post-initialization processing. In the current use case it is used to
   initialize the fastpath ops.

 v2 Changes:
 - Rework the inline flat array by adding port data to it.
 - Rearrange the rte_event_timer elements.

 drivers/event/cnxk/cn10k_eventdev.c        |  6 ++---
 drivers/event/cnxk/cn9k_eventdev.c         | 10 ++++-----
 drivers/event/dlb2/dlb2.c                  |  2 +-
 drivers/event/dpaa/dpaa_eventdev.c         |  2 +-
 drivers/event/dpaa2/dpaa2_eventdev.c       |  2 +-
 drivers/event/dsw/dsw_evdev.c              |  2 +-
 drivers/event/octeontx/ssovf_evdev.c       |  2 +-
 drivers/event/octeontx/ssovf_worker.c      |  4 ++--
 drivers/event/octeontx2/otx2_evdev.c       | 26 +++++++++++-----------
 drivers/event/opdl/opdl_evdev.c            |  2 +-
 drivers/event/skeleton/skeleton_eventdev.c |  2 +-
 drivers/event/sw/sw_evdev.c                |  2 +-
 lib/eventdev/eventdev_pmd.h                |  6 ++++-
 lib/eventdev/eventdev_pmd_pci.h            |  4 +++-
 lib/eventdev/eventdev_pmd_vdev.h           |  2 ++
 lib/eventdev/meson.build                   |  6 +++++
 lib/eventdev/rte_event_crypto_adapter.h    |  1 -
 lib/eventdev/rte_eventdev.h                | 25 ++++++++++++---------
 lib/eventdev/version.map                   | 17 +++++++-------
 19 files changed, 70 insertions(+), 53 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 8af273a01b..b2c3a6cd31 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -375,7 +375,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
@@ -383,7 +383,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
@@ -858,7 +858,7 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }

-static struct rte_eventdev_ops cn10k_sso_dev_ops = {
+static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 59a3dc22a3..0e0bf7177e 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -507,7 +507,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
@@ -515,7 +515,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
@@ -523,7 +523,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
@@ -531,7 +531,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
 	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
@@ -1052,7 +1052,7 @@ cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }

-static struct rte_eventdev_ops cn9k_sso_dev_ops = {
+static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 252bbd8d5e..c8742ddb2c 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -4384,7 +4384,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
 	struct dlb2_eventdev *dlb2;

 	/* Expose PMD's eventdev interface */
-	static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
+	static struct eventdev_ops dlb2_eventdev_entry_ops = {
 		.dev_infos_get    = dlb2_eventdev_info_get,
 		.dev_configure    = dlb2_eventdev_configure,
 		.dev_start        = dlb2_eventdev_start,
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index ec74160325..9f14390d28 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -925,7 +925,7 @@ dpaa_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }

-static struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct eventdev_ops dpaa_eventdev_ops = {
 	.dev_infos_get    = dpaa_event_dev_info_get,
 	.dev_configure    = dpaa_event_dev_configure,
 	.dev_start        = dpaa_event_dev_start,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 5ccf22f77f..d577f64824 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1015,7 +1015,7 @@ dpaa2_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }

-static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static struct eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
 	.dev_configure    = dpaa2_eventdev_configure,
 	.dev_start        = dpaa2_eventdev_start,
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 2301a4b7a0..01f060fff3 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -398,7 +398,7 @@ dsw_crypto_adapter_caps_get(const struct rte_eventdev *dev  __rte_unused,
 	return 0;
 }

-static struct rte_eventdev_ops dsw_evdev_ops = {
+static struct eventdev_ops dsw_evdev_ops = {
 	.port_setup = dsw_port_setup,
 	.port_def_conf = dsw_port_def_conf,
 	.port_release = dsw_port_release,
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index b93f6ec8c6..4a8c6a13a5 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -790,7 +790,7 @@ ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev,
 }

 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops ssovf_ops = {
+static struct eventdev_ops ssovf_ops = {
 	.dev_infos_get    = ssovf_info_get,
 	.dev_configure    = ssovf_configure,
 	.queue_def_conf   = ssovf_queue_def_conf,
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 8b056ddc5a..2df940f0f1 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -343,11 +343,11 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)

 	dev->ca_enqueue = ssow_crypto_adapter_enqueue;

-	const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+	const event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
 #define T(name, f3, f2, f1, f0, sz, flags)				\
 	[f3][f2][f1][f0] =  sso_event_tx_adapter_enqueue_ ##name,

-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+		SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};

diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 38a6b651d9..f26bed334f 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -178,41 +178,41 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_tx_adptr_enq_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_tx_adptr_enq_seg_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_tx_adptr_enq_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

 	event_dev->enqueue			= otx2_ssogws_enq;
 	event_dev->enqueue_burst		= otx2_ssogws_enq_burst;
@@ -1596,7 +1596,7 @@ otx2_sso_close(struct rte_eventdev *event_dev)
 }

 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops otx2_sso_ops = {
+static struct eventdev_ops otx2_sso_ops = {
 	.dev_infos_get    = otx2_sso_info_get,
 	.dev_configure    = otx2_sso_configure,
 	.queue_def_conf   = otx2_sso_queue_def_conf,
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index cfa9733b64..739dc64c82 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -609,7 +609,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
 static int
 opdl_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_opdl_ops = {
+	static struct eventdev_ops evdev_opdl_ops = {
 		.dev_configure = opdl_dev_configure,
 		.dev_infos_get = opdl_info_get,
 		.dev_close = opdl_close,
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 6fd1102596..c9e17e7cb1 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -320,7 +320,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)


 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops skeleton_eventdev_ops = {
+static struct eventdev_ops skeleton_eventdev_ops = {
 	.dev_infos_get    = skeleton_eventdev_info_get,
 	.dev_configure    = skeleton_eventdev_configure,
 	.dev_start        = skeleton_eventdev_start,
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a5e6ca22e8..9b72073322 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -945,7 +945,7 @@ static int32_t sw_sched_service_func(void *args)
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_sw_ops = {
+	static struct eventdev_ops evdev_sw_ops = {
 			.dev_configure = sw_dev_configure,
 			.dev_infos_get = sw_info_get,
 			.dev_close = sw_close,
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 94d99f4903..682b61cff0 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -99,6 +99,7 @@ extern struct rte_eventdev *rte_eventdevs;
  * @return
  *   - The rte_eventdev structure pointer for the given device ID.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_get_named_dev(const char *name)
 {
@@ -127,6 +128,7 @@ rte_event_pmd_get_named_dev(const char *name)
  * @return
  *   - If the device index is valid (1) or not (0).
  */
+__rte_internal
 static inline unsigned
 rte_event_pmd_is_valid_dev(uint8_t dev_id)
 {
@@ -1056,7 +1058,7 @@ typedef int (*eventdev_eth_tx_adapter_stats_reset_t)(uint8_t id,
 					const struct rte_eventdev *dev);

 /** Event device operations function pointer table */
-struct rte_eventdev_ops {
+struct eventdev_ops {
 	eventdev_info_get_t dev_infos_get;	/**< Get device info. */
 	eventdev_configure_t dev_configure;	/**< Configure device. */
 	eventdev_start_t dev_start;		/**< Start device. */
@@ -1174,6 +1176,7 @@ struct rte_eventdev_ops {
  * @return
  *   - Slot in the rte_dev_devices array for a new device;
  */
+__rte_internal
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id);

@@ -1185,6 +1188,7 @@ rte_event_pmd_allocate(const char *name, int socket_id);
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);

diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index 1545b240f2..2f12a5eb24 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -31,7 +31,7 @@ typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
  * interface.  Same as rte_event_pmd_pci_probe, except caller can specify
  * the name.
  */
-__rte_experimental
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 			      struct rte_pci_device *pci_dev,
@@ -85,6 +85,7 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .probe function to attach to a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
 			    struct rte_pci_device *pci_dev,
@@ -108,6 +109,7 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .remove function to detach a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
 			     eventdev_pmd_pci_callback_t devuninit)
diff --git a/lib/eventdev/eventdev_pmd_vdev.h b/lib/eventdev/eventdev_pmd_vdev.h
index 2d33924e6c..d9ee7277dd 100644
--- a/lib/eventdev/eventdev_pmd_vdev.h
+++ b/lib/eventdev/eventdev_pmd_vdev.h
@@ -37,6 +37,7 @@
  *   - Eventdev pointer if device is successfully created.
  *   - NULL if device cannot be created.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
 		int socket_id)
@@ -74,6 +75,7 @@ rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 static inline int
 rte_event_pmd_vdev_uninit(const char *name)
 {
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 32abeba794..523ea9ccae 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,5 +27,11 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+driver_sdk_headers += files(
+        'eventdev_pmd.h',
+        'eventdev_pmd_pci.h',
+        'eventdev_pmd_vdev.h',
+)
+
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
 deps += ['telemetry']
diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index f8c6cca87c..431d05b6ed 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -171,7 +171,6 @@ extern "C" {
 #include <stdint.h>

 #include "rte_eventdev.h"
-#include "eventdev_pmd.h"

 /**
  * Crypto event adapter mode
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index a9c496fb62..0c701888d5 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,7 +1324,7 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);

-struct rte_eventdev_ops;
+struct eventdev_ops;
 struct rte_eventdev;

 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
@@ -1342,18 +1342,21 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
 /**< @internal Dequeue burst of events from port of a device */

-typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
+					       struct rte_event ev[],
+					       uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */

-typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
-		struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
+							 struct rte_event ev[],
+							 uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device supporting
  * burst having same destination Ethernet port & Tx queue.
  */

-typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */

 #define RTE_EVENTDEV_NAME_MAX_LEN	(64)
@@ -1421,15 +1424,15 @@ struct rte_eventdev {
 	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
+	event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
 	/**< Pointer to PMD eth Tx adapter burst enqueue function with
 	 * events destined to same Eth port & Tx queue.
 	 */
-	event_tx_adapter_enqueue txa_enqueue;
+	event_tx_adapter_enqueue_t txa_enqueue;
 	/**< Pointer to PMD eth Tx adapter enqueue function. */
 	struct rte_eventdev_data *data;
 	/**< Pointer to device data */
-	struct rte_eventdev_ops *dev_ops;
+	struct eventdev_ops *dev_ops;
 	/**< Functions exported by PMD */
 	struct rte_device *dev;
 	/**< Device info. supplied by probing */
@@ -1438,7 +1441,7 @@ struct rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */

-	event_crypto_adapter_enqueue ca_enqueue;
+	event_crypto_adapter_enqueue_t ca_enqueue;
 	/**< Pointer to PMD crypto adapter enqueue function. */

 	uint64_t reserved_64s[4]; /**< Reserved for future fields */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 88625621ec..5f1fe412a4 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -55,12 +55,6 @@ DPDK_22 {
 	rte_event_eth_tx_adapter_stats_get;
 	rte_event_eth_tx_adapter_stats_reset;
 	rte_event_eth_tx_adapter_stop;
-	rte_event_pmd_allocate;
-	rte_event_pmd_pci_probe;
-	rte_event_pmd_pci_remove;
-	rte_event_pmd_release;
-	rte_event_pmd_vdev_init;
-	rte_event_pmd_vdev_uninit;
 	rte_event_port_attr_get;
 	rte_event_port_default_conf_get;
 	rte_event_port_link;
@@ -136,8 +130,6 @@ EXPERIMENTAL {

 	# changed in 20.11
 	__rte_eventdev_trace_port_setup;
-	# added in 20.11
-	rte_event_pmd_pci_probe_named;

 	#added in 21.05
 	rte_event_vector_pool_create;
@@ -150,4 +142,13 @@ INTERNAL {
 	global:

 	rte_event_pmd_selftest_seqn_dynfield_offset;
+	rte_event_pmd_allocate;
+	rte_event_pmd_get_named_dev;
+	rte_event_pmd_is_valid_dev;
+	rte_event_pmd_pci_probe;
+	rte_event_pmd_pci_probe_named;
+	rte_event_pmd_pci_remove;
+	rte_event_pmd_release;
+	rte_event_pmd_vdev_init;
+	rte_event_pmd_vdev_uninit;
 };
--
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 02/14] eventdev: separate internal structures
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
@ 2021-10-06  6:49     ` pbhagavatula
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 03/14] eventdev: allocate max space for internal arrays pbhagavatula
                       ` (14 subsequent siblings)
  15 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:49 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Create rte_eventdev_core.h and move all the internal data structures
to this file. These structures are mostly used by drivers, but they
need to be in the public header file as they are accessed by datapath
inline functions for performance reasons.
The accessibility of these data structures is not changed.
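
For illustration only (not part of the patch): application code is untouched
by this split. The public header pulls rte_eventdev_core.h back in (see the
"#include <rte_eventdev_core.h>" hunk below), so a unit like this keeps
compiling with only <rte_eventdev.h> included.

#include <rte_eventdev.h>

static inline uint16_t
forward_one(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
{
        /* The inline fast path still sees struct rte_eventdev and its
         * function pointers, now declared in rte_eventdev_core.h.
         */
        return rte_event_enqueue_burst(dev_id, port_id, ev, 1);
}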

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/eventdev_pmd.h      |   3 -
 lib/eventdev/meson.build         |   3 +
 lib/eventdev/rte_eventdev.h      | 718 +++++++++++++------------------
 lib/eventdev/rte_eventdev_core.h | 138 ++++++
 4 files changed, 437 insertions(+), 425 deletions(-)
 create mode 100644 lib/eventdev/rte_eventdev_core.h

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 682b61cff0..7eb2aa0520 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -87,9 +87,6 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
-extern struct rte_eventdev *rte_eventdevs;
-/** The pool of rte_eventdev structures. */
-
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 523ea9ccae..8b51fde361 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,6 +27,9 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+indirect_headers += files(
+        'rte_eventdev_core.h',
+)
 driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 0c701888d5..1b11d4576d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,317 +1324,6 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);
 
-struct eventdev_ops;
-struct rte_eventdev;
-
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
-/**< @internal Enqueue event on port of a device */
-
-typedef uint16_t (*event_enqueue_burst_t)(void *port,
-			const struct rte_event ev[], uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-		uint64_t timeout_ticks);
-/**< @internal Dequeue event from port of a device */
-
-typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-/**< @internal Dequeue burst of events from port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
-					       struct rte_event ev[],
-					       uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
-							 struct rte_event ev[],
-							 uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device supporting
- * burst having same destination Ethernet port & Tx queue.
- */
-
-typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
-						   struct rte_event ev[],
-						   uint16_t nb_events);
-/**< @internal Enqueue burst of events on crypto adapter */
-
-#define RTE_EVENTDEV_NAME_MAX_LEN	(64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void **ports;
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
-	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue_t txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue_t ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
-static __rte_always_inline uint16_t
-__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events,
-			const event_enqueue_burst_t fn)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
-	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
-}
-
-/**
- * Enqueue a burst of events objects or an event object supplied in *rte_event*
- * structure on an  event device designated by its *dev_id* through the event
- * port specified by *port_id*. Each event object specifies the event queue on
- * which it will be enqueued.
- *
- * The *nb_events* parameter is the number of event objects to enqueue which are
- * supplied in the *ev* array of *rte_event* structure.
- *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
- *
- * The rte_event_enqueue_burst() function returns the number of
- * events objects it actually enqueued. A return value equal to *nb_events*
- * means that all event objects have been enqueued.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- */
-static inline uint16_t
-rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
- * an event device designated by its *dev_id* through the event port specified
- * by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_NEW.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_new_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
- * on an event device designated by its *dev_id* through the event port
- * specified by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_FORWARD.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_forward_burst);
-}
-
 /**
  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
  *
@@ -1665,124 +1354,27 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 					uint64_t *timeout_ticks);
 
 /**
- * Dequeue a burst of events objects or an event object from the event port
- * designated by its *event_port_id*, on an event device designated
- * by its *dev_id*.
- *
- * rte_event_dequeue_burst() does not dictate the specifics of scheduling
- * algorithm as each eventdev driver may have different criteria to schedule
- * an event. However, in general, from an application perspective scheduler may
- * use the following scheme to dispatch an event to the port.
- *
- * 1) Selection of event queue based on
- *   a) The list of event queues are linked to the event port.
- *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
- *   queue selection from list is based on event queue priority relative to
- *   other event queue supplied as *priority* in rte_event_queue_setup()
- *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
- *   queue selection from the list is based on event priority supplied as
- *   *priority* in rte_event_enqueue_burst()
- * 2) Selection of event
- *   a) The number of flows available in selected event queue.
- *   b) Schedule type method associated with the event
- *
- * The *nb_events* parameter is the maximum number of event objects to dequeue
- * which are returned in the *ev* array of *rte_event* structure.
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated service priority
+ * supplied in *priorities* on the event device designated by its *dev_id*.
  *
- * The rte_event_dequeue_burst() function returns the number of events objects
- * it actually dequeued. A return value equal to *nb_events* means that all
- * event objects have been dequeued.
+ * The link establishment shall enable the event port *port_id* from
+ * receiving events from the specified event queue(s) supplied in *queues*
  *
- * The number of events dequeued is the number of scheduler contexts held by
- * this port. These contexts are automatically released in the next
- * rte_event_dequeue_burst() invocation if the port supports implicit
- * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
- * operation can be used to release the contexts early.
+ * An event queue may link to one or more event ports.
+ * The number of links can be established from an event queue to event port is
+ * implementation defined.
  *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
  *
  * @param dev_id
  *   The identifier of the device.
+ *
  * @param port_id
- *   The identifier of the event port.
- * @param[out] ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   for output to be populated with the dequeued event objects.
- * @param nb_events
- *   The maximum number of event objects to dequeue, typically number of
- *   rte_event_port_dequeue_depth() available for this port.
- *
- * @param timeout_ticks
- *   - 0 no-wait, returns immediately if there is no event.
- *   - >0 wait for the event, if the device is configured with
- *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- *   at least one event is available or *timeout_ticks* time.
- *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
- *   then this function will wait until the event available or
- *   *dequeue_timeout_ns* ns which was previously supplied to
- *   rte_event_dev_configure()
- *
- * @return
- * The number of event objects actually dequeued from the port. The return
- * value can be less than the value of the *nb_events* parameter when the
- * event port's queue is not full.
- *
- * @see rte_event_port_dequeue_depth()
- */
-static inline uint16_t
-rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-			uint16_t nb_events, uint64_t timeout_ticks)
-{
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->dequeue)(
-			dev->data->ports[port_id], ev, timeout_ticks);
-	else
-		return (*dev->dequeue_burst)(
-			dev->data->ports[port_id], ev, nb_events,
-				timeout_ticks);
-}
-
-/**
- * Link multiple source event queues supplied in *queues* to the destination
- * event port designated by its *port_id* with associated service priority
- * supplied in *priorities* on the event device designated by its *dev_id*.
- *
- * The link establishment shall enable the event port *port_id* from
- * receiving events from the specified event queue(s) supplied in *queues*
- *
- * An event queue may link to one or more event ports.
- * The number of links can be established from an event queue to event port is
- * implementation defined.
- *
- * Event queue(s) to event port link establishment can be changed at runtime
- * without re-configuring the device to support scaling and to reduce the
- * latency of critical work by establishing the link with more event ports
- * at runtime.
- *
- * @param dev_id
- *   The identifier of the device.
- *
- * @param port_id
- *   Event port identifier to select the destination port to link.
+ *   Event port identifier to select the destination port to link.
  *
  * @param queues
  *   Points to an array of *nb_links* event queues to be linked
@@ -2148,6 +1740,288 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
 			     int socket_id);
 
+#include <rte_eventdev_core.h>
+
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			  const struct rte_event ev[], uint16_t nb_events,
+			  const event_enqueue_burst_t fn)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+	else
+		return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
+/**
+ * Enqueue a burst of event objects or a single event object supplied in the
+ * *rte_event* structure on an event device designated by its *dev_id* through
+ * the event port specified by *port_id*. Each event object specifies the event
+ * queue on which it will be enqueued.
+ *
+ * The *nb_events* parameter is the number of event objects to enqueue which are
+ * supplied in the *ev* array of *rte_event* structure.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * The rte_event_enqueue_burst() function returns the number of
+ * event objects it actually enqueued. A return value equal to *nb_events*
+ * means that all event objects have been enqueued.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ */
+static inline uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_burst);
+}
+
+/**
+ * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of type *RTE_EVENT_OP_NEW*. This specialized function
+ * gives an additional hint to the PMD, which can optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * contains any event object with an operation type other than
+ * RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of type *RTE_EVENT_OP_FORWARD*. This specialized
+ * function gives an additional hint to the PMD, which can optimize if
+ * possible.
+ *
+ * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
+ * burst contains any event object with an operation type other than
+ * RTE_EVENT_OP_FORWARD.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+				const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_forward_burst);
+}
+
+/**
+ * Dequeue a burst of event objects or a single event object from the event
+ * port designated by its *event_port_id*, on an event device designated
+ * by its *dev_id*.
+ *
+ * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
+ * algorithm, as each eventdev driver may have different criteria for
+ * scheduling an event. However, in general, from an application perspective
+ * the scheduler may use the following scheme to dispatch an event to the port.
+ *
+ * 1) Selection of event queue based on
+ *   a) The list of event queues linked to the event port.
+ *   b) If the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability, then
+ *   event queue selection from the list is based on event queue priority
+ *   relative to other event queues, supplied as *priority* in
+ *   rte_event_queue_setup()
+ *   c) If the device has the RTE_EVENT_DEV_CAP_EVENT_QOS capability, then
+ *   event queue selection from the list is based on event priority supplied
+ *   as *priority* in rte_event_enqueue_burst()
+ * 2) Selection of event
+ *   a) The number of flows available in the selected event queue.
+ *   b) The schedule type method associated with the event.
+ *
+ * The *nb_events* parameter is the maximum number of event objects to dequeue
+ * which are returned in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_dequeue_burst() function returns the number of event objects
+ * it actually dequeued. A return value equal to *nb_events* means that all
+ * event objects have been dequeued.
+ *
+ * The number of events dequeued is the number of scheduler contexts held by
+ * this port. These contexts are automatically released in the next
+ * rte_event_dequeue_burst() invocation if the port supports implicit
+ * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
+ * operation can be used to release the contexts early.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param[out] ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   for output to be populated with the dequeued event objects.
+ * @param nb_events
+ *   The maximum number of event objects to dequeue, typically number of
+ *   rte_event_port_dequeue_depth() available for this port.
+ *
+ * @param timeout_ticks
+ *   - 0 no-wait, returns immediately if there is no event.
+ *   - >0 wait for the event. If the device is configured with
+ *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait until
+ *   at least one event is available or *timeout_ticks* time has elapsed.
+ *   If the device is not configured with
+ *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait until
+ *   an event is available or the *dequeue_timeout_ns* value previously
+ *   supplied to rte_event_dev_configure() has elapsed.
+ *
+ * @return
+ * The number of event objects actually dequeued from the port. The return
+ * value can be less than the value of the *nb_events* parameter when the
+ * event port's queue is not full.
+ *
+ * @see rte_event_port_dequeue_depth()
+ */
+static inline uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+			uint16_t nb_events, uint64_t timeout_ticks)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->dequeue)(dev->data->ports[port_id], ev,
+				       timeout_ticks);
+	else
+		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
+					     nb_events, timeout_ticks);
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
new file mode 100644
index 0000000000..b97cdf84fe
--- /dev/null
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(C) 2021 Marvell.
+ * Copyright 2016 NXP
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENTDEV_CORE_H_
+#define _RTE_EVENTDEV_CORE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
+/**< @internal Enqueue event on port of a device */
+
+typedef uint16_t (*event_enqueue_burst_t)(void *port,
+					  const struct rte_event ev[],
+					  uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
+				    uint64_t timeout_ticks);
+/**< @internal Dequeue event from port of a device */
+
+typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
+					  uint16_t nb_events,
+					  uint64_t timeout_ticks);
+/**< @internal Dequeue burst of events from port of a device */
+
+typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
+					       struct rte_event ev[],
+					       uint16_t nb_events);
+/**< @internal Enqueue burst of events on eth Tx adapter of a device */
+
+typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
+/**< @internal Enqueue burst of events on crypto adapter */
+
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void **ports;
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf *ports_cfg;
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf *queues_cfg;
+	/**< Array of queue configuration structures. */
+	uint16_t *links_map;
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< Pointer to PMD crypto adapter enqueue function. */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_RTE_EVENTDEV_CORE_H_*/
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 03/14] eventdev: allocate max space for internal arrays
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
  2021-10-06  6:49     ` [dpdk-dev] [PATCH v3 02/14] eventdev: separate internal structures pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 04/14] eventdev: move inline APIs into separate structure pbhagavatula
                       ` (13 subsequent siblings)
  15 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Bruce Richardson, Anatoly Burakov; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Allocate max space for internal port, port config, queue config and
link map arrays.
Introduce new macro RTE_EVENT_MAX_PORTS_PER_DEV and set it to max
possible value.
This simplifies the port and queue reconfigure scenarios and will
also allow inline functions to refer to internal port data pointers
without extra checks on the current number of configured queues.
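
A minimal sketch (not part of this patch; the struct and function names below
are illustrative only) of why statically sized arrays let inline fast-path
helpers index per-port data without reallocation or extra bounds bookkeeping:

#include <stdint.h>

#define MAX_PORTS_EXAMPLE  255
#define MAX_QUEUES_EXAMPLE 255

struct dev_data_example {
	void *ports[MAX_PORTS_EXAMPLE];
	uint16_t links_map[MAX_PORTS_EXAMPLE * MAX_QUEUES_EXAMPLE];
};

static inline void *
port_data_example(struct dev_data_example *data, uint8_t port_id)
{
	/* Assumes port_id was already validated against nb_ports. Storage
	 * exists for every valid port id even across reconfiguration, so no
	 * realloc or NULL check is needed here.
	 */
	return data->ports[port_id];
}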

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 config/rte_config.h              |   1 +
 lib/eventdev/rte_eventdev.c      | 154 +++++++------------------------
 lib/eventdev/rte_eventdev_core.h |   9 +-
 3 files changed, 38 insertions(+), 126 deletions(-)

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c07d..e0ead8b251 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -72,6 +72,7 @@
 
 /* eventdev defines */
 #define RTE_EVENT_MAX_DEVS 16
+#define RTE_EVENT_MAX_PORTS_PER_DEV 255
 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255
 #define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32
 #define RTE_EVENT_ETH_INTR_RING_SIZE 1024
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index e347d6dfd5..bfcfa31cd1 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -209,7 +209,7 @@ rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 }
 
 static inline int
-rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
+event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 {
 	uint8_t old_nb_queues = dev->data->nb_queues;
 	struct rte_event_queue_conf *queues_cfg;
@@ -218,37 +218,13 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
 			 dev->data->dev_id);
 
-	/* First time configuration */
-	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
-		/* Allocate memory to store queue configuration */
-		dev->data->queues_cfg = rte_zmalloc_socket(
-				"eventdev->data->queues_cfg",
-				sizeof(dev->data->queues_cfg[0]) * nb_queues,
-				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->queues_cfg == NULL) {
-			dev->data->nb_queues = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
-					"nb_queues %u", nb_queues);
-			return -(ENOMEM);
-		}
-	/* Re-configure */
-	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
+	if (nb_queues != 0) {
+		queues_cfg = dev->data->queues_cfg;
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
 			(*dev->dev_ops->queue_release)(dev, i);
 
-		/* Re allocate memory to store queue configuration */
-		queues_cfg = dev->data->queues_cfg;
-		queues_cfg = rte_realloc(queues_cfg,
-				sizeof(queues_cfg[0]) * nb_queues,
-				RTE_CACHE_LINE_SIZE);
-		if (queues_cfg == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
-						" nb_queues %u", nb_queues);
-			return -(ENOMEM);
-		}
-		dev->data->queues_cfg = queues_cfg;
 
 		if (nb_queues > old_nb_queues) {
 			uint8_t new_qs = nb_queues - old_nb_queues;
@@ -256,7 +232,7 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 			memset(queues_cfg + old_nb_queues, 0,
 				sizeof(queues_cfg[0]) * new_qs);
 		}
-	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
+	} else {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
@@ -270,7 +246,7 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
 
 static inline int
-rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
+event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 {
 	uint8_t old_nb_ports = dev->data->nb_ports;
 	void **ports;
@@ -281,46 +257,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
 			 dev->data->dev_id);
 
-	/* First time configuration */
-	if (dev->data->ports == NULL && nb_ports != 0) {
-		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
-				sizeof(dev->data->ports[0]) * nb_ports,
-				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->ports == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Allocate memory to store port configurations */
-		dev->data->ports_cfg =
-			rte_zmalloc_socket("eventdev->ports_cfg",
-			sizeof(dev->data->ports_cfg[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->ports_cfg == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Allocate memory to store queue to port link connection */
-		dev->data->links_map =
-			rte_zmalloc_socket("eventdev->links_map",
-			sizeof(dev->data->links_map[0]) * nb_ports *
-			RTE_EVENT_MAX_QUEUES_PER_DEV,
-			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->links_map == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
-			dev->data->links_map[i] =
-				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
-	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
+	if (nb_ports != 0) { /* re-config */
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
@@ -330,37 +267,6 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 		for (i = nb_ports; i < old_nb_ports; i++)
 			(*dev->dev_ops->port_release)(ports[i]);
 
-		/* Realloc memory for ports */
-		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
-				RTE_CACHE_LINE_SIZE);
-		if (ports == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
-						" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory for ports_cfg */
-		ports_cfg = rte_realloc(ports_cfg,
-			sizeof(ports_cfg[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE);
-		if (ports_cfg == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
-						" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory to store queue to port link connection */
-		links_map = rte_realloc(links_map,
-			sizeof(dev->data->links_map[0]) * nb_ports *
-			RTE_EVENT_MAX_QUEUES_PER_DEV,
-			RTE_CACHE_LINE_SIZE);
-		if (links_map == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
 		if (nb_ports > old_nb_ports) {
 			uint8_t new_ps = nb_ports - old_nb_ports;
 			unsigned int old_links_map_end =
@@ -376,16 +282,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 				links_map[i] =
 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 		}
-
-		dev->data->ports = ports;
-		dev->data->ports_cfg = ports_cfg;
-		dev->data->links_map = links_map;
-	} else if (dev->data->ports != NULL && nb_ports == 0) {
+	} else {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
-		for (i = nb_ports; i < old_nb_ports; i++)
+		for (i = nb_ports; i < old_nb_ports; i++) {
 			(*dev->dev_ops->port_release)(ports[i]);
+			ports[i] = NULL;
+		}
 	}
 
 	dev->data->nb_ports = nb_ports;
@@ -550,19 +454,19 @@ rte_event_dev_configure(uint8_t dev_id,
 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
 
 	/* Setup new number of queues and reconfigure device. */
-	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
+	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
 	if (diag != 0) {
-		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
-				dev_id, diag);
+		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
+				 diag);
 		return diag;
 	}
 
 	/* Setup new number of ports and reconfigure device. */
-	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
+	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
 	if (diag != 0) {
-		rte_event_dev_queue_config(dev, 0);
-		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
-				dev_id, diag);
+		event_dev_queue_config(dev, 0);
+		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
+				 diag);
 		return diag;
 	}
 
@@ -570,8 +474,8 @@ rte_event_dev_configure(uint8_t dev_id,
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
-		rte_event_dev_queue_config(dev, 0);
-		rte_event_dev_port_config(dev, 0);
+		event_dev_queue_config(dev, 0);
+		event_dev_port_config(dev, 0);
 	}
 
 	dev->data->event_dev_cap = info.event_dev_cap;
@@ -1403,8 +1307,8 @@ rte_event_dev_close(uint8_t dev_id)
 }
 
 static inline int
-rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
-		int socket_id)
+eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+		    int socket_id)
 {
 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
 	const struct rte_memzone *mz;
@@ -1426,14 +1330,20 @@ rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
 		return -ENOMEM;
 
 	*data = mz->addr;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		memset(*data, 0, sizeof(struct rte_eventdev_data));
+		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
+					RTE_EVENT_MAX_QUEUES_PER_DEV;
+		     n++)
+			(*data)->links_map[n] =
+				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+	}
 
 	return 0;
 }
 
 static inline uint8_t
-rte_eventdev_find_free_device_index(void)
+eventdev_find_free_device_index(void)
 {
 	uint8_t dev_id;
 
@@ -1475,7 +1385,7 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 		return NULL;
 	}
 
-	dev_id = rte_eventdev_find_free_device_index();
+	dev_id = eventdev_find_free_device_index();
 	if (dev_id == RTE_EVENT_MAX_DEVS) {
 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
 		return NULL;
@@ -1490,8 +1400,8 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
-		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
-				socket_id);
+		int retval =
+			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
 
 		if (retval < 0 || eventdev_data == NULL)
 			return NULL;
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index b97cdf84fe..115b97e431 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -58,13 +58,14 @@ struct rte_eventdev_data {
 	/**< Number of event queues. */
 	uint8_t nb_ports;
 	/**< Number of event ports. */
-	void **ports;
+	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
 	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
+	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
 	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
+	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
+	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
+			   RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/**< Memory to store queues to port connections. */
 	void *dev_private;
 	/**< PMD-specific private data */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 04/14] eventdev: move inline APIs into separate structure
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
  2021-10-06  6:49     ` [dpdk-dev] [PATCH v3 02/14] eventdev: separate internal structures pbhagavatula
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 03/14] eventdev: allocate max space for internal arrays pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-14  9:20       ` Jerin Jacob
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 05/14] drivers/event: invoke probing finish function pbhagavatula
                       ` (12 subsequent siblings)
  15 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move fastpath inline function pointers from rte_eventdev into a
separate structure accessed via a flat array.
The intention is to make rte_eventdev and related structures private
to avoid future API/ABI breakages.
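
As a rough illustration of the flat-array dispatch pattern this series moves
towards (all names below are invented for the sketch and are not the library's
API), the public inline wrappers only need to touch a stable function-pointer
table indexed by device id, so the device structure itself can stay private:

#include <stdint.h>

typedef uint16_t (*enq_fn_example_t)(void *port, const void *ev);

struct fp_ops_example {
	enq_fn_example_t enqueue; /* set at device start, reset at stop */
	void **data;              /* per-port private data pointers */
};

extern struct fp_ops_example fp_ops_example[16]; /* indexed by dev_id */

static inline uint16_t
enqueue_example(uint8_t dev_id, uint8_t port_id, const void *ev)
{
	const struct fp_ops_example *ops = &fp_ops_example[dev_id];

	/* The driver's device structure never has to be visible here. */
	return ops->enqueue(ops->data[port_id], ev);
}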

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/eventdev_pmd.h      |  38 +++++++++++
 lib/eventdev/eventdev_pmd_pci.h  |   4 +-
 lib/eventdev/eventdev_private.c  | 112 +++++++++++++++++++++++++++++++
 lib/eventdev/meson.build         |   1 +
 lib/eventdev/rte_eventdev.c      |  22 +++++-
 lib/eventdev/rte_eventdev_core.h |  28 ++++++++
 lib/eventdev/version.map         |   6 ++
 7 files changed, 209 insertions(+), 2 deletions(-)
 create mode 100644 lib/eventdev/eventdev_private.c

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 7eb2aa0520..b188280778 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -1189,4 +1189,42 @@ __rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);
 
+/**
+ *
+ * @internal
+ * This is the last step of device probing.
+ * It must be called after a port is allocated and initialized successfully.
+ *
+ * @param eventdev
+ *  New event device.
+ */
+__rte_internal
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev);
+
+/**
+ * Reset event device fastpath APIs to dummy values.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to reset.
+ */
+__rte_internal
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op);
+
+/**
+ * Set event device fastpath APIs to the values of the given event device.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to set.
+ */
+__rte_internal
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_ops,
+		     const struct rte_eventdev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _RTE_EVENTDEV_PMD_H_ */
diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index 2f12a5eb24..499852db16 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -67,8 +67,10 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 
 	/* Invoke PMD device initialization function */
 	retval = devinit(eventdev);
-	if (retval == 0)
+	if (retval == 0) {
+		event_dev_probing_finish(eventdev);
 		return 0;
+	}
 
 	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
 			" failed", pci_drv->driver.name,
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
new file mode 100644
index 0000000000..9084833847
--- /dev/null
+++ b/lib/eventdev/eventdev_private.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "eventdev_pmd.h"
+#include "rte_eventdev.h"
+
+static uint16_t
+dummy_event_enqueue(__rte_unused void *port,
+		    __rte_unused const struct rte_event *ev)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_enqueue_burst(__rte_unused void *port,
+			  __rte_unused const struct rte_event ev[],
+			  __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
+		    __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue_burst(__rte_unused void *port,
+			  __rte_unused struct rte_event ev[],
+			  __rte_unused uint16_t nb_events,
+			  __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue(__rte_unused void *port,
+			       __rte_unused struct rte_event ev[],
+			       __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue_same_dest(__rte_unused void *port,
+					 __rte_unused struct rte_event ev[],
+					 __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue same destination requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_crypto_adapter_enqueue(__rte_unused void *port,
+				   __rte_unused struct rte_event ev[],
+				   __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event crypto adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
+{
+	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+	static const struct rte_event_fp_ops dummy = {
+		.enqueue = dummy_event_enqueue,
+		.enqueue_burst = dummy_event_enqueue_burst,
+		.enqueue_new_burst = dummy_event_enqueue_burst,
+		.enqueue_forward_burst = dummy_event_enqueue_burst,
+		.dequeue = dummy_event_dequeue,
+		.dequeue_burst = dummy_event_dequeue_burst,
+		.txa_enqueue = dummy_event_tx_adapter_enqueue,
+		.txa_enqueue_same_dest =
+			dummy_event_tx_adapter_enqueue_same_dest,
+		.ca_enqueue = dummy_event_crypto_adapter_enqueue,
+		.data = dummy_data,
+	};
+
+	*fp_op = dummy;
+}
+
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
+		     const struct rte_eventdev *dev)
+{
+	fp_op->enqueue = dev->enqueue;
+	fp_op->enqueue_burst = dev->enqueue_burst;
+	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
+	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
+	fp_op->dequeue = dev->dequeue;
+	fp_op->dequeue_burst = dev->dequeue_burst;
+	fp_op->txa_enqueue = dev->txa_enqueue;
+	fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
+	fp_op->ca_enqueue = dev->ca_enqueue;
+	fp_op->data = dev->data->ports;
+}
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 8b51fde361..9051ff04b7 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -8,6 +8,7 @@ else
 endif
 
 sources = files(
+        'eventdev_private.c',
         'rte_eventdev.c',
         'rte_event_ring.c',
         'eventdev_trace_points.c',
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index bfcfa31cd1..4c30a37831 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -46,6 +46,9 @@ static struct rte_eventdev_global eventdev_globals = {
 	.nb_devs		= 0
 };
 
+/* Public fastpath APIs. */
+struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 /* Event dev north bound API implementation */
 
 uint8_t
@@ -300,8 +303,8 @@ int
 rte_event_dev_configure(uint8_t dev_id,
 			const struct rte_event_dev_config *dev_conf)
 {
-	struct rte_eventdev *dev;
 	struct rte_event_dev_info info;
+	struct rte_eventdev *dev;
 	int diag;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -470,10 +473,13 @@ rte_event_dev_configure(uint8_t dev_id,
 		return diag;
 	}
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
+
 	/* Configure the device */
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 		event_dev_queue_config(dev, 0);
 		event_dev_port_config(dev, 0);
 	}
@@ -1244,6 +1250,8 @@ rte_event_dev_start(uint8_t dev_id)
 	else
 		return diag;
 
+	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
+
 	return 0;
 }
 
@@ -1284,6 +1292,7 @@ rte_event_dev_stop(uint8_t dev_id)
 	dev->data->dev_started = 0;
 	(*dev->dev_ops->dev_stop)(dev);
 	rte_eventdev_trace_stop(dev_id);
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 }
 
 int
@@ -1302,6 +1311,7 @@ rte_event_dev_close(uint8_t dev_id)
 		return -EBUSY;
 	}
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 	rte_eventdev_trace_close(dev_id);
 	return (*dev->dev_ops->dev_close)(dev);
 }
@@ -1435,6 +1445,7 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
 	if (eventdev == NULL)
 		return -EINVAL;
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
 	eventdev->attached = RTE_EVENTDEV_DETACHED;
 	eventdev_globals.nb_devs--;
 
@@ -1460,6 +1471,15 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
 	return 0;
 }
 
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev)
+{
+	if (eventdev == NULL)
+		return;
+
+	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
+			     eventdev);
+}
 
 static int
 handle_dev_list(const char *cmd __rte_unused,
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 115b97e431..4461073101 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -39,6 +39,34 @@ typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
 						   uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */
 
+struct rte_event_fp_ops {
+	event_enqueue_t enqueue;
+	/**< PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< PMD enqueue burst new function. */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< PMD enqueue burst fwd function. */
+	event_dequeue_t dequeue;
+	/**< PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< PMD Tx adapter enqueue function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< PMD Tx adapter enqueue same destination function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< PMD Crypto adapter enqueue function. */
+	uintptr_t reserved[2];
+
+	void **data;
+	/**< points to array of internal port data pointers */
+	uintptr_t reserved2[4];
+} __rte_cache_aligned;
+
+extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
 /**< @internal Max length of name of event PMD */
 
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 5f1fe412a4..a3a732089b 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -85,6 +85,9 @@ DPDK_22 {
 	rte_event_timer_cancel_burst;
 	rte_eventdevs;
 
+	#added in 21.11
+	rte_event_fp_ops;
+
 	local: *;
 };
 
@@ -141,6 +144,9 @@ EXPERIMENTAL {
 INTERNAL {
 	global:
 
+	event_dev_fp_ops_reset;
+	event_dev_fp_ops_set;
+	event_dev_probing_finish;
 	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 05/14] drivers/event: invoke probing finish function
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (2 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 04/14] eventdev: move inline APIs into separate structure pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-14  9:22       ` Jerin Jacob
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline functions pbhagavatula
                       ` (11 subsequent siblings)
  15 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Hemant Agrawal, Nipun Gupta, Mattias Rönnblom,
	Liang Ma, Peter Mccarthy, Harry van Haaren
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Invoke event_dev_probing_finish() at the end of probing; this function
sets the function pointers in the fp_ops flat array.
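
For reference, a rough sketch of where the call is expected to sit in a vdev
PMD probe path (everything except event_dev_probing_finish() and the existing
eventdev PMD helpers is illustrative):

static int
probe_example(struct rte_vdev_device *vdev)
{
	struct rte_eventdev *dev;

	dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
				      sizeof(struct priv_example),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	/* ... fill in dev->dev_ops and the fast-path function pointers ... */

	/* Last step: publish the fast-path pointers into the fp_ops array. */
	event_dev_probing_finish(dev);
	return 0;
}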

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/dpaa/dpaa_eventdev.c         | 4 +++-
 drivers/event/dpaa2/dpaa2_eventdev.c       | 4 +++-
 drivers/event/dsw/dsw_evdev.c              | 1 +
 drivers/event/octeontx/ssovf_evdev.c       | 1 +
 drivers/event/opdl/opdl_evdev.c            | 4 +++-
 drivers/event/skeleton/skeleton_eventdev.c | 1 +
 drivers/event/sw/sw_evdev.c                | 2 ++
 7 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 9f14390d28..14ca341829 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -1026,10 +1026,12 @@ dpaa_event_dev_create(const char *name, const char *params)
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;
 
 	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
 
+done:
+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index d577f64824..1d3ad8ffd6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1110,7 +1110,7 @@ dpaa2_eventdev_create(const char *name)
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;
 
 	priv = eventdev->data->dev_private;
 	priv->max_event_queues = 0;
@@ -1139,6 +1139,8 @@ dpaa2_eventdev_create(const char *name)
 
 	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
 
+done:
+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 01f060fff3..17568967be 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -448,6 +448,7 @@ dsw_probe(struct rte_vdev_device *vdev)
 	dsw = dev->data->dev_private;
 	dsw->data = dev->data;
 
+	event_dev_probing_finish(dev);
 	return 0;
 }
 
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 4a8c6a13a5..eb80eeafe1 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -933,6 +933,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
 			edev->max_event_ports);
 
 	ssovf_init_once = 1;
+	event_dev_probing_finish(eventdev);
 	return 0;
 
 error:
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 739dc64c82..5007e9a7bf 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -720,7 +720,7 @@ opdl_probe(struct rte_vdev_device *vdev)
 	dev->dequeue_burst = opdl_event_dequeue_burst;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;
 
 	opdl = dev->data->dev_private;
 	opdl->data = dev->data;
@@ -733,6 +733,8 @@ opdl_probe(struct rte_vdev_device *vdev)
 	if (do_test == 1)
 		test_result =  opdl_selftest();
 
+done:
+	event_dev_probing_finish(dev);
 	return test_result;
 }
 
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index c9e17e7cb1..af0efb3302 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -443,6 +443,7 @@ skeleton_eventdev_create(const char *name, int socket_id)
 	eventdev->dequeue       = skeleton_eventdev_dequeue;
 	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
 
+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 9b72073322..e99b47afbe 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -1124,6 +1124,8 @@ sw_probe(struct rte_vdev_device *vdev)
 	dev->data->service_inited = 1;
 	dev->data->service_id = sw->service_id;
 
+	event_dev_probing_finish(dev);
+
 	return 0;
 }
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline functions
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (3 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 05/14] drivers/event: invoke probing finish function pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-11  9:51       ` Gujjar, Abhinandan S
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 07/14] eventdev: hide event device related structures pbhagavatula
                       ` (10 subsequent siblings)
  15 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Use the new driver interface for the fastpath enqueue/dequeue inline
functions.
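
Application code is unaffected; a minimal worker loop using the public inline
APIs (the dev_id/port_id values and the processing step are assumptions for
the sketch) still looks like:

#include <rte_eventdev.h>

static void
worker_example(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev;

	while (1) {
		/* timeout_ticks == 0: return immediately if no event. */
		if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
			continue;

		/* ... process the event ... */

		ev.op = RTE_EVENT_OP_FORWARD;
		rte_event_enqueue_forward_burst(dev_id, port_id, &ev, 1);
	}
}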

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 lib/eventdev/rte_event_crypto_adapter.h | 15 +++++---
 lib/eventdev/rte_event_eth_tx_adapter.h | 15 ++++----
 lib/eventdev/rte_eventdev.h             | 46 +++++++++++++++----------
 3 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index 431d05b6ed..eb82818d05 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -568,12 +568,19 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 				struct rte_event ev[],
 				uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
+		rte_errno = EINVAL;
+		return 0;
+	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -581,7 +588,7 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
 		nb_events);
 
-	return dev->ca_enqueue(dev->data->ports[port_id], ev, nb_events);
+	return fp_ops->ca_enqueue(port, ev, nb_events);
 }
 
 #ifdef __cplusplus
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
index 8c59547165..3908c2ded5 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/eventdev/rte_event_eth_tx_adapter.h
@@ -355,16 +355,19 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 				uint16_t nb_events,
 				const uint8_t flags)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
-		!rte_eventdevs[dev_id].attached) {
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -372,11 +375,9 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
 		nb_events, flags);
 	if (flags)
-		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
-						  ev, nb_events);
+		return fp_ops->txa_enqueue_same_dest(port, ev, nb_events);
 	else
-		return dev->txa_enqueue(dev->data->ports[port_id], ev,
-					nb_events);
+		return fp_ops->txa_enqueue(port, ev, nb_events);
 }
 
 /**
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 1b11d4576d..31fa9ac4b8 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1747,15 +1747,19 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			  const struct rte_event ev[], uint16_t nb_events,
 			  const event_enqueue_burst_t fn)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -1766,9 +1770,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+		return (fp_ops->enqueue)(port, ev);
 	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
+		return fn(port, ev, nb_events);
 }
 
 /**
@@ -1818,10 +1822,11 @@ static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_burst);
+					 fp_ops->enqueue_burst);
 }
 
 /**
@@ -1869,10 +1874,11 @@ static inline uint16_t
 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
 			    const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_new_burst);
+					 fp_ops->enqueue_new_burst);
 }
 
 /**
@@ -1920,10 +1926,11 @@ static inline uint16_t
 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
 				const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_forward_burst);
+					 fp_ops->enqueue_forward_burst);
 }
 
 /**
@@ -1996,15 +2003,19 @@ static inline uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 			uint16_t nb_events, uint64_t timeout_ticks)
 {
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -2015,11 +2026,10 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->dequeue)(dev->data->ports[port_id], ev,
-				       timeout_ticks);
+		return (fp_ops->dequeue)(port, ev, timeout_ticks);
 	else
-		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
-					     nb_events, timeout_ticks);
+		return (fp_ops->dequeue_burst)(port, ev, nb_events,
+					       timeout_ticks);
 }
 
 #ifdef __cplusplus
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 07/14] eventdev: hide event device related structures
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (4 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline functions pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 08/14] eventdev: hide timer adapter PMD file pbhagavatula
                       ` (9 subsequent siblings)
  15 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Timothy McDaniel, Mattias Rönnblom, Pavan Nikhilesh,
	Harman Kalra
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the rte_eventdev and rte_eventdev_data structures to eventdev_pmd.h
so that they are hidden from the public API and visible only to drivers.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
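As an illustration only (not part of this patch): after this change the
public rte_eventdev.h no longer exposes struct rte_eventdev or struct
rte_eventdev_data, so any driver source that touches them has to include
the driver-only header. A minimal sketch, where my_pmd_dump() is a
hypothetical helper:

#include <stdio.h>

#include <eventdev_pmd.h>

void
my_pmd_dump(const struct rte_eventdev *event_dev)
{
	/* The structure layout is unchanged; only its home header moved. */
	printf("%s: %u ports, %u queues\n", event_dev->data->name,
	       (unsigned int)event_dev->data->nb_ports,
	       (unsigned int)event_dev->data->nb_queues);
}

Applications are unaffected, since the inline fast-path wrappers now go
through rte_event_fp_ops[] instead of rte_eventdevs[].
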
 drivers/event/dlb2/dlb2_inline_fns.h   |  2 +
 drivers/event/dsw/dsw_evdev.h          |  2 +
 drivers/event/octeontx/timvf_worker.h  |  2 +
 drivers/net/octeontx/octeontx_ethdev.c |  3 +-
 lib/eventdev/eventdev_pmd.h            | 92 +++++++++++++++++++++++++
 lib/eventdev/rte_eventdev.c            | 22 ------
 lib/eventdev/rte_eventdev_core.h       | 93 --------------------------
 7 files changed, 100 insertions(+), 116 deletions(-)

diff --git a/drivers/event/dlb2/dlb2_inline_fns.h b/drivers/event/dlb2/dlb2_inline_fns.h
index ac8d01aa98..1429281cfd 100644
--- a/drivers/event/dlb2/dlb2_inline_fns.h
+++ b/drivers/event/dlb2/dlb2_inline_fns.h
@@ -5,6 +5,8 @@
 #ifndef _DLB2_INLINE_FNS_H_
 #define _DLB2_INLINE_FNS_H_
 
+#include <eventdev_pmd.h>
+
 /* Inline functions required in more than one source file. */
 
 static inline struct dlb2_eventdev *
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 08889a0990..631daea55c 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -5,6 +5,8 @@
 #ifndef _DSW_EVDEV_H_
 #define _DSW_EVDEV_H_
 
+#include <eventdev_pmd.h>
+
 #include <rte_event_ring.h>
 #include <rte_eventdev.h>
 
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index dede1a4a4f..3f1e77f1d1 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -2,6 +2,8 @@
  * Copyright(c) 2017 Cavium, Inc
  */
 
+#include <eventdev_pmd.h>
+
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 9f4c0503b4..c55304839e 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -9,13 +9,14 @@
 #include <string.h>
 #include <unistd.h>
 
+#include <eventdev_pmd.h>
 #include <rte_alarm.h>
 #include <rte_branch_prediction.h>
 #include <rte_bus_vdev.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
-#include <rte_devargs.h>
 #include <rte_dev.h>
+#include <rte_devargs.h>
 #include <rte_kvargs.h>
 #include <rte_malloc.h>
 #include <rte_mbuf_pool_ops.h>
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index b188280778..dab7f835de 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -80,6 +80,9 @@
 #define RTE_EVENTDEV_DETACHED  (0)
 #define RTE_EVENTDEV_ATTACHED  (1)
 
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
 struct rte_eth_dev;
 
 /** Global structure used for maintaining state of allocated event devices */
@@ -87,6 +90,95 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
+	/**< Array of queue configuration structures. */
+	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
+			   RTE_EVENT_MAX_QUEUES_PER_DEV];
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 4c30a37831..e55241defd 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -1365,24 +1365,6 @@ eventdev_find_free_device_index(void)
 	return RTE_EVENT_MAX_DEVS;
 }
 
-static uint16_t
-rte_event_tx_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
-static uint16_t
-rte_event_crypto_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id)
 {
@@ -1403,10 +1385,6 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 
 	eventdev = &rte_eventdevs[dev_id];
 
-	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
-	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
-	eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
-
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 4461073101..0da724fa86 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -67,99 +67,6 @@ struct rte_event_fp_ops {
 
 extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
 
-#define RTE_EVENTDEV_NAME_MAX_LEN (64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
-	/**< Array of queue configuration structures. */
-	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
-			   RTE_EVENT_MAX_QUEUES_PER_DEV];
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue_t txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue_t ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
 #ifdef __cplusplus
 }
 #endif
-- 
2.17.1


* [dpdk-dev] [PATCH v3 08/14] eventdev: hide timer adapter PMD file
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (5 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 07/14] eventdev: hide event device related structures pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
                       ` (8 subsequent siblings)
  15 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Mattias Rönnblom,
	Harry van Haaren, Erik Gabriel Carrillo
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Hide the rte_event_timer_adapter_pmd.h header as it is internal to the library.
Remove the rte_ prefix from the rte_event_timer_adapter_ops structure.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
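As an illustration only (not part of this patch): a driver's timer adapter
caps_get callback keeps the same shape and only picks up the renamed ops
structure and the de-prefixed header. The my_tim_* names below are
hypothetical:

#include <rte_common.h>

#include <eventdev_pmd.h> /* now also pulls in event_timer_adapter_pmd.h */

static struct event_timer_adapter_ops my_tim_ops; /* filled in elsewhere */

static int
my_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags, uint32_t *caps,
		const struct event_timer_adapter_ops **ops)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(flags);

	*caps = 0;
	*ops = &my_tim_ops;
	return 0;
}
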
 drivers/event/cnxk/cnxk_tim_evdev.c           |  5 ++--
 drivers/event/cnxk/cnxk_tim_evdev.h           |  2 +-
 drivers/event/dsw/dsw_evdev.c                 |  4 +--
 drivers/event/octeontx/ssovf_evdev.c          |  2 +-
 drivers/event/octeontx/timvf_evdev.c          | 17 ++++++-----
 drivers/event/octeontx/timvf_evdev.h          |  9 +++---
 drivers/event/octeontx2/otx2_tim_evdev.c      |  5 ++--
 drivers/event/octeontx2/otx2_tim_evdev.h      |  4 +--
 drivers/event/sw/sw_evdev.c                   |  5 ++--
 ...dapter_pmd.h => event_timer_adapter_pmd.h} |  8 ++---
 lib/eventdev/eventdev_pmd.h                   |  8 ++---
 lib/eventdev/meson.build                      |  2 +-
 lib/eventdev/rte_event_timer_adapter.c        | 30 +++++++++----------
 lib/eventdev/rte_event_timer_adapter.h        |  2 +-
 lib/eventdev/rte_eventdev.c                   |  2 +-
 15 files changed, 51 insertions(+), 54 deletions(-)
 rename lib/eventdev/{rte_event_timer_adapter_pmd.h => event_timer_adapter_pmd.h} (95%)

diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index 9d40e336d7..10634c31e3 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -5,7 +5,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops cnxk_tim_ops;
+static struct event_timer_adapter_ops cnxk_tim_ops;
 
 static int
 cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
@@ -353,8 +353,7 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index c369f6f472..91e163eb5a 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -267,7 +267,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 
 int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void cnxk_tim_init(struct roc_sso *sso);
 void cnxk_tim_fini(void);
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 17568967be..0652d83ad6 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -381,8 +381,8 @@ dsw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
 
 static int
 dsw_timer_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
-			   uint64_t flags  __rte_unused, uint32_t *caps,
-			   const struct rte_event_timer_adapter_ops **ops)
+			   uint64_t flags __rte_unused, uint32_t *caps,
+			   const struct event_timer_adapter_ops **ops)
 {
 	*caps = 0;
 	*ops = NULL;
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index eb80eeafe1..2245599810 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -721,7 +721,7 @@ ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque)
 
 static int
 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+		     uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
 			timvf_enable_stats);
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index 688e9daa66..1f1cda3f7f 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -407,18 +407,19 @@ timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
 	return 0;
 }
 
-static struct rte_event_timer_adapter_ops timvf_ops = {
-	.init		= timvf_ring_create,
-	.uninit		= timvf_ring_free,
-	.start		= timvf_ring_start,
-	.stop		= timvf_ring_stop,
-	.get_info	= timvf_ring_info_get,
+static struct event_timer_adapter_ops timvf_ops = {
+	.init = timvf_ring_create,
+	.uninit = timvf_ring_free,
+	.start = timvf_ring_start,
+	.stop = timvf_ring_stop,
+	.get_info = timvf_ring_info_get,
 };
 
 int
 timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats)
+			     uint32_t *caps,
+			     const struct event_timer_adapter_ops **ops,
+			     uint8_t enable_stats)
 {
 	RTE_SET_USED(dev);
 
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 2977063d66..cef02cd7f9 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -5,13 +5,13 @@
 #ifndef __TIMVF_EVDEV_H__
 #define __TIMVF_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
 #include <rte_eal.h>
-#include <rte_eventdev.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
+#include <rte_eventdev.h>
 #include <rte_io.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
@@ -196,8 +196,9 @@ uint8_t timvf_get_ring(void);
 void timvf_release_ring(uint8_t vfid);
 void *timvf_bar(uint8_t id, uint8_t bar);
 int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats);
+				 uint32_t *caps,
+				 const struct event_timer_adapter_ops **ops,
+				 uint8_t enable_stats);
 uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint16_t nb_timers);
 uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c
index de50c4c76e..7dcb291043 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.c
+++ b/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -9,7 +9,7 @@
 #include "otx2_evdev.h"
 #include "otx2_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops otx2_tim_ops;
+static struct event_timer_adapter_ops otx2_tim_ops;
 
 static inline int
 tim_get_msix_offsets(void)
@@ -497,8 +497,7 @@ otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct otx2_tim_evdev *dev = tim_priv_get();
 
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.h b/drivers/event/octeontx2/otx2_tim_evdev.h
index caa6ad3b3c..dac642e0e1 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.h
+++ b/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -5,8 +5,8 @@
 #ifndef __OTX2_TIM_EVDEV_H__
 #define __OTX2_TIM_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
 #include <rte_reciprocal.h>
 
 #include "otx2_dev.h"
@@ -244,7 +244,7 @@ uint16_t otx2_tim_timer_cancel_burst(
 
 int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev);
 void otx2_tim_fini(void);
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index e99b47afbe..070a4802e9 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -561,10 +561,9 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 }
 
 static int
-sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
-			  uint64_t flags,
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 			  uint32_t *caps,
-			  const struct rte_event_timer_adapter_ops **ops)
+			  const struct event_timer_adapter_ops **ops)
 {
 	RTE_SET_USED(dev);
 	RTE_SET_USED(flags);
diff --git a/lib/eventdev/rte_event_timer_adapter_pmd.h b/lib/eventdev/event_timer_adapter_pmd.h
similarity index 95%
rename from lib/eventdev/rte_event_timer_adapter_pmd.h
rename to lib/eventdev/event_timer_adapter_pmd.h
index cf3509dc6f..189017b5c1 100644
--- a/lib/eventdev/rte_event_timer_adapter_pmd.h
+++ b/lib/eventdev/event_timer_adapter_pmd.h
@@ -3,8 +3,8 @@
  * All rights reserved.
  */
 
-#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
-#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#ifndef __EVENT_TIMER_ADAPTER_PMD_H__
+#define __EVENT_TIMER_ADAPTER_PMD_H__
 
 /**
  * @file
@@ -57,7 +57,7 @@ typedef int (*rte_event_timer_adapter_stats_reset_t)(
  * @internal Structure containing the functions exported by an event timer
  * adapter implementation.
  */
-struct rte_event_timer_adapter_ops {
+struct event_timer_adapter_ops {
 	rte_event_timer_adapter_init_t		init;  /**< Set up adapter */
 	rte_event_timer_adapter_uninit_t	uninit;/**< Tear down adapter */
 	rte_event_timer_adapter_start_t		start; /**< Start adapter */
@@ -111,4 +111,4 @@ struct rte_event_timer_adapter_data {
 }
 #endif
 
-#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
+#endif /* __EVENT_TIMER_ADAPTER_PMD_H__ */
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index dab7f835de..0acf8fb2fa 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -24,8 +24,8 @@
 #include <rte_mbuf.h>
 #include <rte_mbuf_dyn.h>
 
+#include "event_timer_adapter_pmd.h"
 #include "rte_eventdev.h"
-#include "rte_event_timer_adapter_pmd.h"
 
 /* Logging Macros */
 #define RTE_EDEV_LOG_ERR(...) \
@@ -591,10 +591,8 @@ struct rte_event_eth_rx_adapter_queue_conf;
  *
  */
 typedef int (*eventdev_timer_adapter_caps_get_t)(
-				const struct rte_eventdev *dev,
-				uint64_t flags,
-				uint32_t *caps,
-				const struct rte_event_timer_adapter_ops **ops);
+	const struct rte_eventdev *dev, uint64_t flags, uint32_t *caps,
+	const struct event_timer_adapter_ops **ops);
 
 /**
  * Add ethernet Rx queues to event device. This callback is invoked if
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 9051ff04b7..f19b831edd 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -24,7 +24,6 @@ headers = files(
         'rte_event_ring.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_timer_adapter.h',
-        'rte_event_timer_adapter_pmd.h',
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
@@ -35,6 +34,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'event_timer_adapter_pmd.h',
 )
 
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ee20b39f4b..ae55407042 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -20,11 +20,11 @@
 #include <rte_service_component.h>
 #include <rte_cycles.h>
 
-#include "rte_eventdev.h"
+#include "event_timer_adapter_pmd.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
 #include "rte_event_timer_adapter.h"
-#include "rte_event_timer_adapter_pmd.h"
+#include "rte_eventdev.h"
+#include "rte_eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
@@ -35,7 +35,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
 
-static const struct rte_event_timer_adapter_ops swtim_ops;
+static const struct event_timer_adapter_ops swtim_ops;
 
 #define EVTIM_LOG(level, logtype, ...) \
 	rte_log(RTE_LOG_ ## level, logtype, \
@@ -1207,15 +1207,15 @@ swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
 	return __swtim_arm_burst(adapter, evtims, nb_evtims);
 }
 
-static const struct rte_event_timer_adapter_ops swtim_ops = {
-	.init			= swtim_init,
-	.uninit			= swtim_uninit,
-	.start			= swtim_start,
-	.stop			= swtim_stop,
-	.get_info		= swtim_get_info,
-	.stats_get		= swtim_stats_get,
-	.stats_reset		= swtim_stats_reset,
-	.arm_burst		= swtim_arm_burst,
-	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
-	.cancel_burst		= swtim_cancel_burst,
+static const struct event_timer_adapter_ops swtim_ops = {
+	.init = swtim_init,
+	.uninit = swtim_uninit,
+	.start = swtim_start,
+	.stop = swtim_stop,
+	.get_info = swtim_get_info,
+	.stats_get = swtim_stats_get,
+	.stats_reset = swtim_stats_reset,
+	.arm_burst = swtim_arm_burst,
+	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
+	.cancel_burst = swtim_cancel_burst,
 };
diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index 4e0d2a819b..cad6d3b4c5 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -523,7 +523,7 @@ struct rte_event_timer_adapter {
 	/**< Pointer to driver cancel function. */
 	struct rte_event_timer_adapter_data *data;
 	/**< Pointer to shared adapter data */
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 	/**< Functions exported by adapter driver */
 
 	RTE_STD_C11
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index e55241defd..de6346194e 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -142,7 +142,7 @@ int
 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
 {
 	struct rte_eventdev *dev;
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 
-- 
2.17.1


* [dpdk-dev] [PATCH v3 09/14] eventdev: remove rte prefix for internal structs
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (6 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 08/14] eventdev: hide timer adapter PMD file pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-11  9:58       ` Gujjar, Abhinandan S
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 10/14] eventdev: rearrange fields in timer object pbhagavatula
                       ` (7 subsequent siblings)
  15 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Remove the rte_ prefix from rte_eth_event_enqueue_buffer,
rte_event_eth_rx_adapter and rte_event_crypto_adapter, as these
structures are used only within rte_event_eth_rx_adapter.c and
rte_event_crypto_adapter.c.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
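As an illustration only (not part of this patch): the renamed structures are
private to the adapter .c files, so the public API is untouched and
application code such as the hypothetical helper below builds unchanged:

#include <stdint.h>

#include <rte_event_crypto_adapter.h>

static int
crypto_adapter_launch(uint8_t id)
{
	uint32_t service_id;
	int ret;

	ret = rte_event_crypto_adapter_start(id);
	if (ret < 0)
		return ret;

	/* In SW mode the adapter runs as a service; mapping that service to
	 * a service core is left to the application.
	 */
	return rte_event_crypto_adapter_service_id_get(id, &service_id);
}
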
 lib/eventdev/rte_event_crypto_adapter.c |  66 +++----
 lib/eventdev/rte_event_eth_rx_adapter.c | 249 ++++++++++--------------
 lib/eventdev/rte_eventdev.h             |   2 +-
 3 files changed, 141 insertions(+), 176 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index ebfc8326a8..e9e660a3d2 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -30,7 +30,7 @@
  */
 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
 
-struct rte_event_crypto_adapter {
+struct event_crypto_adapter {
 	/* Event device identifier */
 	uint8_t eventdev_id;
 	/* Event port identifier */
@@ -99,7 +99,7 @@ struct crypto_queue_pair_info {
 	uint8_t len;
 } __rte_cache_aligned;
 
-static struct rte_event_crypto_adapter **event_crypto_adapter;
+static struct event_crypto_adapter **event_crypto_adapter;
 
 /* Macros to check for valid adapter */
 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -141,7 +141,7 @@ eca_init(void)
 	return 0;
 }
 
-static inline struct rte_event_crypto_adapter *
+static inline struct event_crypto_adapter *
 eca_id_to_adapter(uint8_t id)
 {
 	return event_crypto_adapter ?
@@ -158,7 +158,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	int ret;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
 
 	if (adapter == NULL)
 		return -EINVAL;
@@ -202,7 +202,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				enum rte_event_crypto_adapter_mode mode,
 				void *conf_arg)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
 	struct rte_event_dev_info dev_info;
 	int socket_id;
@@ -304,7 +304,7 @@ rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_crypto_adapter_free(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -329,8 +329,8 @@ rte_event_crypto_adapter_free(uint8_t id)
 }
 
 static inline unsigned int
-eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
-		 struct rte_event *ev, unsigned int cnt)
+eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+		     unsigned int cnt)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -420,7 +420,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
 }
 
 static unsigned int
-eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -466,8 +466,8 @@ eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
 }
 
 static int
-eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_enq)
+eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_enq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct rte_event ev[BATCH_SIZE];
@@ -500,8 +500,8 @@ eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline void
-eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
-		  struct rte_crypto_op **ops, uint16_t num)
+eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
+		      struct rte_crypto_op **ops, uint16_t num)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -564,8 +564,8 @@ eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline unsigned int
-eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_deq)
+eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_deq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -627,8 +627,8 @@ eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static void
-eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_ops)
+eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
+		       unsigned int max_ops)
 {
 	while (max_ops) {
 		unsigned int e_cnt, d_cnt;
@@ -648,7 +648,7 @@ eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
 static int
 eca_service_func(void *args)
 {
-	struct rte_event_crypto_adapter *adapter = args;
+	struct event_crypto_adapter *adapter = args;
 
 	if (rte_spinlock_trylock(&adapter->lock) == 0)
 		return 0;
@@ -659,7 +659,7 @@ eca_service_func(void *args)
 }
 
 static int
-eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
 {
 	struct rte_event_crypto_adapter_conf adapter_conf;
 	struct rte_service_spec service;
@@ -699,10 +699,9 @@ eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
 }
 
 static void
-eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
-			struct crypto_device_info *dev_info,
-			int32_t queue_pair_id,
-			uint8_t add)
+eca_update_qp_info(struct event_crypto_adapter *adapter,
+		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
+		   uint8_t add)
 {
 	struct crypto_queue_pair_info *qp_info;
 	int enabled;
@@ -729,9 +728,8 @@ eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
 }
 
 static int
-eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
-		uint8_t cdev_id,
-		int queue_pair_id)
+eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
+		   int queue_pair_id)
 {
 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
 	struct crypto_queue_pair_info *qpairs;
@@ -773,7 +771,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			int32_t queue_pair_id,
 			const struct rte_event *event)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
 	uint32_t cap;
@@ -889,7 +887,7 @@ int
 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 					int32_t queue_pair_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	int ret;
@@ -975,7 +973,7 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 static int
 eca_adapter_ctrl(uint8_t id, int start)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1017,7 +1015,7 @@ eca_adapter_ctrl(uint8_t id, int start)
 int
 rte_event_crypto_adapter_start(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 	adapter = eca_id_to_adapter(id);
@@ -1039,7 +1037,7 @@ int
 rte_event_crypto_adapter_stats_get(uint8_t id,
 				struct rte_event_crypto_adapter_stats *stats)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_crypto_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -1083,7 +1081,7 @@ rte_event_crypto_adapter_stats_get(uint8_t id,
 int
 rte_event_crypto_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1111,7 +1109,7 @@ rte_event_crypto_adapter_stats_reset(uint8_t id)
 int
 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -1128,7 +1126,7 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 13dfb28401..f8225ebd3d 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -78,14 +78,14 @@ struct eth_rx_vector_data {
 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
 
 /* Instance per adapter */
-struct rte_eth_event_enqueue_buffer {
+struct eth_event_enqueue_buffer {
 	/* Count of events in this buffer */
 	uint16_t count;
 	/* Array of events in this buffer */
 	struct rte_event events[ETH_EVENT_BUFFER_SIZE];
 };
 
-struct rte_event_eth_rx_adapter {
+struct event_eth_rx_adapter {
 	/* RSS key */
 	uint8_t rss_key_be[RSS_KEY_SIZE];
 	/* Event device identifier */
@@ -109,7 +109,7 @@ struct rte_event_eth_rx_adapter {
 	/* Next entry in wrr[] to begin polling */
 	uint32_t wrr_pos;
 	/* Event burst buffer */
-	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+	struct eth_event_enqueue_buffer event_enqueue_buffer;
 	/* Vector enable flag */
 	uint8_t ena_vector;
 	/* Timestamp of previous vector expiry list traversal */
@@ -231,7 +231,7 @@ struct eth_rx_queue_info {
 	struct eth_rx_vector_data vector_data;
 };
 
-static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+static struct event_eth_rx_adapter **event_eth_rx_adapter;
 
 static inline int
 rxa_validate_id(uint8_t id)
@@ -247,7 +247,7 @@ rxa_validate_id(uint8_t id)
 } while (0)
 
 static inline int
-rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
 {
 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
 }
@@ -265,10 +265,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
  */
 static int
-rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
-	 unsigned int n, int *cw,
-	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
-	 uint16_t gcd, int prev)
+rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
+	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+	     uint16_t gcd, int prev)
 {
 	int i = prev;
 	uint16_t w;
@@ -373,10 +372,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
  */
 static void
-rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_intr)
+rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_intr)
 {
 	uint32_t intr_diff;
 
@@ -392,12 +390,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * interrupt queues could currently be poll mode Rx queues
  */
 static void
-rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+			  uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -424,11 +420,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
  * after deleting poll mode rx queues
  */
 static void
-rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
 {
 	uint32_t poll_diff;
 	uint32_t wrr_len_diff;
@@ -449,13 +443,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
 /* Calculate nb_rx_* after adding poll mode rx queues
  */
 static void
-rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint16_t wt,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint16_t wt, uint32_t *nb_rx_poll,
+			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -482,13 +473,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after adding rx_queue_id */
 static void
-rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint16_t wt,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	if (wt != 0)
 		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
@@ -500,12 +488,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after deleting rx_queue_id */
 static void
-rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
 				nb_wrr);
@@ -517,8 +503,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the rx_poll array
  */
 static struct eth_rx_poll_entry *
-rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint32_t num_rx_polled)
+rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
 {
 	size_t len;
 
@@ -534,7 +519,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the WRR array
  */
 static uint32_t *
-rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
 {
 	size_t len;
 
@@ -547,11 +532,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
 }
 
 static int
-rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint32_t nb_poll,
-		uint32_t nb_wrr,
-		struct eth_rx_poll_entry **rx_poll,
-		uint32_t **wrr_sched)
+rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
+		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
+		      uint32_t **wrr_sched)
 {
 
 	if (nb_poll == 0) {
@@ -576,9 +559,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Precalculate WRR polling sequence for all queues in rx_adapter */
 static void
-rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_rx_poll_entry *rx_poll,
-		uint32_t *rx_wrr)
+rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
+		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
 {
 	uint16_t d;
 	uint16_t q;
@@ -705,13 +687,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 }
 
 static inline int
-rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
 {
 	return !!rx_adapter->enq_block_count;
 }
 
 static inline void
-rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->rx_enq_block_start_ts)
 		return;
@@ -724,8 +706,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
-		    struct rte_event_eth_rx_adapter_stats *stats)
+rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
+		     struct rte_event_eth_rx_adapter_stats *stats)
 {
 	if (unlikely(!stats->rx_enq_start_ts))
 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
@@ -744,10 +726,10 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter)
 {
-	struct rte_eth_event_enqueue_buffer *buf =
-	    &rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 
 	if (!buf->count)
@@ -774,7 +756,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
 		struct eth_rx_vector_data *vec)
 {
 	vec->vector_ev->nb_elem = 0;
@@ -785,9 +767,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline uint16_t
-rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 			struct eth_rx_queue_info *queue_info,
-			struct rte_eth_event_enqueue_buffer *buf,
+			struct eth_event_enqueue_buffer *buf,
 			struct rte_mbuf **mbufs, uint16_t num)
 {
 	struct rte_event *ev = &buf->events[buf->count];
@@ -845,19 +827,16 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		uint16_t rx_queue_id,
-		struct rte_mbuf **mbufs,
-		uint16_t num)
+rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
 					&rx_adapter->eth_devices[eth_dev_id];
 	struct eth_rx_queue_info *eth_rx_queue_info =
 					&dev_info->rx_queue[rx_queue_id];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev = &buf->events[buf->count];
 	uint64_t event = eth_rx_queue_info->event;
 	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
@@ -909,16 +888,13 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue packets from  <port, q>  to event buffer */
 static inline uint32_t
-rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint16_t port_id,
-	uint16_t queue_id,
-	uint32_t rx_count,
-	uint32_t max_rx,
-	int *rxq_empty)
+rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
+	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
+	   int *rxq_empty)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats =
 					&rx_adapter->stats;
 	uint16_t n;
@@ -953,8 +929,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
-		void *data)
+rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
 {
 	uint16_t port_id;
 	uint16_t queue;
@@ -994,8 +969,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
-			uint32_t num_intr_vec)
+rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
+			  uint32_t num_intr_vec)
 {
 	if (rx_adapter->num_intr_vec + num_intr_vec >
 				RTE_EVENT_ETH_INTR_RING_SIZE) {
@@ -1010,9 +985,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Delete entries for (dev, queue) from the interrupt ring */
 static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			uint16_t rx_queue_id)
+rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info,
+			  uint16_t rx_queue_id)
 {
 	int i, n;
 	union queue_data qd;
@@ -1045,7 +1020,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
 static void *
 rxa_intr_thread(void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
+	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
 	int n, i;
 
@@ -1068,12 +1043,12 @@ rxa_intr_thread(void *arg)
  * mbufs to eventdev
  */
 static inline uint32_t
-rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
@@ -1188,11 +1163,11 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
  * it.
  */
 static inline uint32_t
-rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1233,8 +1208,8 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
-	struct rte_eth_event_enqueue_buffer *buf =
+	struct event_eth_rx_adapter *rx_adapter = arg;
+	struct eth_event_enqueue_buffer *buf =
 		&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev;
 
@@ -1257,7 +1232,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 static int
 rxa_service_func(void *args)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = args;
+	struct event_eth_rx_adapter *rx_adapter = args;
 	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
@@ -1318,7 +1293,7 @@ rte_event_eth_rx_adapter_init(void)
 	return 0;
 }
 
-static inline struct rte_event_eth_rx_adapter *
+static inline struct event_eth_rx_adapter *
 rxa_id_to_adapter(uint8_t id)
 {
 	return event_eth_rx_adapter ?
@@ -1335,7 +1310,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	uint8_t port_id;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	dev_conf = dev->data->dev_conf;
@@ -1384,7 +1359,7 @@ rxa_epoll_create1(void)
 }
 
 static int
-rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->epd != INIT_FD)
 		return 0;
@@ -1401,7 +1376,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1445,7 +1420,7 @@ rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 
@@ -1466,7 +1441,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
 {
 	int ret;
 
@@ -1484,9 +1459,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1514,9 +1488,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id)
+rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 {
 	int err;
 	int i;
@@ -1573,9 +1546,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
+		struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err, err1;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1663,9 +1635,8 @@ rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int rx_queue_id)
+rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 
 {
 	int i, j, err;
@@ -1713,9 +1684,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	return err;
 }
 
-
 static int
-rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
 {
 	int ret;
 	struct rte_service_spec service;
@@ -1758,10 +1728,9 @@ rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
 }
 
 static void
-rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int32_t rx_queue_id,
-		uint8_t add)
+rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, int32_t rx_queue_id,
+		 uint8_t add)
 {
 	struct eth_rx_queue_info *queue_info;
 	int enabled;
@@ -1811,9 +1780,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
 }
 
 static void
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id)
+rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
+	   struct eth_device_info *dev_info, int32_t rx_queue_id)
 {
 	struct eth_rx_vector_data *vec;
 	int pollq;
@@ -1854,10 +1822,9 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static void
-rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id,
-	const struct rte_event_eth_rx_adapter_queue_conf *conf)
+rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
+	      struct eth_device_info *dev_info, int32_t rx_queue_id,
+	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
 {
 	struct eth_rx_queue_info *queue_info;
 	const struct rte_event *ev = &conf->ev;
@@ -1922,7 +1889,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 
 static void
 rxa_sw_event_vector_configure(
-	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 	int rx_queue_id,
 	const struct rte_event_eth_rx_adapter_event_vector_config *config)
 {
@@ -1956,10 +1923,10 @@ rxa_sw_event_vector_configure(
 			      config->vector_timeout_ns >> 1;
 }
 
-static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		int rx_queue_id,
-		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+static int
+rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	   int rx_queue_id,
+	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
@@ -2088,7 +2055,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 static int
 rxa_ctrl(uint8_t id, int start)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2135,7 +2102,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				rte_event_eth_rx_adapter_conf_cb conf_cb,
 				void *conf_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	int ret;
 	int socket_id;
 	uint16_t i;
@@ -2235,7 +2202,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_eth_rx_adapter_free(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2267,7 +2234,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
 {
 	int ret;
 	uint32_t cap;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 
@@ -2385,7 +2352,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 {
 	int ret = 0;
 	struct rte_eventdev *dev;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	uint32_t nb_rx_poll = 0;
@@ -2505,7 +2472,7 @@ rte_event_eth_rx_adapter_queue_event_vector_config(
 	struct rte_event_eth_rx_adapter_event_vector_config *config)
 {
 	struct rte_event_eth_rx_adapter_vector_limits limits;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	uint32_t cap;
 	int ret;
@@ -2632,7 +2599,7 @@ int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -2673,7 +2640,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 int
 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2701,7 +2668,7 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 int
 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2721,7 +2688,7 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
 					rte_event_eth_rx_adapter_cb_fn cb_fn,
 					void *cb_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	int ret;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 31fa9ac4b8..f1fcd6ce3d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1193,7 +1193,7 @@ struct rte_event {
 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
 /**< The application can override the adapter generated flow ID in the
  * event. This flow ID can be specified when adding an ethdev Rx queue
- * to the adapter using the ev member of struct rte_event_eth_rx_adapter
+ * to the adapter using the ev.flow_id member.
  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
  */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 10/14] eventdev: rearrange fields in timer object
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (7 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
                       ` (6 subsequent siblings)
  15 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Rearrange fields in rte_event_timer data structure to remove holes.
Also, remove use of volatile from rte_event_timer.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_timer_adapter.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index cad6d3b4c5..1551741820 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -475,8 +475,6 @@ struct rte_event_timer {
 	 *  - op: RTE_EVENT_OP_NEW
 	 *  - event_type: RTE_EVENT_TYPE_TIMER
 	 */
-	volatile enum rte_event_timer_state state;
-	/**< State of the event timer. */
 	uint64_t timeout_ticks;
 	/**< Expiry timer ticks expressed in number of *timer_ticks_ns* from
 	 * now.
@@ -488,6 +486,8 @@ struct rte_event_timer {
 	 * implementation specific values to share between the arm and cancel
 	 * operations.  The application should not modify this field.
 	 */
+	enum rte_event_timer_state state;
+	/**< State of the event timer. */
 	uint8_t user_meta[0];
 	/**< Memory to store user specific metadata.
 	 * The event timer adapter implementation should not modify this area.
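For context, a minimal standalone sketch (generic field names, not the actual rte_event_timer layout) of why moving a 4-byte member out from between 8-byte members removes a padding hole:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a 4-byte member sandwiched between 8-byte members
 * forces 4 bytes of internal padding; moving it next to the other small
 * trailing member lets the two share one aligned slot.
 */
struct before {
	uint64_t a;
	uint32_t state;	/* 4 bytes + 4 bytes of padding before 'b' */
	uint64_t b;
	uint8_t tail;	/* followed by 7 bytes of tail padding */
};

struct after {
	uint64_t a;
	uint64_t b;
	uint32_t state;
	uint8_t tail;	/* packs into the same 8-byte slot as 'state' */
};

int main(void)
{
	/* Typically prints 32 and 24 on LP64 targets. */
	printf("before=%zu after=%zu\n", sizeof(struct before),
	       sizeof(struct after));
	return 0;
}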
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (8 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 10/14] eventdev: rearrange fields in timer object pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-07 20:49       ` Carrillo, Erik G
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 12/14] eventdev: promote event vector API to stable pbhagavatula
                       ` (5 subsequent siblings)
  15 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the memory used by timer adapters to hugepages.
Allocate the memory on the first adapter create or lookup to address
both primary and secondary process use cases.
This prevents TLB misses, if any, and aligns with the memory layout
of other subsystems.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_timer_adapter.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ae55407042..c4dc7a5fd4 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
-static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
+static struct rte_event_timer_adapter *adapters;
 
 static const struct event_timer_adapter_ops swtim_ops;
 
@@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
 	int n, ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (conf == NULL) {
 		rte_errno = EINVAL;
 		return NULL;
@@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
 	int ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (adapters[adapter_id].allocated)
 		return &adapters[adapter_id]; /* Adapter is already loaded */
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 12/14] eventdev: promote event vector API to stable
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (9 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 13/14] eventdev: make trace APIs internal pbhagavatula
                       ` (4 subsequent siblings)
  15 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Jay Jayatheerthan, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Promote event vector configuration APIs to stable.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/rte_event_eth_rx_adapter.h | 2 --
 lib/eventdev/rte_eventdev.h             | 1 -
 lib/eventdev/version.map                | 6 +++---
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index 182dd2e5dd..d13d817025 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -543,7 +543,6 @@ int rte_event_eth_rx_adapter_cb_register(uint8_t id, uint16_t eth_dev_id,
  *  - 0: Success.
  *  - <0: Error code on failure.
  */
-__rte_experimental
 int rte_event_eth_rx_adapter_vector_limits_get(
 	uint8_t dev_id, uint16_t eth_port_id,
 	struct rte_event_eth_rx_adapter_vector_limits *limits);
@@ -570,7 +569,6 @@ int rte_event_eth_rx_adapter_vector_limits_get(
  *  - 0: Success, Receive queue configured correctly.
  *  - <0: Error code on failure.
  */
-__rte_experimental
 int rte_event_eth_rx_adapter_queue_event_vector_config(
 	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
 	struct rte_event_eth_rx_adapter_event_vector_config *config);
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index f1fcd6ce3d..14d4d9ec81 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1734,7 +1734,6 @@ int rte_event_dev_selftest(uint8_t dev_id);
  *    - ENOMEM - no appropriate memory area found in which to create memzone
  *    - ENAMETOOLONG - mempool name requested is too long.
  */
-__rte_experimental
 struct rte_mempool *
 rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index a3a732089b..068d186c66 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -38,10 +38,12 @@ DPDK_22 {
 	rte_event_eth_rx_adapter_free;
 	rte_event_eth_rx_adapter_queue_add;
 	rte_event_eth_rx_adapter_queue_del;
+	rte_event_eth_rx_adapter_queue_event_vector_config;
 	rte_event_eth_rx_adapter_service_id_get;
 	rte_event_eth_rx_adapter_start;
 	rte_event_eth_rx_adapter_stats_get;
 	rte_event_eth_rx_adapter_stats_reset;
+	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_rx_adapter_stop;
 	rte_event_eth_tx_adapter_caps_get;
 	rte_event_eth_tx_adapter_create;
@@ -83,6 +85,7 @@ DPDK_22 {
 	rte_event_timer_arm_burst;
 	rte_event_timer_arm_tmo_tick_burst;
 	rte_event_timer_cancel_burst;
+	rte_event_vector_pool_create;
 	rte_eventdevs;
 
 	#added in 21.11
@@ -135,9 +138,6 @@ EXPERIMENTAL {
 	__rte_eventdev_trace_port_setup;
 
 	#added in 21.05
-	rte_event_vector_pool_create;
-	rte_event_eth_rx_adapter_vector_limits_get;
-	rte_event_eth_rx_adapter_queue_event_vector_config;
 	__rte_eventdev_trace_crypto_adapter_enqueue;
 };
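With rte_event_vector_pool_create() now in the stable ABI, applications no longer need ALLOW_EXPERIMENTAL_API to call it. A minimal usage sketch, assuming the declaration shown above ends with an int socket_id parameter as in the installed header (pool name and sizes are example values):

#include <rte_eventdev.h>
#include <rte_lcore.h>

/* Illustrative: a mempool of event vectors, each able to carry up to
 * 64 object pointers (e.g. mbufs), created on the caller's NUMA node.
 */
static struct rte_mempool *
create_event_vector_pool(void)
{
	return rte_event_vector_pool_create("evt_vec_pool",
					    16 * 1024,	/* number of vectors */
					    64,		/* per-lcore cache */
					    64,		/* elements per vector */
					    rte_socket_id());
}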
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 13/14] eventdev: make trace APIs internal
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (10 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 12/14] eventdev: promote event vector API to stable pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-11  9:59       ` Gujjar, Abhinandan S
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 14/14] eventdev: mark trace variables as internal pbhagavatula
                       ` (3 subsequent siblings)
  15 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan, Erik Gabriel Carrillo
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Slowpath trace APIs are only used in rte_eventdev.c, so make them
internal.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} | 0
 lib/eventdev/eventdev_trace_points.c                    | 2 +-
 lib/eventdev/meson.build                                | 2 +-
 lib/eventdev/rte_event_crypto_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_rx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_tx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_timer_adapter.c                  | 2 +-
 lib/eventdev/rte_eventdev.c                             | 2 +-
 8 files changed, 7 insertions(+), 7 deletions(-)
 rename lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} (100%)

diff --git a/lib/eventdev/rte_eventdev_trace.h b/lib/eventdev/eventdev_trace.h
similarity index 100%
rename from lib/eventdev/rte_eventdev_trace.h
rename to lib/eventdev/eventdev_trace.h
diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
index 3867ec8008..237d9383fd 100644
--- a/lib/eventdev/eventdev_trace_points.c
+++ b/lib/eventdev/eventdev_trace_points.c
@@ -4,7 +4,7 @@
 
 #include <rte_trace_point_register.h>
 
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 /* Eventdev trace points */
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_configure,
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index f19b831edd..c750e0214f 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -19,7 +19,6 @@ sources = files(
 )
 headers = files(
         'rte_eventdev.h',
-        'rte_eventdev_trace.h',
         'rte_eventdev_trace_fp.h',
         'rte_event_ring.h',
         'rte_event_eth_rx_adapter.h',
@@ -34,6 +33,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'eventdev_trace.h',
         'event_timer_adapter_pmd.h',
 )
 
diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index e9e660a3d2..ae1151fb75 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -16,7 +16,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_crypto_adapter.h"
 
 #define BATCH_SIZE 32
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index f8225ebd3d..7e97fbd21d 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -20,7 +20,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_rx_adapter.h"
 
 #define BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
index 18c0359db7..ee3631bced 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.c
+++ b/lib/eventdev/rte_event_eth_tx_adapter.c
@@ -6,7 +6,7 @@
 #include <rte_ethdev.h>
 
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_tx_adapter.h"
 
 #define TXA_BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index c4dc7a5fd4..7404b0cbb2 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -24,7 +24,7 @@
 #include "eventdev_pmd.h"
 #include "rte_event_timer_adapter.h"
 #include "rte_eventdev.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index de6346194e..f881b7cc35 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -36,7 +36,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v3 14/14] eventdev: mark trace variables as internal
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (11 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 13/14] eventdev: make trace APIs internal pbhagavatula
@ 2021-10-06  6:50     ` pbhagavatula
  2021-10-06  7:11       ` David Marchand
  2021-10-14  9:05     ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " Jerin Jacob
                       ` (2 subsequent siblings)
  15 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-06  6:50 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark rte_trace global variables as internal i.e. remove them
from experimental section of version map.
Some of them are used in inline APIs, mark those as global.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/version.map | 77 ++++++++++++++++++----------------------
 1 file changed, 35 insertions(+), 42 deletions(-)

diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 068d186c66..617fff0ae6 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -88,57 +88,19 @@ DPDK_22 {
 	rte_event_vector_pool_create;
 	rte_eventdevs;
 
-	#added in 21.11
-	rte_event_fp_ops;
-
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
 	# added in 20.05
-	__rte_eventdev_trace_configure;
-	__rte_eventdev_trace_queue_setup;
-	__rte_eventdev_trace_port_link;
-	__rte_eventdev_trace_port_unlink;
-	__rte_eventdev_trace_start;
-	__rte_eventdev_trace_stop;
-	__rte_eventdev_trace_close;
+	__rte_eventdev_trace_crypto_adapter_enqueue;
 	__rte_eventdev_trace_deq_burst;
 	__rte_eventdev_trace_enq_burst;
-	__rte_eventdev_trace_eth_rx_adapter_create;
-	__rte_eventdev_trace_eth_rx_adapter_free;
-	__rte_eventdev_trace_eth_rx_adapter_queue_add;
-	__rte_eventdev_trace_eth_rx_adapter_queue_del;
-	__rte_eventdev_trace_eth_rx_adapter_start;
-	__rte_eventdev_trace_eth_rx_adapter_stop;
-	__rte_eventdev_trace_eth_tx_adapter_create;
-	__rte_eventdev_trace_eth_tx_adapter_free;
-	__rte_eventdev_trace_eth_tx_adapter_queue_add;
-	__rte_eventdev_trace_eth_tx_adapter_queue_del;
-	__rte_eventdev_trace_eth_tx_adapter_start;
-	__rte_eventdev_trace_eth_tx_adapter_stop;
 	__rte_eventdev_trace_eth_tx_adapter_enqueue;
-	__rte_eventdev_trace_timer_adapter_create;
-	__rte_eventdev_trace_timer_adapter_start;
-	__rte_eventdev_trace_timer_adapter_stop;
-	__rte_eventdev_trace_timer_adapter_free;
 	__rte_eventdev_trace_timer_arm_burst;
 	__rte_eventdev_trace_timer_arm_tmo_tick_burst;
 	__rte_eventdev_trace_timer_cancel_burst;
-	__rte_eventdev_trace_crypto_adapter_create;
-	__rte_eventdev_trace_crypto_adapter_free;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
-	__rte_eventdev_trace_crypto_adapter_start;
-	__rte_eventdev_trace_crypto_adapter_stop;
 
-	# changed in 20.11
-	__rte_eventdev_trace_port_setup;
+	#added in 21.11
+	rte_event_fp_ops;
 
-	#added in 21.05
-	__rte_eventdev_trace_crypto_adapter_enqueue;
+	local: *;
 };
 
 INTERNAL {
@@ -157,4 +119,35 @@ INTERNAL {
 	rte_event_pmd_release;
 	rte_event_pmd_vdev_init;
 	rte_event_pmd_vdev_uninit;
+
+	__rte_eventdev_trace_close;
+	__rte_eventdev_trace_configure;
+	__rte_eventdev_trace_crypto_adapter_create;
+	__rte_eventdev_trace_crypto_adapter_free;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
+	__rte_eventdev_trace_crypto_adapter_start;
+	__rte_eventdev_trace_crypto_adapter_stop;
+	__rte_eventdev_trace_eth_rx_adapter_create;
+	__rte_eventdev_trace_eth_rx_adapter_free;
+	__rte_eventdev_trace_eth_rx_adapter_queue_add;
+	__rte_eventdev_trace_eth_rx_adapter_queue_del;
+	__rte_eventdev_trace_eth_rx_adapter_start;
+	__rte_eventdev_trace_eth_rx_adapter_stop;
+	__rte_eventdev_trace_eth_tx_adapter_create;
+	__rte_eventdev_trace_eth_tx_adapter_free;
+	__rte_eventdev_trace_eth_tx_adapter_queue_add;
+	__rte_eventdev_trace_eth_tx_adapter_queue_del;
+	__rte_eventdev_trace_eth_tx_adapter_start;
+	__rte_eventdev_trace_eth_tx_adapter_stop;
+	__rte_eventdev_trace_port_link;
+	__rte_eventdev_trace_port_setup;
+	__rte_eventdev_trace_port_unlink;
+	__rte_eventdev_trace_queue_setup;
+	__rte_eventdev_trace_start;
+	__rte_eventdev_trace_stop;
+	__rte_eventdev_trace_timer_adapter_create;
+	__rte_eventdev_trace_timer_adapter_free;
+	__rte_eventdev_trace_timer_adapter_start;
+	__rte_eventdev_trace_timer_adapter_stop;
 };
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 14/14] eventdev: mark trace variables as internal
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 14/14] eventdev: mark trace variables as internal pbhagavatula
@ 2021-10-06  7:11       ` David Marchand
  2021-10-14  9:28         ` Jerin Jacob
  0 siblings, 1 reply; 119+ messages in thread
From: David Marchand @ 2021-10-06  7:11 UTC (permalink / raw)
  To: Pavan Nikhilesh, Ray Kinsella; +Cc: Jerin Jacob Kollanukkaran, dev

Hello Pavan, Ray,

On Wed, Oct 6, 2021 at 8:52 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Mark rte_trace global variables as internal i.e. remove them
> from experimental section of version map.
> Some of them are used in inline APIs, mark those as global.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Ray Kinsella <mdr@ashroe.eu>

Please, sort those symbols.
I check with ./devtools/update-abi.sh $(cat ABI_VERSION)


> ---
>  lib/eventdev/version.map | 77 ++++++++++++++++++----------------------
>  1 file changed, 35 insertions(+), 42 deletions(-)
>
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index 068d186c66..617fff0ae6 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -88,57 +88,19 @@ DPDK_22 {
>         rte_event_vector_pool_create;
>         rte_eventdevs;
>
> -       #added in 21.11
> -       rte_event_fp_ops;
> -
> -       local: *;
> -};
> -
> -EXPERIMENTAL {
> -       global:
> -
>         # added in 20.05

At the next ABI bump, ./devtools/update-abi.sh will strip those
comments from the stable section.
You can notice this when you run ./devtools/update-abi.sh $CURRENT_ABI
as suggested above.

I would strip the comments now that the symbols are going to stable.
Ray, do you have an opinion?


-- 
David Marchand


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
@ 2021-10-07 20:49       ` Carrillo, Erik G
  2021-10-08  5:38         ` Pavan Nikhilesh Bhagavatula
  0 siblings, 1 reply; 119+ messages in thread
From: Carrillo, Erik G @ 2021-10-07 20:49 UTC (permalink / raw)
  To: pbhagavatula, jerinj; +Cc: dev

Hi Pavan,

Some comments below:

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Wednesday, October 6, 2021 1:50 AM
> To: jerinj@marvell.com; Carrillo, Erik G <erik.g.carrillo@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters
> memory to hugepage
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Move memory used by timer adapters to hugepage.
> Allocate memory on the first adapter create or lookup to address both
> primary and secondary process usecases.
> This will prevent TLB misses if any and aligns to memory structure of other
> subsystems.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_timer_adapter.c | 24
> +++++++++++++++++++++++-
>  1 file changed, 23 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/eventdev/rte_event_timer_adapter.c
> b/lib/eventdev/rte_event_timer_adapter.c
> index ae55407042..c4dc7a5fd4 100644
> --- a/lib/eventdev/rte_event_timer_adapter.c
> +++ b/lib/eventdev/rte_event_timer_adapter.c
> @@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype,
> adapter.timer, NOTICE);
> RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
> RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc,
> NOTICE);
> 
> -static struct rte_event_timer_adapter
> adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
> +static struct rte_event_timer_adapter *adapters;
> 
>  static const struct event_timer_adapter_ops swtim_ops;
> 
> @@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
>  	int n, ret;
>  	struct rte_eventdev *dev;
> 
> +	if (adapters == NULL) {
> +		adapters = rte_zmalloc("Eventdev",
> +				       sizeof(struct rte_event_timer_adapter) *
> +
> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
> +				       RTE_CACHE_LINE_SIZE);
> +		if (adapters == NULL) {
> +			rte_errno = ENOMEM;
> +			return NULL;
> +		}
> +	}
> +
>  	if (conf == NULL) {
>  		rte_errno = EINVAL;
>  		return NULL;
> @@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t
> adapter_id)
>  	int ret;
>  	struct rte_eventdev *dev;
> 
> +	if (adapters == NULL) {
> +		adapters = rte_zmalloc("Eventdev",
> +				       sizeof(struct rte_event_timer_adapter) *
> +
> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
> +				       RTE_CACHE_LINE_SIZE);
> +		if (adapters == NULL) {
> +			rte_errno = ENOMEM;
> +			return NULL;
> +		}
> +	}
> +
>  	if (adapters[adapter_id].allocated)
>  		return &adapters[adapter_id]; /* Adapter is already loaded
> */
> 
> --
> 2.17.1

The rte_event_timer_adapter struct has several fields that have per-process values.  

For example, there are three fast path function pointers and each will be assigned distinct addresses for each process in a multi-process scenario.  The "allocated" field is also per-process.  With the changes above, if a secondary process did a lookup() after a primary process did a create(),  the secondary would get a reference to an object with function pointers that are invalid in the secondary process.

To fully move the adapter object table into shared hugepage memory, those "per-process" members would need to be collected into a per-process data structure that could be independently allocated for each process.  However, that would add one more pointer dereference to get to the fast path functions, and avoiding that was the original reason to put those pointers there.  This is similar to the rte_eventdev struct.
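To illustrate the trade-off described above, a rough sketch (invented names, not the DPDK layout) of what splitting shared and per-process adapter state could look like, at the cost of one extra dereference on the fast path:

#include <stdint.h>

/* Shared, hugepage-backed state: safe to reference from any process. */
struct timer_adapter_shared {
	uint16_t id;
	uint8_t configured;
	/* ... adapter configuration, implementation data, ... */
};

/* Per-process state: function pointers and the 'allocated' flag are only
 * meaningful inside the process that resolved them.
 */
struct timer_adapter_local {
	struct timer_adapter_shared *shared;	/* extra hop on the fast path */
	uint16_t (*arm_burst)(void *adapter, void **tims, uint16_t nb_tims);
	uint8_t allocated;
};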

Thanks,
Erik


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage
  2021-10-07 20:49       ` Carrillo, Erik G
@ 2021-10-08  5:38         ` Pavan Nikhilesh Bhagavatula
  2021-10-08 15:57           ` Carrillo, Erik G
  0 siblings, 1 reply; 119+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2021-10-08  5:38 UTC (permalink / raw)
  To: Carrillo, Erik G, Jerin Jacob Kollanukkaran; +Cc: dev

Hi Erik,

>Hi Pavan,
>
>Some comments below:
>
>> -----Original Message-----
>> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
>> Sent: Wednesday, October 6, 2021 1:50 AM
>> To: jerinj@marvell.com; Carrillo, Erik G <erik.g.carrillo@intel.com>
>> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
>> Subject: [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters
>> memory to hugepage
>>
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Move memory used by timer adapters to hugepage.
>> Allocate memory on the first adapter create or lookup to address both
>> primary and secondary process usecases.
>> This will prevent TLB misses if any and aligns to memory structure of
>other
>> subsystems.
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>> ---
>>  lib/eventdev/rte_event_timer_adapter.c | 24
>> +++++++++++++++++++++++-
>>  1 file changed, 23 insertions(+), 1 deletion(-)
>>
>> diff --git a/lib/eventdev/rte_event_timer_adapter.c
>> b/lib/eventdev/rte_event_timer_adapter.c
>> index ae55407042..c4dc7a5fd4 100644
>> --- a/lib/eventdev/rte_event_timer_adapter.c
>> +++ b/lib/eventdev/rte_event_timer_adapter.c
>> @@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype,
>> adapter.timer, NOTICE);
>> RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer,
>NOTICE);
>> RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc,
>> NOTICE);
>>
>> -static struct rte_event_timer_adapter
>> adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
>> +static struct rte_event_timer_adapter *adapters;
>>
>>  static const struct event_timer_adapter_ops swtim_ops;
>>
>> @@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
>>  	int n, ret;
>>  	struct rte_eventdev *dev;
>>
>> +	if (adapters == NULL) {
>> +		adapters = rte_zmalloc("Eventdev",
>> +				       sizeof(struct
>rte_event_timer_adapter) *
>> +
>> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
>> +				       RTE_CACHE_LINE_SIZE);
>> +		if (adapters == NULL) {
>> +			rte_errno = ENOMEM;
>> +			return NULL;
>> +		}
>> +	}
>> +
>>  	if (conf == NULL) {
>>  		rte_errno = EINVAL;
>>  		return NULL;
>> @@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t
>> adapter_id)
>>  	int ret;
>>  	struct rte_eventdev *dev;
>>
>> +	if (adapters == NULL) {
>> +		adapters = rte_zmalloc("Eventdev",
>> +				       sizeof(struct
>rte_event_timer_adapter) *
>> +
>> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
>> +				       RTE_CACHE_LINE_SIZE);
>> +		if (adapters == NULL) {
>> +			rte_errno = ENOMEM;
>> +			return NULL;
>> +		}
>> +	}
>> +
>>  	if (adapters[adapter_id].allocated)
>>  		return &adapters[adapter_id]; /* Adapter is already
>loaded
>> */
>>
>> --
>> 2.17.1
>
>The rte_event_timer_adapter struct has several fields that have per-
>process values.
>
>For example, there are three fast path function pointers and each will
>be assigned distinct addresses for each process in a multi-process
>scenario.  The "allocated" field is also per-process.  With the changes
>above, if a secondary process did a lookup() after a primary process did
>a create(),  the secondary would get a reference to an object with
>function pointers that are invalid in the secondary process.
>

I understand; the current patch doesn't unify the memory between processes.
Instead, we zmalloc the per-process 'array' that holds the adapter objects
whenever the process calls either create or lookup, and initialize the per-process data structure.

The pointer to the adapter array is static, so whenever a process is initialized it will be NULL.
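Condensed into a helper inside rte_event_timer_adapter.c, the lazy per-process allocation described above would look roughly like this (the patch open-codes it in create_ext() and lookup(); the helper name is illustrative):

static struct rte_event_timer_adapter *adapters;

static int
timer_adapters_alloc_once(void)
{
	if (adapters != NULL)
		return 0;

	/* First create or lookup in this process: allocate the adapter
	 * table from the DPDK (hugepage-backed) heap.
	 */
	adapters = rte_zmalloc("Eventdev",
			       sizeof(struct rte_event_timer_adapter) *
				       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
			       RTE_CACHE_LINE_SIZE);
	if (adapters == NULL) {
		rte_errno = ENOMEM;
		return -ENOMEM;
	}

	return 0;
}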


>To fully move the adapter object table into shared hugepage memory,
>those "per-process" members would need to be collected into a per-
>process data structure that could be independently allocated for each
>process.  However, that would add one more pointer dereference to
>get to the fast path functions, and avoiding that was the original reason
>to put those pointers there.  This is similar to the rte_eventdev struct.
>
>Thanks,
>Erik


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage
  2021-10-08  5:38         ` Pavan Nikhilesh Bhagavatula
@ 2021-10-08 15:57           ` Carrillo, Erik G
  0 siblings, 0 replies; 119+ messages in thread
From: Carrillo, Erik G @ 2021-10-08 15:57 UTC (permalink / raw)
  To: Pavan Nikhilesh Bhagavatula, Jerin Jacob Kollanukkaran; +Cc: dev

> -----Original Message-----
> From: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com>
> Sent: Friday, October 8, 2021 12:38 AM
> To: Carrillo, Erik G <erik.g.carrillo@intel.com>; Jerin Jacob Kollanukkaran
> <jerinj@marvell.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters
> memory to hugepage
> 
> Hi Erik,
> 
> >Hi Pavan,
> >
> >Some comments below:
> >
> >> -----Original Message-----
> >> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> >> Sent: Wednesday, October 6, 2021 1:50 AM
> >> To: jerinj@marvell.com; Carrillo, Erik G <erik.g.carrillo@intel.com>
> >> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> >> Subject: [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters
> >> memory to hugepage
> >>
> >> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >>
> >> Move memory used by timer adapters to hugepage.
> >> Allocate memory on the first adapter create or lookup to address both
> >> primary and secondary process usecases.
> >> This will prevent TLB misses if any and aligns to memory structure of
> >other
> >> subsystems.
> >>
> >> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >> ---
> >>  lib/eventdev/rte_event_timer_adapter.c | 24
> >> +++++++++++++++++++++++-
> >>  1 file changed, 23 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/lib/eventdev/rte_event_timer_adapter.c
> >> b/lib/eventdev/rte_event_timer_adapter.c
> >> index ae55407042..c4dc7a5fd4 100644
> >> --- a/lib/eventdev/rte_event_timer_adapter.c
> >> +++ b/lib/eventdev/rte_event_timer_adapter.c
> >> @@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype,
> >> adapter.timer, NOTICE);
> >> RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer,
> >NOTICE);
> >> RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc,
> >> NOTICE);
> >>
> >> -static struct rte_event_timer_adapter
> >> adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
> >> +static struct rte_event_timer_adapter *adapters;
> >>
> >>  static const struct event_timer_adapter_ops swtim_ops;
> >>
> >> @@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
> >>  	int n, ret;
> >>  	struct rte_eventdev *dev;
> >>
> >> +	if (adapters == NULL) {
> >> +		adapters = rte_zmalloc("Eventdev",
> >> +				       sizeof(struct
> >rte_event_timer_adapter) *
> >> +
> >> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
> >> +				       RTE_CACHE_LINE_SIZE);
> >> +		if (adapters == NULL) {
> >> +			rte_errno = ENOMEM;
> >> +			return NULL;
> >> +		}
> >> +	}
> >> +
> >>  	if (conf == NULL) {
> >>  		rte_errno = EINVAL;
> >>  		return NULL;
> >> @@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t
> >> adapter_id)
> >>  	int ret;
> >>  	struct rte_eventdev *dev;
> >>
> >> +	if (adapters == NULL) {
> >> +		adapters = rte_zmalloc("Eventdev",
> >> +				       sizeof(struct
> >rte_event_timer_adapter) *
> >> +
> >> RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
> >> +				       RTE_CACHE_LINE_SIZE);
> >> +		if (adapters == NULL) {
> >> +			rte_errno = ENOMEM;
> >> +			return NULL;
> >> +		}
> >> +	}
> >> +
> >>  	if (adapters[adapter_id].allocated)
> >>  		return &adapters[adapter_id]; /* Adapter is already
> >loaded
> >> */
> >>
> >> --
> >> 2.17.1
> >
> >The rte_event_timer_adapter struct has several fields that have per-
> >process values.
> >
> >For example, there are three fast path function pointers and each will
> >be assigned distinct addresses for each process in a multi-process
> >scenario.  The "allocated" field is also per-process.  With the changes
> >above, if a secondary process did a lookup() after a primary process
> >did a create(),  the secondary would get a reference to an object with
> >function pointers that are invalid in the secondary process.
> >
> 
> I understand, the current patch doesn't unify the memory between
> processes.
> Instead, we zmalloc the per-process 'array' that holds the adapter objects
> when ever the process calls either create or lookup and initialize the per-
> process data structure.
> 
> The pointer to the adapter array is static, so when ever a process is initialized
> it will be NULL.
> 

Ah, right - I missed that.  This looks OK to me now.

One other thing: we never free the array we zmalloc'd, but it looks like we could in rte_event_timer_adapter_free(), if we were freeing the last adapter instance.
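A rough sketch of that idea (illustrative only, not part of this patch): after releasing an adapter, drop the table once no slot in this process remains allocated.

static void
timer_adapters_free_if_unused(void)
{
	int i;

	if (adapters == NULL)
		return;

	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		if (adapters[i].allocated)
			return;

	rte_free(adapters);
	adapters = NULL;
}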

> 
> >To fully move the adapter object table into shared hugepage memory,
> >those "per-process" members would need to be collected into a per-
> >process data structure that could be independently allocated for each
> >process.  However, that would add one more pointer dereference to get
> >to the fast path functions, and avoiding that was the original reason
> >to put those pointers there.  This is similar to the rte_eventdev struct.
> >
> >Thanks,
> >Erik


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline functions
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline functions pbhagavatula
@ 2021-10-11  9:51       ` Gujjar, Abhinandan S
  0 siblings, 0 replies; 119+ messages in thread
From: Gujjar, Abhinandan S @ 2021-10-11  9:51 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Jayatheerthan, Jay; +Cc: dev

Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Wednesday, October 6, 2021 12:20 PM
> To: jerinj@marvell.com; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline
> functions
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Use new driver interface for the fastpath enqueue/dequeue inline
> functions.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
> ---
>  lib/eventdev/rte_event_crypto_adapter.h | 15 +++++---
> lib/eventdev/rte_event_eth_tx_adapter.h | 15 ++++----
>  lib/eventdev/rte_eventdev.h             | 46 +++++++++++++++----------
>  3 files changed, 47 insertions(+), 29 deletions(-)
> 
> diff --git a/lib/eventdev/rte_event_crypto_adapter.h
> b/lib/eventdev/rte_event_crypto_adapter.h
> index 431d05b6ed..eb82818d05 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.h
> +++ b/lib/eventdev/rte_event_crypto_adapter.h
> @@ -568,12 +568,19 @@ rte_event_crypto_adapter_enqueue(uint8_t
> dev_id,
>  				struct rte_event ev[],
>  				uint16_t nb_events)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +	const struct rte_event_fp_ops *fp_ops;
> +	void *port;
> 
> +	fp_ops = &rte_event_fp_ops[dev_id];
> +	port = fp_ops->data[port_id];
>  #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (dev_id >= RTE_EVENT_MAX_DEVS ||
> +	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
> +		rte_errno = EINVAL;
> +		return 0;
> +	}
> 
> -	if (port_id >= dev->data->nb_ports) {
> +	if (port == NULL) {
>  		rte_errno = EINVAL;
>  		return 0;
>  	}
> @@ -581,7 +588,7 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
>  	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
>  		nb_events);
> 
> -	return dev->ca_enqueue(dev->data->ports[port_id], ev,
> nb_events);
> +	return fp_ops->ca_enqueue(port, ev, nb_events);
>  }
> 
>  #ifdef __cplusplus
> diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h
> b/lib/eventdev/rte_event_eth_tx_adapter.h
> index 8c59547165..3908c2ded5 100644
> --- a/lib/eventdev/rte_event_eth_tx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_tx_adapter.h
> @@ -355,16 +355,19 @@ rte_event_eth_tx_adapter_enqueue(uint8_t
> dev_id,
>  				uint16_t nb_events,
>  				const uint8_t flags)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +	const struct rte_event_fp_ops *fp_ops;
> +	void *port;
> 
> +	fp_ops = &rte_event_fp_ops[dev_id];
> +	port = fp_ops->data[port_id];
>  #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
>  	if (dev_id >= RTE_EVENT_MAX_DEVS ||
> -		!rte_eventdevs[dev_id].attached) {
> +	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
>  		rte_errno = EINVAL;
>  		return 0;
>  	}
> 
> -	if (port_id >= dev->data->nb_ports) {
> +	if (port == NULL) {
>  		rte_errno = EINVAL;
>  		return 0;
>  	}
> @@ -372,11 +375,9 @@ rte_event_eth_tx_adapter_enqueue(uint8_t
> dev_id,
>  	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
>  		nb_events, flags);
>  	if (flags)
> -		return dev->txa_enqueue_same_dest(dev->data-
> >ports[port_id],
> -						  ev, nb_events);
> +		return fp_ops->txa_enqueue_same_dest(port, ev,
> nb_events);
>  	else
> -		return dev->txa_enqueue(dev->data->ports[port_id], ev,
> -					nb_events);
> +		return fp_ops->txa_enqueue(port, ev, nb_events);
>  }
> 
>  /**
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 1b11d4576d..31fa9ac4b8 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1747,15 +1747,19 @@ __rte_event_enqueue_burst(uint8_t dev_id,
> uint8_t port_id,
>  			  const struct rte_event ev[], uint16_t nb_events,
>  			  const event_enqueue_burst_t fn)
>  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +	const struct rte_event_fp_ops *fp_ops;
> +	void *port;
> 
> +	fp_ops = &rte_event_fp_ops[dev_id];
> +	port = fp_ops->data[port_id];
>  #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -	if (dev_id >= RTE_EVENT_MAX_DEVS ||
> !rte_eventdevs[dev_id].attached) {
> +	if (dev_id >= RTE_EVENT_MAX_DEVS ||
> +	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
>  		rte_errno = EINVAL;
>  		return 0;
>  	}
> 
> -	if (port_id >= dev->data->nb_ports) {
> +	if (port == NULL) {
>  		rte_errno = EINVAL;
>  		return 0;
>  	}
> @@ -1766,9 +1770,9 @@ __rte_event_enqueue_burst(uint8_t dev_id,
> uint8_t port_id,
>  	 * requests nb_events as const one
>  	 */
>  	if (nb_events == 1)
> -		return (*dev->enqueue)(dev->data->ports[port_id], ev);
> +		return (fp_ops->enqueue)(port, ev);
>  	else
> -		return fn(dev->data->ports[port_id], ev, nb_events);
> +		return fn(port, ev, nb_events);
>  }
> 
>  /**
> @@ -1818,10 +1822,11 @@ static inline uint16_t
> rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
>  			const struct rte_event ev[], uint16_t nb_events)  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +	const struct rte_event_fp_ops *fp_ops;
> 
> +	fp_ops = &rte_event_fp_ops[dev_id];
>  	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -					 dev->enqueue_burst);
> +					 fp_ops->enqueue_burst);
>  }
> 
>  /**
> @@ -1869,10 +1874,11 @@ static inline uint16_t
> rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
>  			    const struct rte_event ev[], uint16_t nb_events)  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +	const struct rte_event_fp_ops *fp_ops;
> 
> +	fp_ops = &rte_event_fp_ops[dev_id];
>  	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -					 dev->enqueue_new_burst);
> +					 fp_ops->enqueue_new_burst);
>  }
> 
>  /**
> @@ -1920,10 +1926,11 @@ static inline uint16_t
> rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
>  				const struct rte_event ev[], uint16_t
> nb_events)  {
> -	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +	const struct rte_event_fp_ops *fp_ops;
> 
> +	fp_ops = &rte_event_fp_ops[dev_id];
>  	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -					 dev->enqueue_forward_burst);
> +					 fp_ops->enqueue_forward_burst);
>  }
> 
>  /**
> @@ -1996,15 +2003,19 @@ static inline uint16_t
> rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event
> ev[],
>  			uint16_t nb_events, uint64_t timeout_ticks)  {
> -	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +	const struct rte_event_fp_ops *fp_ops;
> +	void *port;
> 
> +	fp_ops = &rte_event_fp_ops[dev_id];
> +	port = fp_ops->data[port_id];
>  #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -	if (dev_id >= RTE_EVENT_MAX_DEVS ||
> !rte_eventdevs[dev_id].attached) {
> +	if (dev_id >= RTE_EVENT_MAX_DEVS ||
> +	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
>  		rte_errno = EINVAL;
>  		return 0;
>  	}
> 
> -	if (port_id >= dev->data->nb_ports) {
> +	if (port == NULL) {
>  		rte_errno = EINVAL;
>  		return 0;
>  	}
> @@ -2015,11 +2026,10 @@ rte_event_dequeue_burst(uint8_t dev_id,
> uint8_t port_id, struct rte_event ev[],
>  	 * requests nb_events as const one
>  	 */
>  	if (nb_events == 1)
> -		return (*dev->dequeue)(dev->data->ports[port_id], ev,
> -				       timeout_ticks);
> +		return (fp_ops->dequeue)(port, ev, timeout_ticks);
>  	else
> -		return (*dev->dequeue_burst)(dev->data->ports[port_id],
> ev,
> -					     nb_events, timeout_ticks);
> +		return (fp_ops->dequeue_burst)(port, ev, nb_events,
> +					       timeout_ticks);
>  }
> 
>  #ifdef __cplusplus
> --
> 2.17.1
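Worth noting for readers: the application-facing calls are unchanged by this rework; only the internal dereference moves from rte_eventdevs[] to rte_event_fp_ops[]. A minimal caller-side sketch (device/port IDs and event fields are example values):

#include <rte_errno.h>
#include <rte_eventdev.h>

/* Illustrative: enqueue a single new event on device 0, port 0. */
static int
enqueue_one(struct rte_event *ev)
{
	uint16_t nb;

	ev->op = RTE_EVENT_OP_NEW;
	ev->queue_id = 0;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;

	nb = rte_event_enqueue_burst(0 /* dev_id */, 0 /* port_id */, ev, 1);
	if (nb != 1)
		return -rte_errno;

	return 0;
}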


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 09/14] eventdev: remove rte prefix for internal structs
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
@ 2021-10-11  9:58       ` Gujjar, Abhinandan S
  0 siblings, 0 replies; 119+ messages in thread
From: Gujjar, Abhinandan S @ 2021-10-11  9:58 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Jayatheerthan, Jay; +Cc: dev

Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Wednesday, October 6, 2021 12:20 PM
> To: jerinj@marvell.com; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [PATCH v3 09/14] eventdev: remove rte prefix for
> internal structs
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Remove rte_ prefix from rte_eth_event_enqueue_buffer,
> rte_event_eth_rx_adapter and rte_event_crypto_adapter as they are only
> used in rte_event_eth_rx_adapter.c and rte_event_crypto_adapter.c
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
> ---
>  lib/eventdev/rte_event_crypto_adapter.c |  66 +++----
> lib/eventdev/rte_event_eth_rx_adapter.c | 249 ++++++++++--------------
>  lib/eventdev/rte_eventdev.h             |   2 +-
>  3 files changed, 141 insertions(+), 176 deletions(-)
> 
> diff --git a/lib/eventdev/rte_event_crypto_adapter.c
> b/lib/eventdev/rte_event_crypto_adapter.c
> index ebfc8326a8..e9e660a3d2 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.c
> +++ b/lib/eventdev/rte_event_crypto_adapter.c
> @@ -30,7 +30,7 @@
>   */
>  #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
> 
> -struct rte_event_crypto_adapter {
> +struct event_crypto_adapter {
>  	/* Event device identifier */
>  	uint8_t eventdev_id;
>  	/* Event port identifier */
> @@ -99,7 +99,7 @@ struct crypto_queue_pair_info {
>  	uint8_t len;
>  } __rte_cache_aligned;
> 
> -static struct rte_event_crypto_adapter **event_crypto_adapter;
> +static struct event_crypto_adapter **event_crypto_adapter;
> 
>  /* Macros to check for valid adapter */  #define
> EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ @@ -
> 141,7 +141,7 @@ eca_init(void)
>  	return 0;
>  }
> 
> -static inline struct rte_event_crypto_adapter *
> +static inline struct event_crypto_adapter *
>  eca_id_to_adapter(uint8_t id)
>  {
>  	return event_crypto_adapter ?
> @@ -158,7 +158,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
>  	int started;
>  	int ret;
>  	struct rte_event_port_conf *port_conf = arg;
> -	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
> +	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
> 
>  	if (adapter == NULL)
>  		return -EINVAL;
> @@ -202,7 +202,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id,
> uint8_t dev_id,
>  				enum rte_event_crypto_adapter_mode
> mode,
>  				void *conf_arg)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
>  	struct rte_event_dev_info dev_info;
>  	int socket_id;
> @@ -304,7 +304,7 @@ rte_event_crypto_adapter_create(uint8_t id, uint8_t
> dev_id,  int  rte_event_crypto_adapter_free(uint8_t id)  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -329,8 +329,8 @@ rte_event_crypto_adapter_free(uint8_t id)  }
> 
>  static inline unsigned int
> -eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
> -		 struct rte_event *ev, unsigned int cnt)
> +eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct
> rte_event *ev,
> +		     unsigned int cnt)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter-
> >crypto_stats;
>  	union rte_event_crypto_metadata *m_data = NULL; @@ -420,7
> +420,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter
> *adapter,  }
> 
>  static unsigned int
> -eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
> +eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter-
> >crypto_stats;
>  	struct crypto_device_info *curr_dev;
> @@ -466,8 +466,8 @@ eca_crypto_enq_flush(struct
> rte_event_crypto_adapter *adapter)  }
> 
>  static int
> -eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_enq)
> +eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
> +			   unsigned int max_enq)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter-
> >crypto_stats;
>  	struct rte_event ev[BATCH_SIZE];
> @@ -500,8 +500,8 @@ eca_crypto_adapter_enq_run(struct
> rte_event_crypto_adapter *adapter,  }
> 
>  static inline void
> -eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
> -		  struct rte_crypto_op **ops, uint16_t num)
> +eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
> +		      struct rte_crypto_op **ops, uint16_t num)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter-
> >crypto_stats;
>  	union rte_event_crypto_metadata *m_data = NULL; @@ -564,8
> +564,8 @@ eca_ops_enqueue_burst(struct rte_event_crypto_adapter
> *adapter,  }
> 
>  static inline unsigned int
> -eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_deq)
> +eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
> +			   unsigned int max_deq)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter-
> >crypto_stats;
>  	struct crypto_device_info *curr_dev;
> @@ -627,8 +627,8 @@ eca_crypto_adapter_deq_run(struct
> rte_event_crypto_adapter *adapter,  }
> 
>  static void
> -eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_ops)
> +eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
> +		       unsigned int max_ops)
>  {
>  	while (max_ops) {
>  		unsigned int e_cnt, d_cnt;
> @@ -648,7 +648,7 @@ eca_crypto_adapter_run(struct
> rte_event_crypto_adapter *adapter,  static int  eca_service_func(void *args)
> {
> -	struct rte_event_crypto_adapter *adapter = args;
> +	struct event_crypto_adapter *adapter = args;
> 
>  	if (rte_spinlock_trylock(&adapter->lock) == 0)
>  		return 0;
> @@ -659,7 +659,7 @@ eca_service_func(void *args)  }
> 
>  static int
> -eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
> +eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
>  {
>  	struct rte_event_crypto_adapter_conf adapter_conf;
>  	struct rte_service_spec service;
> @@ -699,10 +699,9 @@ eca_init_service(struct rte_event_crypto_adapter
> *adapter, uint8_t id)  }
> 
>  static void
> -eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
> -			struct crypto_device_info *dev_info,
> -			int32_t queue_pair_id,
> -			uint8_t add)
> +eca_update_qp_info(struct event_crypto_adapter *adapter,
> +		   struct crypto_device_info *dev_info, int32_t
> queue_pair_id,
> +		   uint8_t add)
>  {
>  	struct crypto_queue_pair_info *qp_info;
>  	int enabled;
> @@ -729,9 +728,8 @@ eca_update_qp_info(struct
> rte_event_crypto_adapter *adapter,  }
> 
>  static int
> -eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
> -		uint8_t cdev_id,
> -		int queue_pair_id)
> +eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t
> cdev_id,
> +		   int queue_pair_id)
>  {
>  	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
>  	struct crypto_queue_pair_info *qpairs; @@ -773,7 +771,7 @@
> rte_event_crypto_adapter_queue_pair_add(uint8_t id,
>  			int32_t queue_pair_id,
>  			const struct rte_event *event)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct rte_eventdev *dev;
>  	struct crypto_device_info *dev_info;
>  	uint32_t cap;
> @@ -889,7 +887,7 @@ int
>  rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
>  					int32_t queue_pair_id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	int ret;
> @@ -975,7 +973,7 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t
> id, uint8_t cdev_id,  static int  eca_adapter_ctrl(uint8_t id, int start)  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	uint32_t i;
> @@ -1017,7 +1015,7 @@ eca_adapter_ctrl(uint8_t id, int start)  int
> rte_event_crypto_adapter_start(uint8_t id)  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
>  	adapter = eca_id_to_adapter(id);
> @@ -1039,7 +1037,7 @@ int
>  rte_event_crypto_adapter_stats_get(uint8_t id,
>  				struct rte_event_crypto_adapter_stats
> *stats)  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
>  	struct rte_event_crypto_adapter_stats dev_stats;
>  	struct rte_eventdev *dev;
> @@ -1083,7 +1081,7 @@ rte_event_crypto_adapter_stats_get(uint8_t id,
> int  rte_event_crypto_adapter_stats_reset(uint8_t id)  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	uint32_t i;
> @@ -1111,7 +1109,7 @@ rte_event_crypto_adapter_stats_reset(uint8_t id)
> int  rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t
> *service_id)  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -1128,7 +1126,7 @@ rte_event_crypto_adapter_service_id_get(uint8_t
> id, uint32_t *service_id)  int
> rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t
> *event_port_id)  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c
> b/lib/eventdev/rte_event_eth_rx_adapter.c
> index 13dfb28401..f8225ebd3d 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -78,14 +78,14 @@ struct eth_rx_vector_data {
> TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
> 
>  /* Instance per adapter */
> -struct rte_eth_event_enqueue_buffer {
> +struct eth_event_enqueue_buffer {
>  	/* Count of events in this buffer */
>  	uint16_t count;
>  	/* Array of events in this buffer */
>  	struct rte_event events[ETH_EVENT_BUFFER_SIZE];  };
> 
> -struct rte_event_eth_rx_adapter {
> +struct event_eth_rx_adapter {
>  	/* RSS key */
>  	uint8_t rss_key_be[RSS_KEY_SIZE];
>  	/* Event device identifier */
> @@ -109,7 +109,7 @@ struct rte_event_eth_rx_adapter {
>  	/* Next entry in wrr[] to begin polling */
>  	uint32_t wrr_pos;
>  	/* Event burst buffer */
> -	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer event_enqueue_buffer;
>  	/* Vector enable flag */
>  	uint8_t ena_vector;
>  	/* Timestamp of previous vector expiry list traversal */ @@ -231,7
> +231,7 @@ struct eth_rx_queue_info {
>  	struct eth_rx_vector_data vector_data;  };
> 
> -static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
> +static struct event_eth_rx_adapter **event_eth_rx_adapter;
> 
>  static inline int
>  rxa_validate_id(uint8_t id)
> @@ -247,7 +247,7 @@ rxa_validate_id(uint8_t id)  } while (0)
> 
>  static inline int
> -rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter
> *rx_adapter)
> +rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;  }
> @@ -265,10 +265,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
>   * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
>   */
>  static int
> -rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
> -	 unsigned int n, int *cw,
> -	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
> -	 uint16_t gcd, int prev)
> +rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n,
> int *cw,
> +	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
> +	     uint16_t gcd, int prev)
>  {
>  	int i = prev;
>  	uint16_t w;
> @@ -373,10 +372,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info,
> int rx_queue_id, int add)
>  /* Calculate nb_rx_intr after deleting interrupt mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_intr)
> +rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_intr)
>  {
>  	uint32_t intr_diff;
> 
> @@ -392,12 +390,10 @@ rxa_calc_nb_post_intr_del(struct
> rte_event_eth_rx_adapter *rx_adapter,
>   * interrupt queues could currently be poll mode Rx queues
>   */
>  static void
> -rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_rx_intr,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +			  uint32_t *nb_wrr)
>  {
>  	uint32_t intr_diff;
>  	uint32_t poll_diff;
> @@ -424,11 +420,9 @@ rxa_calc_nb_post_add_intr(struct
> rte_event_eth_rx_adapter *rx_adapter,
>   * after deleting poll mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
>  {
>  	uint32_t poll_diff;
>  	uint32_t wrr_len_diff;
> @@ -449,13 +443,10 @@ rxa_calc_nb_post_poll_del(struct
> rte_event_eth_rx_adapter *rx_adapter,
>  /* Calculate nb_rx_* after adding poll mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint16_t wt,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_rx_intr,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint16_t wt, uint32_t *nb_rx_poll,
> +			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
>  {
>  	uint32_t intr_diff;
>  	uint32_t poll_diff;
> @@ -482,13 +473,10 @@ rxa_calc_nb_post_add_poll(struct
> rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Calculate nb_rx_* after adding rx_queue_id */  static void -
> rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id,
> -		uint16_t wt,
> -		uint32_t *nb_rx_poll,
> -		uint32_t *nb_rx_intr,
> -		uint32_t *nb_wrr)
> +rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
> +		     struct eth_device_info *dev_info, int rx_queue_id,
> +		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +		     uint32_t *nb_wrr)
>  {
>  	if (wt != 0)
>  		rxa_calc_nb_post_add_poll(rx_adapter, dev_info,
> rx_queue_id, @@ -500,12 +488,10 @@ rxa_calc_nb_post_add(struct
> rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Calculate nb_rx_* after deleting rx_queue_id */  static void -
> rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id,
> -		uint32_t *nb_rx_poll,
> -		uint32_t *nb_rx_intr,
> -		uint32_t *nb_wrr)
> +rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
> +		     struct eth_device_info *dev_info, int rx_queue_id,
> +		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +		     uint32_t *nb_wrr)
>  {
>  	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id,
> nb_rx_poll,
>  				nb_wrr);
> @@ -517,8 +503,7 @@ rxa_calc_nb_post_del(struct
> rte_event_eth_rx_adapter *rx_adapter,
>   * Allocate the rx_poll array
>   */
>  static struct eth_rx_poll_entry *
> -rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
> -	uint32_t num_rx_polled)
> +rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t
> +num_rx_polled)
>  {
>  	size_t len;
> 
> @@ -534,7 +519,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter
> *rx_adapter,
>   * Allocate the WRR array
>   */
>  static uint32_t *
> -rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
> +rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
>  {
>  	size_t len;
> 
> @@ -547,11 +532,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter
> *rx_adapter, int nb_wrr)  }
> 
>  static int
> -rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint32_t nb_poll,
> -		uint32_t nb_wrr,
> -		struct eth_rx_poll_entry **rx_poll,
> -		uint32_t **wrr_sched)
> +rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t
> nb_poll,
> +		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
> +		      uint32_t **wrr_sched)
>  {
> 
>  	if (nb_poll == 0) {
> @@ -576,9 +559,8 @@ rxa_alloc_poll_arrays(struct
> rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Precalculate WRR polling sequence for all queues in rx_adapter */  static
> void -rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter
> *rx_adapter,
> -		struct eth_rx_poll_entry *rx_poll,
> -		uint32_t *rx_wrr)
> +rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
> +		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
>  {
>  	uint16_t d;
>  	uint16_t q;
> @@ -705,13 +687,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t
> *rss_key_be)  }
> 
>  static inline int
> -rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	return !!rx_adapter->enq_block_count;
>  }
> 
>  static inline void
> -rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	if (rx_adapter->rx_enq_block_start_ts)
>  		return;
> @@ -724,8 +706,8 @@ rxa_enq_block_start_ts(struct
> rte_event_eth_rx_adapter *rx_adapter)  }
> 
>  static inline void
> -rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
> -		    struct rte_event_eth_rx_adapter_stats *stats)
> +rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
> +		     struct rte_event_eth_rx_adapter_stats *stats)
>  {
>  	if (unlikely(!stats->rx_enq_start_ts))
>  		stats->rx_enq_start_ts = rte_get_tsc_cycles(); @@ -744,10
> +726,10 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter
> *rx_adapter,
> 
>  /* Enqueue buffered events to event device */  static inline uint16_t -
> rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter)
>  {
> -	struct rte_eth_event_enqueue_buffer *buf =
> -	    &rx_adapter->event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
> 
>  	if (!buf->count)
> @@ -774,7 +756,7 @@ rxa_flush_event_buffer(struct
> rte_event_eth_rx_adapter *rx_adapter)  }
> 
>  static inline void
> -rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
> +rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
>  		struct eth_rx_vector_data *vec)
>  {
>  	vec->vector_ev->nb_elem = 0;
> @@ -785,9 +767,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter
> *rx_adapter,  }
> 
>  static inline uint16_t
> -rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
> +rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
>  			struct eth_rx_queue_info *queue_info,
> -			struct rte_eth_event_enqueue_buffer *buf,
> +			struct eth_event_enqueue_buffer *buf,
>  			struct rte_mbuf **mbufs, uint16_t num)  {
>  	struct rte_event *ev = &buf->events[buf->count]; @@ -845,19
> +827,16 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter
> *rx_adapter,  }
> 
>  static inline void
> -rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint16_t eth_dev_id,
> -		uint16_t rx_queue_id,
> -		struct rte_mbuf **mbufs,
> -		uint16_t num)
> +rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t
> eth_dev_id,
> +		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t
> num)
>  {
>  	uint32_t i;
>  	struct eth_device_info *dev_info =
>  					&rx_adapter-
> >eth_devices[eth_dev_id];
>  	struct eth_rx_queue_info *eth_rx_queue_info =
>  					&dev_info-
> >rx_queue[rx_queue_id];
> -	struct rte_eth_event_enqueue_buffer *buf =
> -					&rx_adapter-
> >event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event *ev = &buf->events[buf->count];
>  	uint64_t event = eth_rx_queue_info->event;
>  	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -
> 909,16 +888,13 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter
> *rx_adapter,
> 
>  /* Enqueue packets from  <port, q>  to event buffer */  static inline uint32_t
> -rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
> -	uint16_t port_id,
> -	uint16_t queue_id,
> -	uint32_t rx_count,
> -	uint32_t max_rx,
> -	int *rxq_empty)
> +rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
> +	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
> +	   int *rxq_empty)
>  {
>  	struct rte_mbuf *mbufs[BATCH_SIZE];
> -	struct rte_eth_event_enqueue_buffer *buf =
> -					&rx_adapter-
> >event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event_eth_rx_adapter_stats *stats =
>  					&rx_adapter->stats;
>  	uint16_t n;
> @@ -953,8 +929,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter
> *rx_adapter,  }
> 
>  static inline void
> -rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		void *data)
> +rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void
> +*data)
>  {
>  	uint16_t port_id;
>  	uint16_t queue;
> @@ -994,8 +969,8 @@ rxa_intr_ring_enqueue(struct
> rte_event_eth_rx_adapter *rx_adapter,  }
> 
>  static int
> -rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
> -			uint32_t num_intr_vec)
> +rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
> +			  uint32_t num_intr_vec)
>  {
>  	if (rx_adapter->num_intr_vec + num_intr_vec >
>  				RTE_EVENT_ETH_INTR_RING_SIZE) {
> @@ -1010,9 +985,9 @@ rxa_intr_ring_check_avail(struct
> rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Delete entries for (dev, queue) from the interrupt ring */  static void -
> rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			uint16_t rx_queue_id)
> +rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info,
> +			  uint16_t rx_queue_id)
>  {
>  	int i, n;
>  	union queue_data qd;
> @@ -1045,7 +1020,7 @@ rxa_intr_ring_del_entries(struct
> rte_event_eth_rx_adapter *rx_adapter,  static void *  rxa_intr_thread(void
> *arg)  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = arg;
> +	struct event_eth_rx_adapter *rx_adapter = arg;
>  	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
>  	int n, i;
> 
> @@ -1068,12 +1043,12 @@ rxa_intr_thread(void *arg)
>   * mbufs to eventdev
>   */
>  static inline uint32_t
> -rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t n;
>  	uint32_t nb_rx = 0;
>  	int rxq_empty;
> -	struct rte_eth_event_enqueue_buffer *buf;
> +	struct eth_event_enqueue_buffer *buf;
>  	rte_spinlock_t *ring_lock;
>  	uint8_t max_done = 0;
> 
> @@ -1188,11 +1163,11 @@ rxa_intr_ring_dequeue(struct
> rte_event_eth_rx_adapter *rx_adapter)
>   * it.
>   */
>  static inline uint32_t
> -rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t num_queue;
>  	uint32_t nb_rx = 0;
> -	struct rte_eth_event_enqueue_buffer *buf;
> +	struct eth_event_enqueue_buffer *buf;
>  	uint32_t wrr_pos;
>  	uint32_t max_nb_rx;
> 
> @@ -1233,8 +1208,8 @@ rxa_poll(struct rte_event_eth_rx_adapter
> *rx_adapter)  static void  rxa_vector_expire(struct eth_rx_vector_data *vec,
> void *arg)  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = arg;
> -	struct rte_eth_event_enqueue_buffer *buf =
> +	struct event_eth_rx_adapter *rx_adapter = arg;
> +	struct eth_event_enqueue_buffer *buf =
>  		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event *ev;
> 
> @@ -1257,7 +1232,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec,
> void *arg)  static int  rxa_service_func(void *args)  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = args;
> +	struct event_eth_rx_adapter *rx_adapter = args;
>  	struct rte_event_eth_rx_adapter_stats *stats;
> 
>  	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0) @@ -1318,7
> +1293,7 @@ rte_event_eth_rx_adapter_init(void)
>  	return 0;
>  }
> 
> -static inline struct rte_event_eth_rx_adapter *
> +static inline struct event_eth_rx_adapter *
>  rxa_id_to_adapter(uint8_t id)
>  {
>  	return event_eth_rx_adapter ?
> @@ -1335,7 +1310,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
>  	int started;
>  	uint8_t port_id;
>  	struct rte_event_port_conf *port_conf = arg;
> -	struct rte_event_eth_rx_adapter *rx_adapter =
> rxa_id_to_adapter(id);
> +	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
> 
>  	dev = &rte_eventdevs[rx_adapter->eventdev_id];
>  	dev_conf = dev->data->dev_conf;
> @@ -1384,7 +1359,7 @@ rxa_epoll_create1(void)  }
> 
>  static int
> -rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	if (rx_adapter->epd != INIT_FD)
>  		return 0;
> @@ -1401,7 +1376,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter
> *rx_adapter)  }
> 
>  static int
> -rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int err;
>  	char thread_name[RTE_MAX_THREAD_NAME_LEN];
> @@ -1445,7 +1420,7 @@ rxa_create_intr_thread(struct
> rte_event_eth_rx_adapter *rx_adapter)  }
> 
>  static int
> -rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int err;
> 
> @@ -1466,7 +1441,7 @@ rxa_destroy_intr_thread(struct
> rte_event_eth_rx_adapter *rx_adapter)  }
> 
>  static int
> -rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int ret;
> 
> @@ -1484,9 +1459,8 @@ rxa_free_intr_resources(struct
> rte_event_eth_rx_adapter *rx_adapter)  }
> 
>  static int
> -rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	uint16_t rx_queue_id)
> +rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
> +		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
>  {
>  	int err;
>  	uint16_t eth_dev_id = dev_info->dev->data->port_id; @@ -1514,9
> +1488,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter
> *rx_adapter,  }
> 
>  static int
> -rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id)
> +rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
> +		   struct eth_device_info *dev_info, int rx_queue_id)
>  {
>  	int err;
>  	int i;
> @@ -1573,9 +1546,8 @@ rxa_del_intr_queue(struct
> rte_event_eth_rx_adapter *rx_adapter,  }
> 
>  static int
> -rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	uint16_t rx_queue_id)
> +rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
> +		struct eth_device_info *dev_info, uint16_t rx_queue_id)
>  {
>  	int err, err1;
>  	uint16_t eth_dev_id = dev_info->dev->data->port_id; @@ -1663,9
> +1635,8 @@ rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> }
> 
>  static int
> -rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int rx_queue_id)
> +rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
> +		   struct eth_device_info *dev_info, int rx_queue_id)
> 
>  {
>  	int i, j, err;
> @@ -1713,9 +1684,8 @@ rxa_add_intr_queue(struct
> rte_event_eth_rx_adapter *rx_adapter,
>  	return err;
>  }
> 
> -
>  static int
> -rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
> +rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
>  {
>  	int ret;
>  	struct rte_service_spec service;
> @@ -1758,10 +1728,9 @@ rxa_init_service(struct rte_event_eth_rx_adapter
> *rx_adapter, uint8_t id)  }
> 
>  static void
> -rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int32_t rx_queue_id,
> -		uint8_t add)
> +rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
> +		 struct eth_device_info *dev_info, int32_t rx_queue_id,
> +		 uint8_t add)
>  {
>  	struct eth_rx_queue_info *queue_info;
>  	int enabled;
> @@ -1811,9 +1780,8 @@ rxa_set_vector_data(struct eth_rx_queue_info
> *queue_info, uint16_t vector_count,  }
> 
>  static void
> -rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int32_t rx_queue_id)
> +rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
> +	   struct eth_device_info *dev_info, int32_t rx_queue_id)
>  {
>  	struct eth_rx_vector_data *vec;
>  	int pollq;
> @@ -1854,10 +1822,9 @@ rxa_sw_del(struct rte_event_eth_rx_adapter
> *rx_adapter,  }
> 
>  static void
> -rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int32_t rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_queue_conf *conf)
> +rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
> +	      struct eth_device_info *dev_info, int32_t rx_queue_id,
> +	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
>  {
>  	struct eth_rx_queue_info *queue_info;
>  	const struct rte_event *ev = &conf->ev; @@ -1922,7 +1889,7 @@
> rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  static void
>  rxa_sw_event_vector_configure(
> -	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> +	struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
>  	int rx_queue_id,
>  	const struct rte_event_eth_rx_adapter_event_vector_config
> *config)  { @@ -1956,10 +1923,10 @@ rxa_sw_event_vector_configure(
>  			      config->vector_timeout_ns >> 1;  }
> 
> -static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint16_t eth_dev_id,
> -		int rx_queue_id,
> -		const struct rte_event_eth_rx_adapter_queue_conf
> *queue_conf)
> +static int
> +rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t
> eth_dev_id,
> +	   int rx_queue_id,
> +	   const struct rte_event_eth_rx_adapter_queue_conf
> *queue_conf)
>  {
>  	struct eth_device_info *dev_info = &rx_adapter-
> >eth_devices[eth_dev_id];
>  	struct rte_event_eth_rx_adapter_queue_conf temp_conf; @@ -
> 2088,7 +2055,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter
> *rx_adapter,  static int  rxa_ctrl(uint8_t id, int start)  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
>  	uint32_t i;
> @@ -2135,7 +2102,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id,
> uint8_t dev_id,
>  				rte_event_eth_rx_adapter_conf_cb
> conf_cb,
>  				void *conf_arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	int ret;
>  	int socket_id;
>  	uint16_t i;
> @@ -2235,7 +2202,7 @@ rte_event_eth_rx_adapter_create(uint8_t id,
> uint8_t dev_id,  int  rte_event_eth_rx_adapter_free(uint8_t id)  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
> 
>  	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -
> EINVAL);
> 
> @@ -2267,7 +2234,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
> {
>  	int ret;
>  	uint32_t cap;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
> 
> @@ -2385,7 +2352,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id,
> uint16_t eth_dev_id,  {
>  	int ret = 0;
>  	struct rte_eventdev *dev;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct eth_device_info *dev_info;
>  	uint32_t cap;
>  	uint32_t nb_rx_poll = 0;
> @@ -2505,7 +2472,7 @@
> rte_event_eth_rx_adapter_queue_event_vector_config(
>  	struct rte_event_eth_rx_adapter_event_vector_config *config)  {
>  	struct rte_event_eth_rx_adapter_vector_limits limits;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	uint32_t cap;
>  	int ret;
> @@ -2632,7 +2599,7 @@ int
>  rte_event_eth_rx_adapter_stats_get(uint8_t id,
>  			       struct rte_event_eth_rx_adapter_stats *stats)  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
>  	struct rte_event_eth_rx_adapter_stats dev_stats;
>  	struct rte_eventdev *dev;
> @@ -2673,7 +2640,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
> int  rte_event_eth_rx_adapter_stats_reset(uint8_t id)  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
>  	uint32_t i;
> @@ -2701,7 +2668,7 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
> int  rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t
> *service_id)  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
> 
>  	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -
> EINVAL);
> 
> @@ -2721,7 +2688,7 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
>  					rte_event_eth_rx_adapter_cb_fn
> cb_fn,
>  					void *cb_arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct eth_device_info *dev_info;
>  	uint32_t cap;
>  	int ret;
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 31fa9ac4b8..f1fcd6ce3d 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1193,7 +1193,7 @@ struct rte_event {
>  #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
>  /**< The application can override the adapter generated flow ID in the
>   * event. This flow ID can be specified when adding an ethdev Rx queue
> - * to the adapter using the ev member of struct rte_event_eth_rx_adapter
> + * to the adapter using the ev.flow_id member.
>   * @see struct rte_event_eth_rx_adapter_queue_conf::ev
>   * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
>   */
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 13/14] eventdev: make trace APIs internal
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 13/14] eventdev: make trace APIs internal pbhagavatula
@ 2021-10-11  9:59       ` Gujjar, Abhinandan S
  0 siblings, 0 replies; 119+ messages in thread
From: Gujjar, Abhinandan S @ 2021-10-11  9:59 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Jayatheerthan, Jay, Carrillo, Erik G; +Cc: dev

Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Wednesday, October 6, 2021 12:20 PM
> To: jerinj@marvell.com; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>; Carrillo, Erik G <erik.g.carrillo@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [PATCH v3 13/14] eventdev: make trace APIs internal
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Slowpath trace APIs are only used in rte_eventdev.c, so make them
> internal.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
> ---
>  lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} | 0
>  lib/eventdev/eventdev_trace_points.c                    | 2 +-
>  lib/eventdev/meson.build                                | 2 +-
>  lib/eventdev/rte_event_crypto_adapter.c                 | 2 +-
>  lib/eventdev/rte_event_eth_rx_adapter.c                 | 2 +-
>  lib/eventdev/rte_event_eth_tx_adapter.c                 | 2 +-
>  lib/eventdev/rte_event_timer_adapter.c                  | 2 +-
>  lib/eventdev/rte_eventdev.c                             | 2 +-
>  8 files changed, 7 insertions(+), 7 deletions(-)  rename
> lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} (100%)
> 
> diff --git a/lib/eventdev/rte_eventdev_trace.h
> b/lib/eventdev/eventdev_trace.h similarity index 100% rename from
> lib/eventdev/rte_eventdev_trace.h rename to
> lib/eventdev/eventdev_trace.h diff --git
> a/lib/eventdev/eventdev_trace_points.c
> b/lib/eventdev/eventdev_trace_points.c
> index 3867ec8008..237d9383fd 100644
> --- a/lib/eventdev/eventdev_trace_points.c
> +++ b/lib/eventdev/eventdev_trace_points.c
> @@ -4,7 +4,7 @@
> 
>  #include <rte_trace_point_register.h>
> 
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
> 
>  /* Eventdev trace points */
>  RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_configure,
> diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build index
> f19b831edd..c750e0214f 100644
> --- a/lib/eventdev/meson.build
> +++ b/lib/eventdev/meson.build
> @@ -19,7 +19,6 @@ sources = files(
>  )
>  headers = files(
>          'rte_eventdev.h',
> -        'rte_eventdev_trace.h',
>          'rte_eventdev_trace_fp.h',
>          'rte_event_ring.h',
>          'rte_event_eth_rx_adapter.h',
> @@ -34,6 +33,7 @@ driver_sdk_headers += files(
>          'eventdev_pmd.h',
>          'eventdev_pmd_pci.h',
>          'eventdev_pmd_vdev.h',
> +        'eventdev_trace.h',
>          'event_timer_adapter_pmd.h',
>  )
> 
> diff --git a/lib/eventdev/rte_event_crypto_adapter.c
> b/lib/eventdev/rte_event_crypto_adapter.c
> index e9e660a3d2..ae1151fb75 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.c
> +++ b/lib/eventdev/rte_event_crypto_adapter.c
> @@ -16,7 +16,7 @@
> 
>  #include "rte_eventdev.h"
>  #include "eventdev_pmd.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
>  #include "rte_event_crypto_adapter.h"
> 
>  #define BATCH_SIZE 32
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c
> b/lib/eventdev/rte_event_eth_rx_adapter.c
> index f8225ebd3d..7e97fbd21d 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -20,7 +20,7 @@
> 
>  #include "rte_eventdev.h"
>  #include "eventdev_pmd.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
>  #include "rte_event_eth_rx_adapter.h"
> 
>  #define BATCH_SIZE		32
> diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c
> b/lib/eventdev/rte_event_eth_tx_adapter.c
> index 18c0359db7..ee3631bced 100644
> --- a/lib/eventdev/rte_event_eth_tx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_tx_adapter.c
> @@ -6,7 +6,7 @@
>  #include <rte_ethdev.h>
> 
>  #include "eventdev_pmd.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
>  #include "rte_event_eth_tx_adapter.h"
> 
>  #define TXA_BATCH_SIZE		32
> diff --git a/lib/eventdev/rte_event_timer_adapter.c
> b/lib/eventdev/rte_event_timer_adapter.c
> index c4dc7a5fd4..7404b0cbb2 100644
> --- a/lib/eventdev/rte_event_timer_adapter.c
> +++ b/lib/eventdev/rte_event_timer_adapter.c
> @@ -24,7 +24,7 @@
>  #include "eventdev_pmd.h"
>  #include "rte_event_timer_adapter.h"
>  #include "rte_eventdev.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
> 
>  #define DATA_MZ_NAME_MAX_LEN 64
>  #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
> diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
> index de6346194e..f881b7cc35 100644
> --- a/lib/eventdev/rte_eventdev.c
> +++ b/lib/eventdev/rte_eventdev.c
> @@ -36,7 +36,7 @@
> 
>  #include "rte_eventdev.h"
>  #include "eventdev_pmd.h"
> -#include "rte_eventdev_trace.h"
> +#include "eventdev_trace.h"
> 
>  static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
> 
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface as internal
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (12 preceding siblings ...)
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 14/14] eventdev: mark trace variables as internal pbhagavatula
@ 2021-10-14  9:05     ` Jerin Jacob
  2021-10-14  9:08     ` Jerin Jacob
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
  15 siblings, 0 replies; 119+ messages in thread
From: Jerin Jacob @ 2021-10-14  9:05 UTC (permalink / raw)
  To: Pavan Nikhilesh
  Cc: Jerin Jacob, Shijith Thotton, Timothy McDaniel, Hemant Agrawal,
	Nipun Gupta, Mattias Rönnblom, Liang Ma, Peter Mccarthy,
	Harry van Haaren, Abhinandan Gujjar, Ray Kinsella, dpdk-dev

On Wed, Oct 6, 2021 at 12:21 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Mark all the driver specific functions as internal, remove
> `rte` prefix from `struct rte_eventdev_ops`.
> Remove experimental tag from internal functions.
> Remove `eventdev_pmd.h` from non-internal header files.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  v3 Changes:
>  - Reset fp_ops when the device is torn down.
>  - Add `event_dev_probing_finish()`; this function is used for
>    post-initialization processing. In the current use case we use it to
>    initialize fastpath ops.
>
>  v2 Changes:
>  - Rework inline flat array by adding port data into it.
>  - Rearrange rte_event_timer elements.
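
As a quick illustration of the v3 notes quoted above, the fragment below
sketches how a PMD probe path could hand off to the new post-initialization
hook so the common layer can fill in the fast-path ops. Only the hook's
purpose is taken from this thread; the include path, the assumed prototype of
event_dev_probing_finish(), and the example_pmd_probe_finish() wrapper are
illustrative assumptions.

/* Minimal sketch, not code from the series: event_dev_probing_finish() is
 * assumed here to take the rte_eventdev pointer and return void. */
#include <eventdev_pmd.h>

static int
example_pmd_probe_finish(struct rte_eventdev *event_dev)
{
	/* Driver-specific setup (ops table, ports, queues) is assumed to be
	 * done before this point; the common eventdev layer then populates
	 * the fast-path ops, which are reset again when the device is torn
	 * down. */
	event_dev_probing_finish(event_dev);
	return 0;
}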


There is a rebase issue with next-eventdev. Please rebase.

[for-main]dell[dpdk-next-eventdev] $ git pw series apply 19405

Applying: eventdev: make driver interface as internal
Using index info to reconstruct a base tree...
M       drivers/event/cnxk/cn10k_eventdev.c
M       drivers/event/cnxk/cn9k_eventdev.c
M       lib/eventdev/eventdev_pmd.h
M       lib/eventdev/rte_event_crypto_adapter.h
M       lib/eventdev/version.map
Falling back to patching base and 3-way merge...
Auto-merging lib/eventdev/version.map
CONFLICT (content): Merge conflict in lib/eventdev/version.map
Auto-merging lib/eventdev/rte_event_crypto_adapter.h
Auto-merging lib/eventdev/eventdev_pmd.h
Auto-merging drivers/event/cnxk/cn9k_eventdev.c
CONFLICT (content): Merge conflict in drivers/event/cnxk/cn9k_eventdev.c
Auto-merging drivers/event/cnxk/cn10k_eventdev.c
CONFLICT (content): Merge conflict in drivers/event/cnxk/cn10k_eventdev.c
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch=diff' to see the failed patch
Patch failed at 0001 eventdev: make driver interface as internal
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface as internal
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (13 preceding siblings ...)
  2021-10-14  9:05     ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " Jerin Jacob
@ 2021-10-14  9:08     ` Jerin Jacob
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
  15 siblings, 0 replies; 119+ messages in thread
From: Jerin Jacob @ 2021-10-14  9:08 UTC (permalink / raw)
  To: Pavan Nikhilesh
  Cc: Jerin Jacob, Shijith Thotton, Timothy McDaniel, Hemant Agrawal,
	Nipun Gupta, Mattias Rönnblom, Liang Ma, Peter Mccarthy,
	Harry van Haaren, Abhinandan Gujjar, Ray Kinsella, dpdk-dev

On Wed, Oct 6, 2021 at 12:21 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Mark all the driver specific functions as internal, remove
> `rte` prefix from `struct rte_eventdev_ops`.
> Remove experimental tag from internal functions.
> Remove `eventdev_pmd.h` from non-internal header files.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  v3 Changes:
>  - Reset fp_ops when the device is torn down.
>  - Add `event_dev_probing_finish()`; this function is used for
>    post-initialization processing. In the current use case we use it to
>    initialize fastpath ops.
>
>  v2 Changes:
>  - Rework inline flat array by adding port data into it.
>  - Rearrange rte_event_timer elements.
>
>  drivers/event/cnxk/cn10k_eventdev.c        |  6 ++---
>  drivers/event/cnxk/cn9k_eventdev.c         | 10 ++++-----
>  drivers/event/dlb2/dlb2.c                  |  2 +-
>  drivers/event/dpaa/dpaa_eventdev.c         |  2 +-
>  drivers/event/dpaa2/dpaa2_eventdev.c       |  2 +-
>  drivers/event/dsw/dsw_evdev.c              |  2 +-
>  drivers/event/octeontx/ssovf_evdev.c       |  2 +-
>  drivers/event/octeontx/ssovf_worker.c      |  4 ++--
>  drivers/event/octeontx2/otx2_evdev.c       | 26 +++++++++++-----------
>  drivers/event/opdl/opdl_evdev.c            |  2 +-
>  drivers/event/skeleton/skeleton_eventdev.c |  2 +-
>  drivers/event/sw/sw_evdev.c                |  2 +-
>  lib/eventdev/eventdev_pmd.h                |  6 ++++-
>  lib/eventdev/eventdev_pmd_pci.h            |  4 +++-
>  lib/eventdev/eventdev_pmd_vdev.h           |  2 ++
>  lib/eventdev/meson.build                   |  6 +++++
>  lib/eventdev/rte_event_crypto_adapter.h    |  1 -
>  lib/eventdev/rte_eventdev.h                | 25 ++++++++++++---------
>  lib/eventdev/version.map                   | 17 +++++++------

Please update the release notes for API and ABI changes.

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [RFC 02/15] eventdev: separate internal structures
  2021-08-23 19:40 ` [dpdk-dev] [RFC 02/15] eventdev: separate internal structures pbhagavatula
@ 2021-10-14  9:11   ` Jerin Jacob
  0 siblings, 0 replies; 119+ messages in thread
From: Jerin Jacob @ 2021-10-14  9:11 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: Jerin Jacob, Ananyev, Konstantin, dpdk-dev

On Tue, Aug 24, 2021 at 1:10 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Create rte_eventdev_core.h and move all the internal data structures
> to this file. These structures are mostly used by drivers, but they
> need to be in the public header file as they are accessed by datapath
> inline functions for performance reasons.
> The accessibility of these data structures is not changed.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/eventdev_pmd.h      |   3 -
>  lib/eventdev/meson.build         |   3 +
>  lib/eventdev/rte_eventdev.h      | 715 +++++++++++++------------------
>  lib/eventdev/rte_eventdev_core.h | 144 +++++++
>  4 files changed, 443 insertions(+), 422 deletions(-)
>  create mode 100644 lib/eventdev/rte_eventdev_core.h

Please validate the Doxygen output.


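To make the rationale in the commit message above concrete (the internal
structures stay in an installed, indirect header because the fast-path
helpers are static inline), here is a minimal application-side fragment;
app_enqueue_one() is a hypothetical wrapper, while rte_event_enqueue_burst()
and rte_eventdev_core.h come from the patch itself.

/* Applications still include only the public header; rte_eventdev_core.h is
 * pulled in indirectly at the end of rte_eventdev.h. */
#include <rte_eventdev.h>

static uint16_t
app_enqueue_one(uint8_t dev_id, uint8_t port_id, struct rte_event *ev)
{
	/* Because rte_event_enqueue_burst() is a static inline, its body is
	 * expanded here and dereferences rte_eventdevs[dev_id], which is why
	 * struct rte_eventdev must remain visible to application builds even
	 * though its fields are not part of the stable API. */
	return rte_event_enqueue_burst(dev_id, port_id, ev, 1);
}
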
>
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index 5dab9e2f70..a25d3f1fb5 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -91,9 +91,6 @@ struct rte_eventdev_global {
>         uint8_t nb_devs;        /**< Number of devices found */
>  };
>
> -extern struct rte_eventdev *rte_eventdevs;
> -/** The pool of rte_eventdev structures. */
> -
>  /**
>   * Get the rte_eventdev structure device pointer for the named device.
>   *
> diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
> index 523ea9ccae..8b51fde361 100644
> --- a/lib/eventdev/meson.build
> +++ b/lib/eventdev/meson.build
> @@ -27,6 +27,9 @@ headers = files(
>          'rte_event_crypto_adapter.h',
>          'rte_event_eth_tx_adapter.h',
>  )
> +indirect_headers += files(
> +        'rte_eventdev_core.h',
> +)
>  driver_sdk_headers += files(
>          'eventdev_pmd.h',
>          'eventdev_pmd_pci.h',
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 6ba116002f..1b11d4576d 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1324,314 +1324,6 @@ int
>  rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
>                                 uint32_t *caps);
>
> -struct eventdev_ops;
> -struct rte_eventdev;
> -
> -typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
> -/**< @internal Enqueue event on port of a device */
> -
> -typedef uint16_t (*event_enqueue_burst_t)(void *port,
> -                       const struct rte_event ev[], uint16_t nb_events);
> -/**< @internal Enqueue burst of events on port of a device */
> -
> -typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
> -               uint64_t timeout_ticks);
> -/**< @internal Dequeue event from port of a device */
> -
> -typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
> -               uint16_t nb_events, uint64_t timeout_ticks);
> -/**< @internal Dequeue burst of events from port of a device */
> -
> -typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
> -                               struct rte_event ev[], uint16_t nb_events);
> -/**< @internal Enqueue burst of events on port of a device */
> -
> -typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
> -               struct rte_event ev[], uint16_t nb_events);
> -/**< @internal Enqueue burst of events on port of a device supporting
> - * burst having same destination Ethernet port & Tx queue.
> - */
> -
> -typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
> -                               struct rte_event ev[], uint16_t nb_events);
> -/**< @internal Enqueue burst of events on crypto adapter */
> -
> -#define RTE_EVENTDEV_NAME_MAX_LEN      (64)
> -/**< @internal Max length of name of event PMD */
> -
> -/**
> - * @internal
> - * The data part, with no function pointers, associated with each device.
> - *
> - * This structure is safe to place in shared memory to be common among
> - * different processes in a multi-process configuration.
> - */
> -struct rte_eventdev_data {
> -       int socket_id;
> -       /**< Socket ID where memory is allocated */
> -       uint8_t dev_id;
> -       /**< Device ID for this instance */
> -       uint8_t nb_queues;
> -       /**< Number of event queues. */
> -       uint8_t nb_ports;
> -       /**< Number of event ports. */
> -       void **ports;
> -       /**< Array of pointers to ports. */
> -       struct rte_event_port_conf *ports_cfg;
> -       /**< Array of port configuration structures. */
> -       struct rte_event_queue_conf *queues_cfg;
> -       /**< Array of queue configuration structures. */
> -       uint16_t *links_map;
> -       /**< Memory to store queues to port connections. */
> -       void *dev_private;
> -       /**< PMD-specific private data */
> -       uint32_t event_dev_cap;
> -       /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
> -       struct rte_event_dev_config dev_conf;
> -       /**< Configuration applied to device. */
> -       uint8_t service_inited;
> -       /* Service initialization state */
> -       uint32_t service_id;
> -       /* Service ID*/
> -       void *dev_stop_flush_arg;
> -       /**< User-provided argument for event flush function */
> -
> -       RTE_STD_C11
> -       uint8_t dev_started : 1;
> -       /**< Device state: STARTED(1)/STOPPED(0) */
> -
> -       char name[RTE_EVENTDEV_NAME_MAX_LEN];
> -       /**< Unique identifier name */
> -
> -       uint64_t reserved_64s[4]; /**< Reserved for future fields */
> -       void *reserved_ptrs[4];   /**< Reserved for future fields */
> -} __rte_cache_aligned;
> -
> -/** @internal The data structure associated with each event device. */
> -struct rte_eventdev {
> -       event_enqueue_t enqueue;
> -       /**< Pointer to PMD enqueue function. */
> -       event_enqueue_burst_t enqueue_burst;
> -       /**< Pointer to PMD enqueue burst function. */
> -       event_enqueue_burst_t enqueue_new_burst;
> -       /**< Pointer to PMD enqueue burst function(op new variant) */
> -       event_enqueue_burst_t enqueue_forward_burst;
> -       /**< Pointer to PMD enqueue burst function(op forward variant) */
> -       event_dequeue_t dequeue;
> -       /**< Pointer to PMD dequeue function. */
> -       event_dequeue_burst_t dequeue_burst;
> -       /**< Pointer to PMD dequeue burst function. */
> -       event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
> -       /**< Pointer to PMD eth Tx adapter burst enqueue function with
> -        * events destined to same Eth port & Tx queue.
> -        */
> -       event_tx_adapter_enqueue txa_enqueue;
> -       /**< Pointer to PMD eth Tx adapter enqueue function. */
> -       struct rte_eventdev_data *data;
> -       /**< Pointer to device data */
> -       struct eventdev_ops *dev_ops;
> -       /**< Functions exported by PMD */
> -       struct rte_device *dev;
> -       /**< Device info. supplied by probing */
> -
> -       RTE_STD_C11
> -       uint8_t attached : 1;
> -       /**< Flag indicating the device is attached */
> -
> -       event_crypto_adapter_enqueue ca_enqueue;
> -       /**< Pointer to PMD crypto adapter enqueue function. */
> -
> -       uint64_t reserved_64s[4]; /**< Reserved for future fields */
> -       void *reserved_ptrs[3];   /**< Reserved for future fields */
> -} __rte_cache_aligned;
> -
> -extern struct rte_eventdev *rte_eventdevs;
> -/** @internal The pool of rte_eventdev structures. */
> -
> -static __rte_always_inline uint16_t
> -__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events,
> -                       const event_enqueue_burst_t fn)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -
> -       if (port_id >= dev->data->nb_ports) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -#endif
> -       rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
> -       /*
> -        * Allow zero cost non burst mode routine invocation if application
> -        * requests nb_events as const one
> -        */
> -       if (nb_events == 1)
> -               return (*dev->enqueue)(dev->data->ports[port_id], ev);
> -       else
> -               return fn(dev->data->ports[port_id], ev, nb_events);
> -}
> -
> -/**
> - * Enqueue a burst of events objects or an event object supplied in *rte_event*
> - * structure on an  event device designated by its *dev_id* through the event
> - * port specified by *port_id*. Each event object specifies the event queue on
> - * which it will be enqueued.
> - *
> - * The *nb_events* parameter is the number of event objects to enqueue which are
> - * supplied in the *ev* array of *rte_event* structure.
> - *
> - * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
> - * enqueued to the same port that their associated events were dequeued from.
> - *
> - * The rte_event_enqueue_burst() function returns the number of
> - * events objects it actually enqueued. A return value equal to *nb_events*
> - * means that all event objects have been enqueued.
> - *
> - * @param dev_id
> - *   The identifier of the device.
> - * @param port_id
> - *   The identifier of the event port.
> - * @param ev
> - *   Points to an array of *nb_events* objects of type *rte_event* structure
> - *   which contain the event object enqueue operations to be processed.
> - * @param nb_events
> - *   The number of event objects to enqueue, typically number of
> - *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> - *   available for this port.
> - *
> - * @return
> - *   The number of event objects actually enqueued on the event device. The
> - *   return value can be less than the value of the *nb_events* parameter when
> - *   the event devices queue is full or if invalid parameters are specified in a
> - *   *rte_event*. If the return value is less than *nb_events*, the remaining
> - *   events at the end of ev[] are not consumed and the caller has to take care
> - *   of them, and rte_errno is set accordingly. Possible errno values include:
> - *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> - *              ID is invalid, or an event's sched type doesn't match the
> - *              capabilities of the destination queue.
> - *   - ENOSPC   The event port was backpressured and unable to enqueue
> - *              one or more events. This error code is only applicable to
> - *              closed systems.
> - * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> - */
> -static inline uint16_t
> -rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_burst);
> -}
> -
> -/**
> - * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
> - * an event device designated by its *dev_id* through the event port specified
> - * by *port_id*.
> - *
> - * Provides the same functionality as rte_event_enqueue_burst(), expect that
> - * application can use this API when the all objects in the burst contains
> - * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
> - * function can provide the additional hint to the PMD and optimize if possible.
> - *
> - * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
> - * has event object of operation type != RTE_EVENT_OP_NEW.
> - *
> - * @param dev_id
> - *   The identifier of the device.
> - * @param port_id
> - *   The identifier of the event port.
> - * @param ev
> - *   Points to an array of *nb_events* objects of type *rte_event* structure
> - *   which contain the event object enqueue operations to be processed.
> - * @param nb_events
> - *   The number of event objects to enqueue, typically number of
> - *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> - *   available for this port.
> - *
> - * @return
> - *   The number of event objects actually enqueued on the event device. The
> - *   return value can be less than the value of the *nb_events* parameter when
> - *   the event devices queue is full or if invalid parameters are specified in a
> - *   *rte_event*. If the return value is less than *nb_events*, the remaining
> - *   events at the end of ev[] are not consumed and the caller has to take care
> - *   of them, and rte_errno is set accordingly. Possible errno values include:
> - *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> - *              ID is invalid, or an event's sched type doesn't match the
> - *              capabilities of the destination queue.
> - *   - ENOSPC   The event port was backpressured and unable to enqueue
> - *              one or more events. This error code is only applicable to
> - *              closed systems.
> - * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> - * @see rte_event_enqueue_burst()
> - */
> -static inline uint16_t
> -rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_new_burst);
> -}
> -
> -/**
> - * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
> - * on an event device designated by its *dev_id* through the event port
> - * specified by *port_id*.
> - *
> - * Provides the same functionality as rte_event_enqueue_burst(), expect that
> - * application can use this API when the all objects in the burst contains
> - * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
> - * function can provide the additional hint to the PMD and optimize if possible.
> - *
> - * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
> - * has event object of operation type != RTE_EVENT_OP_FORWARD.
> - *
> - * @param dev_id
> - *   The identifier of the device.
> - * @param port_id
> - *   The identifier of the event port.
> - * @param ev
> - *   Points to an array of *nb_events* objects of type *rte_event* structure
> - *   which contain the event object enqueue operations to be processed.
> - * @param nb_events
> - *   The number of event objects to enqueue, typically number of
> - *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> - *   available for this port.
> - *
> - * @return
> - *   The number of event objects actually enqueued on the event device. The
> - *   return value can be less than the value of the *nb_events* parameter when
> - *   the event devices queue is full or if invalid parameters are specified in a
> - *   *rte_event*. If the return value is less than *nb_events*, the remaining
> - *   events at the end of ev[] are not consumed and the caller has to take care
> - *   of them, and rte_errno is set accordingly. Possible errno values include:
> - *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> - *              ID is invalid, or an event's sched type doesn't match the
> - *              capabilities of the destination queue.
> - *   - ENOSPC   The event port was backpressured and unable to enqueue
> - *              one or more events. This error code is only applicable to
> - *              closed systems.
> - * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> - * @see rte_event_enqueue_burst()
> - */
> -static inline uint16_t
> -rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_forward_burst);
> -}
> -
>  /**
>   * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
>   *
> @@ -1662,124 +1354,27 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
>                                         uint64_t *timeout_ticks);
>
>  /**
> - * Dequeue a burst of events objects or an event object from the event port
> - * designated by its *event_port_id*, on an event device designated
> - * by its *dev_id*.
> - *
> - * rte_event_dequeue_burst() does not dictate the specifics of scheduling
> - * algorithm as each eventdev driver may have different criteria to schedule
> - * an event. However, in general, from an application perspective scheduler may
> - * use the following scheme to dispatch an event to the port.
> - *
> - * 1) Selection of event queue based on
> - *   a) The list of event queues are linked to the event port.
> - *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
> - *   queue selection from list is based on event queue priority relative to
> - *   other event queue supplied as *priority* in rte_event_queue_setup()
> - *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
> - *   queue selection from the list is based on event priority supplied as
> - *   *priority* in rte_event_enqueue_burst()
> - * 2) Selection of event
> - *   a) The number of flows available in selected event queue.
> - *   b) Schedule type method associated with the event
> - *
> - * The *nb_events* parameter is the maximum number of event objects to dequeue
> - * which are returned in the *ev* array of *rte_event* structure.
> + * Link multiple source event queues supplied in *queues* to the destination
> + * event port designated by its *port_id* with associated service priority
> + * supplied in *priorities* on the event device designated by its *dev_id*.
>   *
> - * The rte_event_dequeue_burst() function returns the number of events objects
> - * it actually dequeued. A return value equal to *nb_events* means that all
> - * event objects have been dequeued.
> + * The link establishment shall enable the event port *port_id* from
> + * receiving events from the specified event queue(s) supplied in *queues*
>   *
> - * The number of events dequeued is the number of scheduler contexts held by
> - * this port. These contexts are automatically released in the next
> - * rte_event_dequeue_burst() invocation if the port supports implicit
> - * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
> - * operation can be used to release the contexts early.
> + * An event queue may link to one or more event ports.
> + * The number of links can be established from an event queue to event port is
> + * implementation defined.
>   *
> - * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
> - * enqueued to the same port that their associated events were dequeued from.
> + * Event queue(s) to event port link establishment can be changed at runtime
> + * without re-configuring the device to support scaling and to reduce the
> + * latency of critical work by establishing the link with more event ports
> + * at runtime.
>   *
>   * @param dev_id
>   *   The identifier of the device.
> + *
>   * @param port_id
> - *   The identifier of the event port.
> - * @param[out] ev
> - *   Points to an array of *nb_events* objects of type *rte_event* structure
> - *   for output to be populated with the dequeued event objects.
> - * @param nb_events
> - *   The maximum number of event objects to dequeue, typically number of
> - *   rte_event_port_dequeue_depth() available for this port.
> - *
> - * @param timeout_ticks
> - *   - 0 no-wait, returns immediately if there is no event.
> - *   - >0 wait for the event, if the device is configured with
> - *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
> - *   at least one event is available or *timeout_ticks* time.
> - *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
> - *   then this function will wait until the event available or
> - *   *dequeue_timeout_ns* ns which was previously supplied to
> - *   rte_event_dev_configure()
> - *
> - * @return
> - * The number of event objects actually dequeued from the port. The return
> - * value can be less than the value of the *nb_events* parameter when the
> - * event port's queue is not full.
> - *
> - * @see rte_event_port_dequeue_depth()
> - */
> -static inline uint16_t
> -rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
> -                       uint16_t nb_events, uint64_t timeout_ticks)
> -{
> -       struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -
> -       if (port_id >= dev->data->nb_ports) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -#endif
> -       rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
> -       /*
> -        * Allow zero cost non burst mode routine invocation if application
> -        * requests nb_events as const one
> -        */
> -       if (nb_events == 1)
> -               return (*dev->dequeue)(
> -                       dev->data->ports[port_id], ev, timeout_ticks);
> -       else
> -               return (*dev->dequeue_burst)(
> -                       dev->data->ports[port_id], ev, nb_events,
> -                               timeout_ticks);
> -}
> -
> -/**
> - * Link multiple source event queues supplied in *queues* to the destination
> - * event port designated by its *port_id* with associated service priority
> - * supplied in *priorities* on the event device designated by its *dev_id*.
> - *
> - * The link establishment shall enable the event port *port_id* from
> - * receiving events from the specified event queue(s) supplied in *queues*
> - *
> - * An event queue may link to one or more event ports.
> - * The number of links can be established from an event queue to event port is
> - * implementation defined.
> - *
> - * Event queue(s) to event port link establishment can be changed at runtime
> - * without re-configuring the device to support scaling and to reduce the
> - * latency of critical work by establishing the link with more event ports
> - * at runtime.
> - *
> - * @param dev_id
> - *   The identifier of the device.
> - *
> - * @param port_id
> - *   Event port identifier to select the destination port to link.
> + *   Event port identifier to select the destination port to link.
>   *
>   * @param queues
>   *   Points to an array of *nb_links* event queues to be linked
> @@ -2145,6 +1740,288 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
>                              unsigned int cache_size, uint16_t nb_elem,
>                              int socket_id);
>
> +#include <rte_eventdev_core.h>
> +
> +static __rte_always_inline uint16_t
> +__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> +                         const struct rte_event ev[], uint16_t nb_events,
> +                         const event_enqueue_burst_t fn)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> +       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +
> +       if (port_id >= dev->data->nb_ports) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +#endif
> +       rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
> +       /*
> +        * Allow zero cost non burst mode routine invocation if application
> +        * requests nb_events as const one
> +        */
> +       if (nb_events == 1)
> +               return (*dev->enqueue)(dev->data->ports[port_id], ev);
> +       else
> +               return fn(dev->data->ports[port_id], ev, nb_events);
> +}
> +
> +/**
> + * Enqueue a burst of event objects or an event object supplied in *rte_event*
> + * structure on an  event device designated by its *dev_id* through the event
> + * port specified by *port_id*. Each event object specifies the event queue on
> + * which it will be enqueued.
> + *
> + * The *nb_events* parameter is the number of event objects to enqueue which are
> + * supplied in the *ev* array of *rte_event* structure.
> + *
> + * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
> + * enqueued to the same port that their associated events were dequeued from.
> + *
> + * The rte_event_enqueue_burst() function returns the number of
> + * event objects it actually enqueued. A return value equal to *nb_events*
> + * means that all event objects have been enqueued.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param port_id
> + *   The identifier of the event port.
> + * @param ev
> + *   Points to an array of *nb_events* objects of type *rte_event* structure
> + *   which contain the event object enqueue operations to be processed.
> + * @param nb_events
> + *   The number of event objects to enqueue, typically number of
> + *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> + *   available for this port.
> + *
> + * @return
> + *   The number of event objects actually enqueued on the event device. The
> + *   return value can be less than the value of the *nb_events* parameter when
> + *   the event devices queue is full or if invalid parameters are specified in a
> + *   *rte_event*. If the return value is less than *nb_events*, the remaining
> + *   events at the end of ev[] are not consumed and the caller has to take care
> + *   of them, and rte_errno is set accordingly. Possible errno values include:
> + *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> + *              ID is invalid, or an event's sched type doesn't match the
> + *              capabilities of the destination queue.
> + *   - ENOSPC   The event port was backpressured and unable to enqueue
> + *              one or more events. This error code is only applicable to
> + *              closed systems.
> + * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> + */
> +static inline uint16_t
> +rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> +                       const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                                        dev->enqueue_burst);
> +}
> +
> +/**
> + * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
> + * an event device designated by its *dev_id* through the event port specified
> + * by *port_id*.
> + *
> + * Provides the same functionality as rte_event_enqueue_burst(), except that
> + * the application can use this API when all objects in the burst contain
> + * the enqueue operation of type *RTE_EVENT_OP_NEW*. This specialized
> + * function can provide an additional hint to the PMD and optimize if possible.
> + *
> + * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
> + * has event object of operation type != RTE_EVENT_OP_NEW.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param port_id
> + *   The identifier of the event port.
> + * @param ev
> + *   Points to an array of *nb_events* objects of type *rte_event* structure
> + *   which contain the event object enqueue operations to be processed.
> + * @param nb_events
> + *   The number of event objects to enqueue, typically number of
> + *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> + *   available for this port.
> + *
> + * @return
> + *   The number of event objects actually enqueued on the event device. The
> + *   return value can be less than the value of the *nb_events* parameter when
> + *   the event devices queue is full or if invalid parameters are specified in a
> + *   *rte_event*. If the return value is less than *nb_events*, the remaining
> + *   events at the end of ev[] are not consumed and the caller has to take care
> + *   of them, and rte_errno is set accordingly. Possible errno values include:
> + *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> + *              ID is invalid, or an event's sched type doesn't match the
> + *              capabilities of the destination queue.
> + *   - ENOSPC   The event port was backpressured and unable to enqueue
> + *              one or more events. This error code is only applicable to
> + *              closed systems.
> + * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> + * @see rte_event_enqueue_burst()
> + */
> +static inline uint16_t
> +rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
> +                           const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                                        dev->enqueue_new_burst);
> +}
> +
> +/**
> + * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
> + * on an event device designated by its *dev_id* through the event port
> + * specified by *port_id*.
> + *
> + * Provides the same functionality as rte_event_enqueue_burst(), except that
> + * the application can use this API when all objects in the burst contain
> + * the enqueue operation of type *RTE_EVENT_OP_FORWARD*. This specialized
> + * function can provide an additional hint to the PMD and optimize if possible.
> + *
> + * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
> + * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param port_id
> + *   The identifier of the event port.
> + * @param ev
> + *   Points to an array of *nb_events* objects of type *rte_event* structure
> + *   which contain the event object enqueue operations to be processed.
> + * @param nb_events
> + *   The number of event objects to enqueue, typically number of
> + *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> + *   available for this port.
> + *
> + * @return
> + *   The number of event objects actually enqueued on the event device. The
> + *   return value can be less than the value of the *nb_events* parameter when
> + *   the event devices queue is full or if invalid parameters are specified in a
> + *   *rte_event*. If the return value is less than *nb_events*, the remaining
> + *   events at the end of ev[] are not consumed and the caller has to take care
> + *   of them, and rte_errno is set accordingly. Possible errno values include:
> + *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> + *              ID is invalid, or an event's sched type doesn't match the
> + *              capabilities of the destination queue.
> + *   - ENOSPC   The event port was backpressured and unable to enqueue
> + *              one or more events. This error code is only applicable to
> + *              closed systems.
> + * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> + * @see rte_event_enqueue_burst()
> + */
> +static inline uint16_t
> +rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
> +                               const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                                        dev->enqueue_forward_burst);
> +}
> +
> +/**
> + * Dequeue a burst of event objects or an event object from the event port
> + * designated by its *event_port_id*, on an event device designated
> + * by its *dev_id*.
> + *
> + * rte_event_dequeue_burst() does not dictate the specifics of scheduling
> + * algorithm as each eventdev driver may have different criteria to schedule
> + * an event. However, in general, from an application perspective scheduler may
> + * use the following scheme to dispatch an event to the port.
> + *
> + * 1) Selection of event queue based on
> + *   a) The list of event queues are linked to the event port.
> + *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
> + *   queue selection from list is based on event queue priority relative to
> + *   other event queue supplied as *priority* in rte_event_queue_setup()
> + *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
> + *   queue selection from the list is based on event priority supplied as
> + *   *priority* in rte_event_enqueue_burst()
> + * 2) Selection of event
> + *   a) The number of flows available in selected event queue.
> + *   b) Schedule type method associated with the event
> + *
> + * The *nb_events* parameter is the maximum number of event objects to dequeue
> + * which are returned in the *ev* array of *rte_event* structure.
> + *
> + * The rte_event_dequeue_burst() function returns the number of event objects
> + * it actually dequeued. A return value equal to *nb_events* means that all
> + * event objects have been dequeued.
> + *
> + * The number of events dequeued is the number of scheduler contexts held by
> + * this port. These contexts are automatically released in the next
> + * rte_event_dequeue_burst() invocation if the port supports implicit
> + * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
> + * operation can be used to release the contexts early.
> + *
> + * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
> + * enqueued to the same port that their associated events were dequeued from.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param port_id
> + *   The identifier of the event port.
> + * @param[out] ev
> + *   Points to an array of *nb_events* objects of type *rte_event* structure
> + *   for output to be populated with the dequeued event objects.
> + * @param nb_events
> + *   The maximum number of event objects to dequeue, typically number of
> + *   rte_event_port_dequeue_depth() available for this port.
> + *
> + * @param timeout_ticks
> + *   - 0 no-wait, returns immediately if there is no event.
> + *   - >0 wait for the event, if the device is configured with
> + *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
> + *   at least one event is available or *timeout_ticks* time.
> + *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
> + *   then this function will wait until an event is available or
> + *   *dequeue_timeout_ns* ns which was previously supplied to
> + *   rte_event_dev_configure()
> + *
> + * @return
> + * The number of event objects actually dequeued from the port. The return
> + * value can be less than the value of the *nb_events* parameter when the
> + * event port's queue is not full.
> + *
> + * @see rte_event_port_dequeue_depth()
> + */
> +static inline uint16_t
> +rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
> +                       uint16_t nb_events, uint64_t timeout_ticks)
> +{
> +       struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> +       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +
> +       if (port_id >= dev->data->nb_ports) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +#endif
> +       rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
> +       /*
> +        * Allow zero cost non burst mode routine invocation if application
> +        * requests nb_events as const one
> +        */
> +       if (nb_events == 1)
> +               return (*dev->dequeue)(dev->data->ports[port_id], ev,
> +                                      timeout_ticks);
> +       else
> +               return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
> +                                            nb_events, timeout_ticks);
> +}
> +
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
> new file mode 100644
> index 0000000000..97dfec1ae1
> --- /dev/null
> +++ b/lib/eventdev/rte_eventdev_core.h
> @@ -0,0 +1,144 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2018 Intel Corporation.
> + * Copyright(C) 2021 Marvell.
> + * Copyright 2016 NXP
> + * All rights reserved.
> + */
> +
> +#ifndef _RTE_EVENTDEV_CORE_H_
> +#define _RTE_EVENTDEV_CORE_H_
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
> +/**< @internal Enqueue event on port of a device */
> +
> +typedef uint16_t (*event_enqueue_burst_t)(void *port,
> +                                         const struct rte_event ev[],
> +                                         uint16_t nb_events);
> +/**< @internal Enqueue burst of events on port of a device */
> +
> +typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
> +                                   uint64_t timeout_ticks);
> +/**< @internal Dequeue event from port of a device */
> +
> +typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
> +                                         uint16_t nb_events,
> +                                         uint64_t timeout_ticks);
> +/**< @internal Dequeue burst of events from port of a device */
> +
> +typedef uint16_t (*event_tx_adapter_enqueue)(void *port, struct rte_event ev[],
> +                                            uint16_t nb_events);
> +/**< @internal Enqueue burst of events on port of a device */
> +
> +typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
> +                                                      struct rte_event ev[],
> +                                                      uint16_t nb_events);
> +/**< @internal Enqueue burst of events on port of a device supporting
> + * burst having same destination Ethernet port & Tx queue.
> + */
> +
> +typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
> +                                                struct rte_event ev[],
> +                                                uint16_t nb_events);
> +/**< @internal Enqueue burst of events on crypto adapter */
> +
> +#define RTE_EVENTDEV_NAME_MAX_LEN (64)
> +/**< @internal Max length of name of event PMD */
> +
> +/**
> + * @internal
> + * The data part, with no function pointers, associated with each device.
> + *
> + * This structure is safe to place in shared memory to be common among
> + * different processes in a multi-process configuration.
> + */
> +struct rte_eventdev_data {
> +       int socket_id;
> +       /**< Socket ID where memory is allocated */
> +       uint8_t dev_id;
> +       /**< Device ID for this instance */
> +       uint8_t nb_queues;
> +       /**< Number of event queues. */
> +       uint8_t nb_ports;
> +       /**< Number of event ports. */
> +       void **ports;
> +       /**< Array of pointers to ports. */
> +       struct rte_event_port_conf *ports_cfg;
> +       /**< Array of port configuration structures. */
> +       struct rte_event_queue_conf *queues_cfg;
> +       /**< Array of queue configuration structures. */
> +       uint16_t *links_map;
> +       /**< Memory to store queues to port connections. */
> +       void *dev_private;
> +       /**< PMD-specific private data */
> +       uint32_t event_dev_cap;
> +       /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
> +       struct rte_event_dev_config dev_conf;
> +       /**< Configuration applied to device. */
> +       uint8_t service_inited;
> +       /* Service initialization state */
> +       uint32_t service_id;
> +       /* Service ID*/
> +       void *dev_stop_flush_arg;
> +       /**< User-provided argument for event flush function */
> +
> +       RTE_STD_C11
> +       uint8_t dev_started : 1;
> +       /**< Device state: STARTED(1)/STOPPED(0) */
> +
> +       char name[RTE_EVENTDEV_NAME_MAX_LEN];
> +       /**< Unique identifier name */
> +
> +       uint64_t reserved_64s[4]; /**< Reserved for future fields */
> +       void *reserved_ptrs[4];   /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +/** @internal The data structure associated with each event device. */
> +struct rte_eventdev {
> +       event_enqueue_t enqueue;
> +       /**< Pointer to PMD enqueue function. */
> +       event_enqueue_burst_t enqueue_burst;
> +       /**< Pointer to PMD enqueue burst function. */
> +       event_enqueue_burst_t enqueue_new_burst;
> +       /**< Pointer to PMD enqueue burst function(op new variant) */
> +       event_enqueue_burst_t enqueue_forward_burst;
> +       /**< Pointer to PMD enqueue burst function(op forward variant) */
> +       event_dequeue_t dequeue;
> +       /**< Pointer to PMD dequeue function. */
> +       event_dequeue_burst_t dequeue_burst;
> +       /**< Pointer to PMD dequeue burst function. */
> +       event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
> +       /**< Pointer to PMD eth Tx adapter burst enqueue function with
> +        * events destined to same Eth port & Tx queue.
> +        */
> +       event_tx_adapter_enqueue txa_enqueue;
> +       /**< Pointer to PMD eth Tx adapter enqueue function. */
> +       struct rte_eventdev_data *data;
> +       /**< Pointer to device data */
> +       struct eventdev_ops *dev_ops;
> +       /**< Functions exported by PMD */
> +       struct rte_device *dev;
> +       /**< Device info. supplied by probing */
> +
> +       RTE_STD_C11
> +       uint8_t attached : 1;
> +       /**< Flag indicating the device is attached */
> +
> +       event_crypto_adapter_enqueue ca_enqueue;
> +       /**< Pointer to PMD crypto adapter enqueue function. */
> +
> +       uint64_t reserved_64s[4]; /**< Reserved for future fields */
> +       void *reserved_ptrs[3];   /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +extern struct rte_eventdev *rte_eventdevs;
> +/** @internal The pool of rte_eventdev structures. */
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /*_RTE_EVENTDEV_CORE_H_*/
> --
> 2.17.1
>

^ permalink raw reply	[flat|nested] 119+ messages in thread
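
For orientation while reading the enqueue/dequeue doc comments quoted above, here is a minimal application-side sketch of how the burst APIs fit together. It is illustrative only and not part of the patch; dev_id, port_id, timeout_ticks and next_stage_queue are assumed to have been set up during device/port configuration.

        /* Link the port to all configured queues at normal priority
         * (passing NULL queues/priorities is documented behaviour of
         * rte_event_port_link()).
         */
        rte_event_port_link(dev_id, port_id, NULL, NULL, 0);

        struct rte_event ev[32];
        uint16_t n, i, done;

        /* Dequeue up to 32 events; may return fewer, or 0 on timeout. */
        n = rte_event_dequeue_burst(dev_id, port_id, ev, RTE_DIM(ev),
                                    timeout_ticks);
        for (i = 0; i < n; i++) {
                /* ... process ev[i].mbuf or ev[i].event_ptr ... */
                ev[i].op = RTE_EVENT_OP_FORWARD;   /* forward on the same port */
                ev[i].queue_id = next_stage_queue; /* assumed next pipeline stage */
        }

        /* Enqueue may accept fewer than n events; retry the remainder. */
        for (done = 0; done < n; )
                done += rte_event_enqueue_burst(dev_id, port_id, ev + done,
                                                n - done);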

* Re: [dpdk-dev] [PATCH v3 04/14] eventdev: move inline APIs into separate structure
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 04/14] eventdev: move inline APIs into separate structure pbhagavatula
@ 2021-10-14  9:20       ` Jerin Jacob
  0 siblings, 0 replies; 119+ messages in thread
From: Jerin Jacob @ 2021-10-14  9:20 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: Jerin Jacob, Ray Kinsella, dpdk-dev

On Wed, Oct 6, 2021 at 12:21 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Move fastpath inline function pointers from rte_eventdev into a
> separate structure accessed via a flat array.
> The intension is to make rte_eventdev and related structures private

intention

> to avoid future API/ABI breakages.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Ray Kinsella <mdr@ashroe.eu>
> ---
>  lib/eventdev/eventdev_pmd.h      |  38 +++++++++++
>  lib/eventdev/eventdev_pmd_pci.h  |   4 +-
>  lib/eventdev/eventdev_private.c  | 112 +++++++++++++++++++++++++++++++
>  lib/eventdev/meson.build         |   1 +
>  lib/eventdev/rte_eventdev.c      |  22 +++++-
>  lib/eventdev/rte_eventdev_core.h |  28 ++++++++
>  lib/eventdev/version.map         |   6 ++
>  7 files changed, 209 insertions(+), 2 deletions(-)
>  create mode 100644 lib/eventdev/eventdev_private.c
>
 sources = files(
> +        'eventdev_private.c',
>          'rte_eventdev.c',
>          'rte_event_ring.c',
>          'eventdev_trace_points.c',

Since you are reworking, please sort this in alphabetical order.


>
> +struct rte_event_fp_ops {
> +       event_enqueue_t enqueue;
> +       /**< PMD enqueue function. */
> +       event_enqueue_burst_t enqueue_burst;
> +       /**< PMD enqueue burst function. */
> +       event_enqueue_burst_t enqueue_new_burst;
> +       /**< PMD enqueue burst new function. */
> +       event_enqueue_burst_t enqueue_forward_burst;
> +       /**< PMD enqueue burst fwd function. */
> +       event_dequeue_t dequeue;
> +       /**< PMD dequeue function. */
> +       event_dequeue_burst_t dequeue_burst;
> +       /**< PMD dequeue burst function. */
> +       event_tx_adapter_enqueue_t txa_enqueue;
> +       /**< PMD Tx adapter enqueue function. */
> +       event_tx_adapter_enqueue_t txa_enqueue_same_dest;
> +       /**< PMD Tx adapter enqueue same destination function. */
> +       event_crypto_adapter_enqueue_t ca_enqueue;
> +       /**< PMD Crypto adapter enqueue function. */
> +       uintptr_t reserved[2];
> +
> +       void **data;

Since access to data is a must for all ops, please move it to the front.
Also, you can then merge reserved and reserved2.


> +       /**< points to array of internal port data pointers */
> +       uintptr_t reserved2[4];
> +} __rte_cache_aligned;
> +
> +extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
> +
>  #define RTE_EVENTDEV_NAME_MAX_LEN (64)
>  /**< @internal Max length of name of event PMD */
>
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index 5f1fe412a4..a3a732089b 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -85,6 +85,9 @@ DPDK_22 {
>         rte_event_timer_cancel_burst;
>         rte_eventdevs;
>
> +       #added in 21.11
> +       rte_event_fp_ops;
> +
>         local: *;
>  };
>
> @@ -141,6 +144,9 @@ EXPERIMENTAL {
>  INTERNAL {
>         global:
>
> +       event_dev_fp_ops_reset;
> +       event_dev_fp_ops_set;
> +       event_dev_probing_finish;
>         rte_event_pmd_selftest_seqn_dynfield_offset;
>         rte_event_pmd_allocate;
>         rte_event_pmd_get_named_dev;
> --
> 2.17.1
>

^ permalink raw reply	[flat|nested] 119+ messages in thread
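
To make the layout under review a bit more concrete, here is a rough sketch of how the fast-path inline API could index the proposed flat array. Debug checks and tracing are omitted, and since the review above asks to move the data pointer to the front and merge the reserved fields, the exact structure layout should be treated as an assumption, not the final version.

        static inline uint16_t
        rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
                                const struct rte_event ev[], uint16_t nb_events)
        {
                const struct rte_event_fp_ops *fp_ops = &rte_event_fp_ops[dev_id];
                void *port = fp_ops->data[port_id]; /* per-port private data */

                if (nb_events == 1)
                        return (*fp_ops->enqueue)(port, ev);

                return (*fp_ops->enqueue_burst)(port, ev, nb_events);
        }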

* Re: [dpdk-dev] [PATCH v3 05/14] drivers/event: invoke probing finish function
  2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 05/14] drivers/event: invoke probing finish function pbhagavatula
@ 2021-10-14  9:22       ` Jerin Jacob
  0 siblings, 0 replies; 119+ messages in thread
From: Jerin Jacob @ 2021-10-14  9:22 UTC (permalink / raw)
  To: Pavan Nikhilesh
  Cc: Jerin Jacob, Hemant Agrawal, Nipun Gupta, Mattias Rönnblom,
	Liang Ma, Peter Mccarthy, Harry van Haaren, dpdk-dev

On Wed, Oct 6, 2021 at 12:21 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Invoke event_dev_probing_finish() functions at the end of probing,

functions -> function

> this function sets the function pointers in the fp_ops flat array.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---

^ permalink raw reply	[flat|nested] 119+ messages in thread
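
For readers jumping into this sub-thread, a sketch of the driver-side change being described: at the end of a successful probe, once the fast-path pointers are set on the rte_eventdev, the PMD calls the new helper so the fp_ops flat array gets populated. The my_* names are hypothetical and the helper's exact signature is assumed from the commit message.

        static int
        my_pmd_probe(struct rte_vdev_device *vdev)
        {
                struct rte_eventdev *dev;

                dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
                                              sizeof(struct my_pmd_priv),
                                              rte_socket_id());
                if (dev == NULL)
                        return -ENODEV;

                dev->dev_ops = &my_pmd_ops;
                dev->enqueue_burst = my_enqueue_burst;
                dev->dequeue_burst = my_dequeue_burst;

                /* New in this series: publish the pointers to the flat array. */
                event_dev_probing_finish(dev);

                return 0;
        }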

* Re: [dpdk-dev] [PATCH v3 14/14] eventdev: mark trace variables as internal
  2021-10-06  7:11       ` David Marchand
@ 2021-10-14  9:28         ` Jerin Jacob
  0 siblings, 0 replies; 119+ messages in thread
From: Jerin Jacob @ 2021-10-14  9:28 UTC (permalink / raw)
  To: David Marchand
  Cc: Pavan Nikhilesh, Ray Kinsella, Jerin Jacob Kollanukkaran, dev

On Wed, Oct 6, 2021 at 12:41 PM David Marchand
<david.marchand@redhat.com> wrote:
>
> Hello Pavan, Ray,
>
> On Wed, Oct 6, 2021 at 8:52 AM <pbhagavatula@marvell.com> wrote:
> >
> > From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >
> > Mark rte_trace global variables as internal i.e. remove them
> > from experimental section of version map.
> > Some of them are used in inline APIs, mark those as global.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> > Acked-by: Ray Kinsella <mdr@ashroe.eu>
>
> Please, sort those symbols.
> I check with ./devtools/update-abi.sh $(cat ABI_VERSION)
>
>
> > ---
> >  lib/eventdev/version.map | 77 ++++++++++++++++++----------------------
> >  1 file changed, 35 insertions(+), 42 deletions(-)
> >
> > diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> > index 068d186c66..617fff0ae6 100644
> > --- a/lib/eventdev/version.map
> > +++ b/lib/eventdev/version.map
> > @@ -88,57 +88,19 @@ DPDK_22 {
> >         rte_event_vector_pool_create;
> >         rte_eventdevs;
> >
> > -       #added in 21.11
> > -       rte_event_fp_ops;
> > -
> > -       local: *;
> > -};
> > -
> > -EXPERIMENTAL {
> > -       global:
> > -
> >         # added in 20.05
>
> At the next ABI bump, ./devtools/update-abi.sh will strip those
> comments from the stable section.
> You can notice this when you run ./devtools/update-abi.sh $CURRENT_ABI
> as suggested above.

Please follow David's suggestion on sorting the map file.

> I would strip the comments now that the symbols are going to stable.
> Ray, do you have an opinion?
>
>
> --
> David Marchand
>

^ permalink raw reply	[flat|nested] 119+ messages in thread
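
To spell out what the two review comments above amount to, a fragment of how the version.map could end up looking once the symbols are sorted and the "# added in ..." markers are dropped from the now-stable section. The symbol names are taken from the diffs in this thread; the exact set is whatever the final revision carries.

        DPDK_22 {
                global:
                ...
                rte_event_timer_cancel_burst;
                rte_event_vector_pool_create;
                rte_eventdevs;

                local: *;
        };

        INTERNAL {
                global:

                event_dev_fp_ops_reset;
                event_dev_fp_ops_set;
                event_dev_probing_finish;
                rte_event_pmd_allocate;
                rte_event_pmd_get_named_dev;
                ...
                rte_event_pmd_vdev_uninit;
        };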

* [dpdk-dev] [PATCH v4 01/14] eventdev: make driver interface as internal
  2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
                       ` (14 preceding siblings ...)
  2021-10-14  9:08     ` Jerin Jacob
@ 2021-10-15 19:02     ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 02/14] eventdev: separate internal structures pbhagavatula
                         ` (14 more replies)
  15 siblings, 15 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Mattias Rönnblom, Liang Ma,
	Peter Mccarthy, Harry van Haaren, Abhinandan Gujjar,
	Ray Kinsella
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark all the driver specific functions as internal, remove
`rte` prefix from `struct rte_eventdev_ops`.
Remove experimental tag from internal functions.
Remove `eventdev_pmd.h` from non-internal header files.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 v4 Changes:
 - Update release notes. (Jerin)
 - Rearrange fp_ops fields. (Jerin)
 - Free timer array memory when freeing the last adapter. (Erik)
 - Rebase onto next-event.
 - Fix spell checks.
 - Rearrange version.map (David)

 v3 Changes:
 - Reset fp_ops when the device is torn down.
 - Add `event_dev_probing_finish()`; this function is used for
   post-initialization processing. In the current use case we use it to
   initialize the fastpath ops.

 v2 Changes:
 - Rework inline flat array by adding port data into it.
 - Rearrange rte_event_timer elements.

 drivers/event/cnxk/cn10k_eventdev.c        |  6 ++---
 drivers/event/cnxk/cn9k_eventdev.c         | 10 ++++-----
 drivers/event/dlb2/dlb2.c                  |  2 +-
 drivers/event/dpaa/dpaa_eventdev.c         |  2 +-
 drivers/event/dpaa2/dpaa2_eventdev.c       |  2 +-
 drivers/event/dsw/dsw_evdev.c              |  2 +-
 drivers/event/octeontx/ssovf_evdev.c       |  2 +-
 drivers/event/octeontx/ssovf_worker.c      |  4 ++--
 drivers/event/octeontx2/otx2_evdev.c       | 26 +++++++++++-----------
 drivers/event/opdl/opdl_evdev.c            |  2 +-
 drivers/event/skeleton/skeleton_eventdev.c |  2 +-
 drivers/event/sw/sw_evdev.c                |  2 +-
 lib/eventdev/eventdev_pmd.h                |  6 ++++-
 lib/eventdev/eventdev_pmd_pci.h            |  4 +++-
 lib/eventdev/eventdev_pmd_vdev.h           |  2 ++
 lib/eventdev/meson.build                   |  6 +++++
 lib/eventdev/rte_event_crypto_adapter.h    |  1 -
 lib/eventdev/rte_eventdev.h                | 25 ++++++++++++---------
 lib/eventdev/version.map                   | 17 +++++++-------
 19 files changed, 70 insertions(+), 53 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index bfd470cffd..612c299b59 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -380,7 +380,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
@@ -388,7 +388,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
@@ -788,7 +788,7 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }

-static struct rte_eventdev_ops cn10k_sso_dev_ops = {
+static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 806dcb0a45..d757da7c37 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -514,7 +514,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
@@ -522,7 +522,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
@@ -530,7 +530,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
@@ -538,7 +538,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] =                                         \
@@ -1060,7 +1060,7 @@ cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }

-static struct rte_eventdev_ops cn9k_sso_dev_ops = {
+static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 252bbd8d5e..c8742ddb2c 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -4384,7 +4384,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
 	struct dlb2_eventdev *dlb2;

 	/* Expose PMD's eventdev interface */
-	static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
+	static struct eventdev_ops dlb2_eventdev_entry_ops = {
 		.dev_infos_get    = dlb2_eventdev_info_get,
 		.dev_configure    = dlb2_eventdev_configure,
 		.dev_start        = dlb2_eventdev_start,
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index ec74160325..9f14390d28 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -925,7 +925,7 @@ dpaa_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }

-static struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct eventdev_ops dpaa_eventdev_ops = {
 	.dev_infos_get    = dpaa_event_dev_info_get,
 	.dev_configure    = dpaa_event_dev_configure,
 	.dev_start        = dpaa_event_dev_start,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 5ccf22f77f..d577f64824 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1015,7 +1015,7 @@ dpaa2_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }

-static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static struct eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
 	.dev_configure    = dpaa2_eventdev_configure,
 	.dev_start        = dpaa2_eventdev_start,
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 2301a4b7a0..01f060fff3 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -398,7 +398,7 @@ dsw_crypto_adapter_caps_get(const struct rte_eventdev *dev  __rte_unused,
 	return 0;
 }

-static struct rte_eventdev_ops dsw_evdev_ops = {
+static struct eventdev_ops dsw_evdev_ops = {
 	.port_setup = dsw_port_setup,
 	.port_def_conf = dsw_port_def_conf,
 	.port_release = dsw_port_release,
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index b93f6ec8c6..4a8c6a13a5 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -790,7 +790,7 @@ ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev,
 }

 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops ssovf_ops = {
+static struct eventdev_ops ssovf_ops = {
 	.dev_infos_get    = ssovf_info_get,
 	.dev_configure    = ssovf_configure,
 	.queue_def_conf   = ssovf_queue_def_conf,
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 8b056ddc5a..2df940f0f1 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -343,11 +343,11 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)

 	dev->ca_enqueue = ssow_crypto_adapter_enqueue;

-	const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+	const event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
 #define T(name, f3, f2, f1, f0, sz, flags)				\
 	[f3][f2][f1][f0] =  sso_event_tx_adapter_enqueue_ ##name,

-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+		SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};

diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 38a6b651d9..f26bed334f 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -178,41 +178,41 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_tx_adptr_enq_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_tx_adptr_enq_seg_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_tx_adptr_enq_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

 	event_dev->enqueue			= otx2_ssogws_enq;
 	event_dev->enqueue_burst		= otx2_ssogws_enq_burst;
@@ -1596,7 +1596,7 @@ otx2_sso_close(struct rte_eventdev *event_dev)
 }

 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops otx2_sso_ops = {
+static struct eventdev_ops otx2_sso_ops = {
 	.dev_infos_get    = otx2_sso_info_get,
 	.dev_configure    = otx2_sso_configure,
 	.queue_def_conf   = otx2_sso_queue_def_conf,
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index cfa9733b64..739dc64c82 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -609,7 +609,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
 static int
 opdl_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_opdl_ops = {
+	static struct eventdev_ops evdev_opdl_ops = {
 		.dev_configure = opdl_dev_configure,
 		.dev_infos_get = opdl_info_get,
 		.dev_close = opdl_close,
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 6fd1102596..c9e17e7cb1 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -320,7 +320,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)


 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops skeleton_eventdev_ops = {
+static struct eventdev_ops skeleton_eventdev_ops = {
 	.dev_infos_get    = skeleton_eventdev_info_get,
 	.dev_configure    = skeleton_eventdev_configure,
 	.dev_start        = skeleton_eventdev_start,
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a5e6ca22e8..9b72073322 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -945,7 +945,7 @@ static int32_t sw_sched_service_func(void *args)
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_sw_ops = {
+	static struct eventdev_ops evdev_sw_ops = {
 			.dev_configure = sw_dev_configure,
 			.dev_infos_get = sw_info_get,
 			.dev_close = sw_close,
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 7ac31e9f92..688f30d45e 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -99,6 +99,7 @@ extern struct rte_eventdev *rte_eventdevs;
  * @return
  *   - The rte_eventdev structure pointer for the given device ID.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_get_named_dev(const char *name)
 {
@@ -127,6 +128,7 @@ rte_event_pmd_get_named_dev(const char *name)
  * @return
  *   - If the device index is valid (1) or not (0).
  */
+__rte_internal
 static inline unsigned
 rte_event_pmd_is_valid_dev(uint8_t dev_id)
 {
@@ -1056,7 +1058,7 @@ typedef int (*eventdev_eth_tx_adapter_stats_reset_t)(uint8_t id,
 					const struct rte_eventdev *dev);

 /** Event device operations function pointer table */
-struct rte_eventdev_ops {
+struct eventdev_ops {
 	eventdev_info_get_t dev_infos_get;	/**< Get device info. */
 	eventdev_configure_t dev_configure;	/**< Configure device. */
 	eventdev_start_t dev_start;		/**< Start device. */
@@ -1173,6 +1175,7 @@ struct rte_eventdev_ops {
  * @return
  *   - Slot in the rte_dev_devices array for a new device;
  */
+__rte_internal
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id);

@@ -1184,6 +1187,7 @@ rte_event_pmd_allocate(const char *name, int socket_id);
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);

diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index 1545b240f2..2f12a5eb24 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -31,7 +31,7 @@ typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
  * interface.  Same as rte_event_pmd_pci_probe, except caller can specify
  * the name.
  */
-__rte_experimental
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 			      struct rte_pci_device *pci_dev,
@@ -85,6 +85,7 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .probe function to attach to a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
 			    struct rte_pci_device *pci_dev,
@@ -108,6 +109,7 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .remove function to detach a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
 			     eventdev_pmd_pci_callback_t devuninit)
diff --git a/lib/eventdev/eventdev_pmd_vdev.h b/lib/eventdev/eventdev_pmd_vdev.h
index 2d33924e6c..d9ee7277dd 100644
--- a/lib/eventdev/eventdev_pmd_vdev.h
+++ b/lib/eventdev/eventdev_pmd_vdev.h
@@ -37,6 +37,7 @@
  *   - Eventdev pointer if device is successfully created.
  *   - NULL if device cannot be created.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
 		int socket_id)
@@ -74,6 +75,7 @@ rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 static inline int
 rte_event_pmd_vdev_uninit(const char *name)
 {
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 32abeba794..523ea9ccae 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,5 +27,11 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+driver_sdk_headers += files(
+        'eventdev_pmd.h',
+        'eventdev_pmd_pci.h',
+        'eventdev_pmd_vdev.h',
+)
+
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
 deps += ['telemetry']
diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index edbd5c61a3..1a8ff75384 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -171,7 +171,6 @@ extern "C" {
 #include <stdint.h>

 #include "rte_eventdev.h"
-#include "eventdev_pmd.h"

 /**
  * Crypto event adapter mode
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index a9c496fb62..0c701888d5 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,7 +1324,7 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);

-struct rte_eventdev_ops;
+struct eventdev_ops;
 struct rte_eventdev;

 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
@@ -1342,18 +1342,21 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
 /**< @internal Dequeue burst of events from port of a device */

-typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
+					       struct rte_event ev[],
+					       uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */

-typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
-		struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
+							 struct rte_event ev[],
+							 uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device supporting
  * burst having same destination Ethernet port & Tx queue.
  */

-typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */

 #define RTE_EVENTDEV_NAME_MAX_LEN	(64)
@@ -1421,15 +1424,15 @@ struct rte_eventdev {
 	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
+	event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
 	/**< Pointer to PMD eth Tx adapter burst enqueue function with
 	 * events destined to same Eth port & Tx queue.
 	 */
-	event_tx_adapter_enqueue txa_enqueue;
+	event_tx_adapter_enqueue_t txa_enqueue;
 	/**< Pointer to PMD eth Tx adapter enqueue function. */
 	struct rte_eventdev_data *data;
 	/**< Pointer to device data */
-	struct rte_eventdev_ops *dev_ops;
+	struct eventdev_ops *dev_ops;
 	/**< Functions exported by PMD */
 	struct rte_device *dev;
 	/**< Device info. supplied by probing */
@@ -1438,7 +1441,7 @@ struct rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */

-	event_crypto_adapter_enqueue ca_enqueue;
+	event_crypto_adapter_enqueue_t ca_enqueue;
 	/**< Pointer to PMD crypto adapter enqueue function. */

 	uint64_t reserved_64s[4]; /**< Reserved for future fields */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 7de18497a6..cd72f45d29 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -55,12 +55,6 @@ DPDK_22 {
 	rte_event_eth_tx_adapter_stats_get;
 	rte_event_eth_tx_adapter_stats_reset;
 	rte_event_eth_tx_adapter_stop;
-	rte_event_pmd_allocate;
-	rte_event_pmd_pci_probe;
-	rte_event_pmd_pci_remove;
-	rte_event_pmd_release;
-	rte_event_pmd_vdev_init;
-	rte_event_pmd_vdev_uninit;
 	rte_event_port_attr_get;
 	rte_event_port_default_conf_get;
 	rte_event_port_link;
@@ -136,8 +130,6 @@ EXPERIMENTAL {

 	# changed in 20.11
 	__rte_eventdev_trace_port_setup;
-	# added in 20.11
-	rte_event_pmd_pci_probe_named;
 	# added in 21.11
 	rte_event_eth_rx_adapter_create_with_params;

@@ -152,4 +144,13 @@ INTERNAL {
 	global:

 	rte_event_pmd_selftest_seqn_dynfield_offset;
+	rte_event_pmd_allocate;
+	rte_event_pmd_get_named_dev;
+	rte_event_pmd_is_valid_dev;
+	rte_event_pmd_pci_probe;
+	rte_event_pmd_pci_probe_named;
+	rte_event_pmd_pci_remove;
+	rte_event_pmd_release;
+	rte_event_pmd_vdev_init;
+	rte_event_pmd_vdev_uninit;
 };
--
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread
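
As a quick orientation for driver maintainers reading this rename patch, a minimal sketch of what a PMD provides after it: the ops table uses the un-prefixed struct name and the fast-path typedefs carry the new _t suffix. The my_* names are hypothetical placeholders, not part of any driver touched here.

        #include <eventdev_pmd.h>       /* internal, driver-only header */

        static struct eventdev_ops my_pmd_ops = {
                .dev_infos_get = my_info_get,
                .dev_configure = my_configure,
                .queue_def_conf = my_queue_def_conf,
        };

        static void
        my_fp_fns_set(struct rte_eventdev *event_dev)
        {
                event_dev->enqueue = my_enqueue;
                event_dev->enqueue_burst = my_enqueue_burst;
                event_dev->dequeue = my_dequeue;
                event_dev->dequeue_burst = my_dequeue_burst;
                /* renamed typedef: event_tx_adapter_enqueue_t */
                event_dev->txa_enqueue = my_txa_enqueue;
        }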

* [dpdk-dev] [PATCH v4 02/14] eventdev: separate internal structures
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 03/14] eventdev: allocate max space for internal arrays pbhagavatula
                         ` (13 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Create rte_eventdev_core.h and move all the internal data structures
to this file. These structures are mostly used by drivers, but they
need to be in the public header file as they are accessed by datapath
inline functions for performance reasons.
The accessibility of these data structures is not changed.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/eventdev_pmd.h      |   3 -
 lib/eventdev/meson.build         |   3 +
 lib/eventdev/rte_eventdev.h      | 718 +++++++++++++------------------
 lib/eventdev/rte_eventdev_core.h | 138 ++++++
 4 files changed, 437 insertions(+), 425 deletions(-)
 create mode 100644 lib/eventdev/rte_eventdev_core.h

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 688f30d45e..9b2aec8371 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -87,9 +87,6 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
-extern struct rte_eventdev *rte_eventdevs;
-/** The pool of rte_eventdev structures. */
-
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 523ea9ccae..8b51fde361 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,6 +27,9 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+indirect_headers += files(
+        'rte_eventdev_core.h',
+)
 driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 0c701888d5..1b11d4576d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,317 +1324,6 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);
 
-struct eventdev_ops;
-struct rte_eventdev;
-
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
-/**< @internal Enqueue event on port of a device */
-
-typedef uint16_t (*event_enqueue_burst_t)(void *port,
-			const struct rte_event ev[], uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-		uint64_t timeout_ticks);
-/**< @internal Dequeue event from port of a device */
-
-typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-/**< @internal Dequeue burst of events from port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
-					       struct rte_event ev[],
-					       uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
-							 struct rte_event ev[],
-							 uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device supporting
- * burst having same destination Ethernet port & Tx queue.
- */
-
-typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
-						   struct rte_event ev[],
-						   uint16_t nb_events);
-/**< @internal Enqueue burst of events on crypto adapter */
-
-#define RTE_EVENTDEV_NAME_MAX_LEN	(64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void **ports;
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
-	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue_t txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue_t ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
-static __rte_always_inline uint16_t
-__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events,
-			const event_enqueue_burst_t fn)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
-	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
-}
-
-/**
- * Enqueue a burst of events objects or an event object supplied in *rte_event*
- * structure on an  event device designated by its *dev_id* through the event
- * port specified by *port_id*. Each event object specifies the event queue on
- * which it will be enqueued.
- *
- * The *nb_events* parameter is the number of event objects to enqueue which are
- * supplied in the *ev* array of *rte_event* structure.
- *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
- *
- * The rte_event_enqueue_burst() function returns the number of
- * events objects it actually enqueued. A return value equal to *nb_events*
- * means that all event objects have been enqueued.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- */
-static inline uint16_t
-rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
- * an event device designated by its *dev_id* through the event port specified
- * by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_NEW.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_new_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
- * on an event device designated by its *dev_id* through the event port
- * specified by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_FORWARD.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_forward_burst);
-}
-
 /**
  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
  *
@@ -1665,124 +1354,27 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 					uint64_t *timeout_ticks);
 
 /**
- * Dequeue a burst of events objects or an event object from the event port
- * designated by its *event_port_id*, on an event device designated
- * by its *dev_id*.
- *
- * rte_event_dequeue_burst() does not dictate the specifics of scheduling
- * algorithm as each eventdev driver may have different criteria to schedule
- * an event. However, in general, from an application perspective scheduler may
- * use the following scheme to dispatch an event to the port.
- *
- * 1) Selection of event queue based on
- *   a) The list of event queues are linked to the event port.
- *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
- *   queue selection from list is based on event queue priority relative to
- *   other event queue supplied as *priority* in rte_event_queue_setup()
- *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
- *   queue selection from the list is based on event priority supplied as
- *   *priority* in rte_event_enqueue_burst()
- * 2) Selection of event
- *   a) The number of flows available in selected event queue.
- *   b) Schedule type method associated with the event
- *
- * The *nb_events* parameter is the maximum number of event objects to dequeue
- * which are returned in the *ev* array of *rte_event* structure.
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated service priority
+ * supplied in *priorities* on the event device designated by its *dev_id*.
  *
- * The rte_event_dequeue_burst() function returns the number of events objects
- * it actually dequeued. A return value equal to *nb_events* means that all
- * event objects have been dequeued.
+ * The link establishment shall enable the event port *port_id* to
+ * receive events from the specified event queue(s) supplied in *queues*.
  *
- * The number of events dequeued is the number of scheduler contexts held by
- * this port. These contexts are automatically released in the next
- * rte_event_dequeue_burst() invocation if the port supports implicit
- * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
- * operation can be used to release the contexts early.
+ * An event queue may link to one or more event ports.
+ * The number of links that can be established from an event queue to an
+ * event port is implementation defined.
  *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
+ * Event queue(s) to event port links can be changed at runtime without
+ * re-configuring the device, to support scaling and to reduce the latency of
+ * critical work by establishing links with more event ports at runtime.
  *
  * @param dev_id
  *   The identifier of the device.
+ *
  * @param port_id
- *   The identifier of the event port.
- * @param[out] ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   for output to be populated with the dequeued event objects.
- * @param nb_events
- *   The maximum number of event objects to dequeue, typically number of
- *   rte_event_port_dequeue_depth() available for this port.
- *
- * @param timeout_ticks
- *   - 0 no-wait, returns immediately if there is no event.
- *   - >0 wait for the event, if the device is configured with
- *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- *   at least one event is available or *timeout_ticks* time.
- *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
- *   then this function will wait until the event available or
- *   *dequeue_timeout_ns* ns which was previously supplied to
- *   rte_event_dev_configure()
- *
- * @return
- * The number of event objects actually dequeued from the port. The return
- * value can be less than the value of the *nb_events* parameter when the
- * event port's queue is not full.
- *
- * @see rte_event_port_dequeue_depth()
- */
-static inline uint16_t
-rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-			uint16_t nb_events, uint64_t timeout_ticks)
-{
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->dequeue)(
-			dev->data->ports[port_id], ev, timeout_ticks);
-	else
-		return (*dev->dequeue_burst)(
-			dev->data->ports[port_id], ev, nb_events,
-				timeout_ticks);
-}
-
-/**
- * Link multiple source event queues supplied in *queues* to the destination
- * event port designated by its *port_id* with associated service priority
- * supplied in *priorities* on the event device designated by its *dev_id*.
- *
- * The link establishment shall enable the event port *port_id* from
- * receiving events from the specified event queue(s) supplied in *queues*
- *
- * An event queue may link to one or more event ports.
- * The number of links can be established from an event queue to event port is
- * implementation defined.
- *
- * Event queue(s) to event port link establishment can be changed at runtime
- * without re-configuring the device to support scaling and to reduce the
- * latency of critical work by establishing the link with more event ports
- * at runtime.
- *
- * @param dev_id
- *   The identifier of the device.
- *
- * @param port_id
- *   Event port identifier to select the destination port to link.
+ *   Event port identifier to select the destination port to link.
  *
  * @param queues
  *   Points to an array of *nb_links* event queues to be linked
@@ -2148,6 +1740,288 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
 			     int socket_id);
 
+#include <rte_eventdev_core.h>
+
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			  const struct rte_event ev[], uint16_t nb_events,
+			  const event_enqueue_burst_t fn)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+	else
+		return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
+/**
+ * Enqueue a burst of events objects or an event object supplied in *rte_event*
+ * structure on an  event device designated by its *dev_id* through the event
+ * port specified by *port_id*. Each event object specifies the event queue on
+ * which it will be enqueued.
+ *
+ * The *nb_events* parameter is the number of event objects to enqueue which are
+ * supplied in the *ev* array of *rte_event* structure.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * The rte_event_enqueue_burst() function returns the number of
+ * events objects it actually enqueued. A return value equal to *nb_events*
+ * means that all event objects have been enqueued.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ */
+static inline uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
+ * function can provide an additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has event objects of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
+ * function can provide an additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
+ * burst has event objects of operation type != RTE_EVENT_OP_FORWARD.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+				const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_forward_burst);
+}
+
+/**
+ * Dequeue a burst of events objects or an event object from the event port
+ * designated by its *event_port_id*, on an event device designated
+ * by its *dev_id*.
+ *
+ * rte_event_dequeue_burst() does not dictate the specifics of scheduling
+ * algorithm as each eventdev driver may have different criteria to schedule
+ * an event. However, in general, from an application perspective scheduler may
+ * use the following scheme to dispatch an event to the port.
+ *
+ * 1) Selection of event queue based on
+ *   a) The list of event queues are linked to the event port.
+ *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
+ *   queue selection from list is based on event queue priority relative to
+ *   other event queue supplied as *priority* in rte_event_queue_setup()
+ *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
+ *   queue selection from the list is based on event priority supplied as
+ *   *priority* in rte_event_enqueue_burst()
+ * 2) Selection of event
+ *   a) The number of flows available in selected event queue.
+ *   b) Schedule type method associated with the event
+ *
+ * The *nb_events* parameter is the maximum number of event objects to dequeue
+ * which are returned in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_dequeue_burst() function returns the number of events objects
+ * it actually dequeued. A return value equal to *nb_events* means that all
+ * event objects have been dequeued.
+ *
+ * The number of events dequeued is the number of scheduler contexts held by
+ * this port. These contexts are automatically released in the next
+ * rte_event_dequeue_burst() invocation if the port supports implicit
+ * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
+ * operation can be used to release the contexts early.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param[out] ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   for output to be populated with the dequeued event objects.
+ * @param nb_events
+ *   The maximum number of event objects to dequeue, typically number of
+ *   rte_event_port_dequeue_depth() available for this port.
+ *
+ * @param timeout_ticks
+ *   - 0 no-wait, returns immediately if there is no event.
+ *   - >0 wait for the event, if the device is configured with
+ *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
+ *   at least one event is available or *timeout_ticks* time.
+ *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ *   then this function will wait until the event available or
+ *   *dequeue_timeout_ns* ns which was previously supplied to
+ *   rte_event_dev_configure()
+ *
+ * @return
+ * The number of event objects actually dequeued from the port. The return
+ * value can be less than the value of the *nb_events* parameter when the
+ * event port's queue is not full.
+ *
+ * @see rte_event_port_dequeue_depth()
+ */
+static inline uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+			uint16_t nb_events, uint64_t timeout_ticks)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->dequeue)(dev->data->ports[port_id], ev,
+				       timeout_ticks);
+	else
+		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
+					     nb_events, timeout_ticks);
+}
+
 #ifdef __cplusplus
 }
 #endif
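
To show how the relocated inline wrappers above are meant to be used from an
application, here is a minimal worker-loop sketch. dev_id, port_id,
process_event() and the done flag are placeholders, error handling via
rte_errno is elided, and device configuration, queue/port linking and
rte_event_dev_start() are assumed to have happened already; this is
illustrative only, not part of the patch.

#include <stdbool.h>
#include <rte_eventdev.h>

/* Placeholder for application work; assumed to set the next stage queue_id. */
extern void process_event(struct rte_event *ev);

static void
worker_loop(uint8_t dev_id, uint8_t port_id, volatile bool *done)
{
	struct rte_event ev[32];
	uint16_t i, nb, sent;

	while (!*done) {
		nb = rte_event_dequeue_burst(dev_id, port_id, ev, 32,
					     0 /* no-wait */);
		for (i = 0; i < nb; i++) {
			process_event(&ev[i]);
			ev[i].op = RTE_EVENT_OP_FORWARD;
		}
		/*
		 * Forwarded events must go back through the same port they
		 * were dequeued from; retry on backpressure.
		 */
		sent = 0;
		while (sent < nb)
			sent += rte_event_enqueue_forward_burst(
				dev_id, port_id, ev + sent, nb - sent);
	}
}
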
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
new file mode 100644
index 0000000000..b97cdf84fe
--- /dev/null
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(C) 2021 Marvell.
+ * Copyright 2016 NXP
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENTDEV_CORE_H_
+#define _RTE_EVENTDEV_CORE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
+/**< @internal Enqueue event on port of a device */
+
+typedef uint16_t (*event_enqueue_burst_t)(void *port,
+					  const struct rte_event ev[],
+					  uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
+				    uint64_t timeout_ticks);
+/**< @internal Dequeue event from port of a device */
+
+typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
+					  uint16_t nb_events,
+					  uint64_t timeout_ticks);
+/**< @internal Dequeue burst of events from port of a device */
+
+typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
+					       struct rte_event ev[],
+					       uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
+/**< @internal Enqueue burst of events on crypto adapter */
+
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void **ports;
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf *ports_cfg;
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf *queues_cfg;
+	/**< Array of queue configuration structures. */
+	uint16_t *links_map;
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< Pointer to PMD crypto adapter enqueue function. */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_RTE_EVENTDEV_CORE_H_*/
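
The prototypes above are what individual PMDs plug their fast-path callbacks
into at probe time; a condensed sketch with placeholder my_pmd_* callbacks
follows (the skeleton driver in the tree uses the same pattern).

/*
 * Minimal sketch, not part of the patch: wiring driver callbacks, which must
 * match the event_enqueue_t/event_dequeue_t etc. prototypes above, into
 * struct rte_eventdev. The my_pmd_* functions are placeholders.
 */
static void
my_pmd_fastpath_setup(struct rte_eventdev *eventdev)
{
	eventdev->enqueue = my_pmd_enqueue;                 /* event_enqueue_t */
	eventdev->enqueue_burst = my_pmd_enqueue_burst;     /* event_enqueue_burst_t */
	eventdev->enqueue_new_burst = my_pmd_enqueue_burst;
	eventdev->enqueue_forward_burst = my_pmd_enqueue_burst;
	eventdev->dequeue = my_pmd_dequeue;                 /* event_dequeue_t */
	eventdev->dequeue_burst = my_pmd_dequeue_burst;     /* event_dequeue_burst_t */
}
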
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 03/14] eventdev: allocate max space for internal arrays
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 02/14] eventdev: separate internal structures pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 04/14] eventdev: move inline APIs into separate structure pbhagavatula
                         ` (12 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Bruce Richardson, Anatoly Burakov; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Allocate max space for internal port, port config, queue config and
link map arrays.
Introduce new macro RTE_EVENT_MAX_PORTS_PER_DEV and set it to max
possible value.
This simplifies the port and queue reconfigure scenarios and will
also allow inline functions to reference internal port data pointers
without extra checks against the current number of configured queues.
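
A minimal sketch of that simplification, assuming the statically sized
queues_cfg[] array introduced in the diff below (illustrative only, not part
of the patch):

#include <string.h>
#include <rte_eventdev.h>

/* With a max-sized array there is nothing to rte_realloc() on reconfigure;
 * growing the queue count only needs the newly exposed slots zeroed.
 */
static struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];

static void
queues_grow(uint8_t old_nb_queues, uint8_t nb_queues)
{
	if (nb_queues > old_nb_queues)
		memset(&queues_cfg[old_nb_queues], 0,
		       sizeof(queues_cfg[0]) * (nb_queues - old_nb_queues));
}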

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 config/rte_config.h              |   1 +
 lib/eventdev/rte_eventdev.c      | 154 +++++++------------------------
 lib/eventdev/rte_eventdev_core.h |   9 +-
 3 files changed, 38 insertions(+), 126 deletions(-)

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c07d..e0ead8b251 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -72,6 +72,7 @@
 
 /* eventdev defines */
 #define RTE_EVENT_MAX_DEVS 16
+#define RTE_EVENT_MAX_PORTS_PER_DEV 255
 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255
 #define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32
 #define RTE_EVENT_ETH_INTR_RING_SIZE 1024
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index e347d6dfd5..bfcfa31cd1 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -209,7 +209,7 @@ rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 }
 
 static inline int
-rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
+event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 {
 	uint8_t old_nb_queues = dev->data->nb_queues;
 	struct rte_event_queue_conf *queues_cfg;
@@ -218,37 +218,13 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
 			 dev->data->dev_id);
 
-	/* First time configuration */
-	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
-		/* Allocate memory to store queue configuration */
-		dev->data->queues_cfg = rte_zmalloc_socket(
-				"eventdev->data->queues_cfg",
-				sizeof(dev->data->queues_cfg[0]) * nb_queues,
-				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->queues_cfg == NULL) {
-			dev->data->nb_queues = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
-					"nb_queues %u", nb_queues);
-			return -(ENOMEM);
-		}
-	/* Re-configure */
-	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
+	if (nb_queues != 0) {
+		queues_cfg = dev->data->queues_cfg;
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
 			(*dev->dev_ops->queue_release)(dev, i);
 
-		/* Re allocate memory to store queue configuration */
-		queues_cfg = dev->data->queues_cfg;
-		queues_cfg = rte_realloc(queues_cfg,
-				sizeof(queues_cfg[0]) * nb_queues,
-				RTE_CACHE_LINE_SIZE);
-		if (queues_cfg == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
-						" nb_queues %u", nb_queues);
-			return -(ENOMEM);
-		}
-		dev->data->queues_cfg = queues_cfg;
 
 		if (nb_queues > old_nb_queues) {
 			uint8_t new_qs = nb_queues - old_nb_queues;
@@ -256,7 +232,7 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 			memset(queues_cfg + old_nb_queues, 0,
 				sizeof(queues_cfg[0]) * new_qs);
 		}
-	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
+	} else {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
@@ -270,7 +246,7 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
 
 static inline int
-rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
+event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 {
 	uint8_t old_nb_ports = dev->data->nb_ports;
 	void **ports;
@@ -281,46 +257,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
 			 dev->data->dev_id);
 
-	/* First time configuration */
-	if (dev->data->ports == NULL && nb_ports != 0) {
-		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
-				sizeof(dev->data->ports[0]) * nb_ports,
-				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->ports == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Allocate memory to store port configurations */
-		dev->data->ports_cfg =
-			rte_zmalloc_socket("eventdev->ports_cfg",
-			sizeof(dev->data->ports_cfg[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->ports_cfg == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Allocate memory to store queue to port link connection */
-		dev->data->links_map =
-			rte_zmalloc_socket("eventdev->links_map",
-			sizeof(dev->data->links_map[0]) * nb_ports *
-			RTE_EVENT_MAX_QUEUES_PER_DEV,
-			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->links_map == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
-			dev->data->links_map[i] =
-				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
-	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
+	if (nb_ports != 0) { /* re-config */
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
@@ -330,37 +267,6 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 		for (i = nb_ports; i < old_nb_ports; i++)
 			(*dev->dev_ops->port_release)(ports[i]);
 
-		/* Realloc memory for ports */
-		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
-				RTE_CACHE_LINE_SIZE);
-		if (ports == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
-						" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory for ports_cfg */
-		ports_cfg = rte_realloc(ports_cfg,
-			sizeof(ports_cfg[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE);
-		if (ports_cfg == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
-						" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory to store queue to port link connection */
-		links_map = rte_realloc(links_map,
-			sizeof(dev->data->links_map[0]) * nb_ports *
-			RTE_EVENT_MAX_QUEUES_PER_DEV,
-			RTE_CACHE_LINE_SIZE);
-		if (links_map == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
 		if (nb_ports > old_nb_ports) {
 			uint8_t new_ps = nb_ports - old_nb_ports;
 			unsigned int old_links_map_end =
@@ -376,16 +282,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 				links_map[i] =
 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 		}
-
-		dev->data->ports = ports;
-		dev->data->ports_cfg = ports_cfg;
-		dev->data->links_map = links_map;
-	} else if (dev->data->ports != NULL && nb_ports == 0) {
+	} else {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
-		for (i = nb_ports; i < old_nb_ports; i++)
+		for (i = nb_ports; i < old_nb_ports; i++) {
 			(*dev->dev_ops->port_release)(ports[i]);
+			ports[i] = NULL;
+		}
 	}
 
 	dev->data->nb_ports = nb_ports;
@@ -550,19 +454,19 @@ rte_event_dev_configure(uint8_t dev_id,
 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
 
 	/* Setup new number of queues and reconfigure device. */
-	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
+	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
 	if (diag != 0) {
-		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
-				dev_id, diag);
+		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
+				 diag);
 		return diag;
 	}
 
 	/* Setup new number of ports and reconfigure device. */
-	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
+	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
 	if (diag != 0) {
-		rte_event_dev_queue_config(dev, 0);
-		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
-				dev_id, diag);
+		event_dev_queue_config(dev, 0);
+		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
+				 diag);
 		return diag;
 	}
 
@@ -570,8 +474,8 @@ rte_event_dev_configure(uint8_t dev_id,
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
-		rte_event_dev_queue_config(dev, 0);
-		rte_event_dev_port_config(dev, 0);
+		event_dev_queue_config(dev, 0);
+		event_dev_port_config(dev, 0);
 	}
 
 	dev->data->event_dev_cap = info.event_dev_cap;
@@ -1403,8 +1307,8 @@ rte_event_dev_close(uint8_t dev_id)
 }
 
 static inline int
-rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
-		int socket_id)
+eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+		    int socket_id)
 {
 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
 	const struct rte_memzone *mz;
@@ -1426,14 +1330,20 @@ rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
 		return -ENOMEM;
 
 	*data = mz->addr;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		memset(*data, 0, sizeof(struct rte_eventdev_data));
+		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
+					RTE_EVENT_MAX_QUEUES_PER_DEV;
+		     n++)
+			(*data)->links_map[n] =
+				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+	}
 
 	return 0;
 }
 
 static inline uint8_t
-rte_eventdev_find_free_device_index(void)
+eventdev_find_free_device_index(void)
 {
 	uint8_t dev_id;
 
@@ -1475,7 +1385,7 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 		return NULL;
 	}
 
-	dev_id = rte_eventdev_find_free_device_index();
+	dev_id = eventdev_find_free_device_index();
 	if (dev_id == RTE_EVENT_MAX_DEVS) {
 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
 		return NULL;
@@ -1490,8 +1400,8 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
-		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
-				socket_id);
+		int retval =
+			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
 
 		if (retval < 0 || eventdev_data == NULL)
 			return NULL;
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index b97cdf84fe..115b97e431 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -58,13 +58,14 @@ struct rte_eventdev_data {
 	/**< Number of event queues. */
 	uint8_t nb_ports;
 	/**< Number of event ports. */
-	void **ports;
+	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
 	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
+	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
 	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
+	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
+	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
+			   RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/**< Memory to store queues to port connections. */
 	void *dev_private;
 	/**< PMD-specific private data */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 04/14] eventdev: move inline APIs into separate structure
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 02/14] eventdev: separate internal structures pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 03/14] eventdev: allocate max space for internal arrays pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 05/14] drivers/event: invoke probing finish function pbhagavatula
                         ` (11 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move fastpath inline function pointers from rte_eventdev into a
separate structure accessed via a flat array.
The intention is to make rte_eventdev and related structures private
to avoid future API/ABI breakages.
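
Once the inline wrappers are converted to use this array (done later in the
series, in patch 06/14), the intended call path is roughly as sketched below;
the names mirror the rte_event_fp_ops structure added in this patch, and the
wrapper itself is illustrative only.

#include <rte_eventdev.h>

static inline uint16_t
sketch_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops = &rte_event_fp_ops[dev_id];

	/* fp_ops->data[] holds the per-port private data pointers. */
	return fp_ops->enqueue_burst(fp_ops->data[port_id], ev, nb_events);
}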

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/eventdev_pmd.h      |  38 +++++++++++
 lib/eventdev/eventdev_pmd_pci.h  |   4 +-
 lib/eventdev/eventdev_private.c  | 112 +++++++++++++++++++++++++++++++
 lib/eventdev/meson.build         |  21 +++---
 lib/eventdev/rte_eventdev.c      |  22 +++++-
 lib/eventdev/rte_eventdev_core.h |  26 +++++++
 lib/eventdev/version.map         |   6 ++
 7 files changed, 217 insertions(+), 12 deletions(-)
 create mode 100644 lib/eventdev/eventdev_private.c

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 9b2aec8371..0532b542d4 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -1188,4 +1188,42 @@ __rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);
 
+/**
+ *
+ * @internal
+ * This is the last step of device probing.
+ * It must be called after a port is allocated and initialized successfully.
+ *
+ * @param eventdev
+ *  New event device.
+ */
+__rte_internal
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev);
+
+/**
+ * Reset eventdevice fastpath APIs to dummy values.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to reset.
+ */
+__rte_internal
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op);
+
+/**
+ * Set eventdevice fastpath APIs to event device values.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to set.
+ */
+__rte_internal
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_ops,
+		     const struct rte_eventdev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _RTE_EVENTDEV_PMD_H_ */
diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index 2f12a5eb24..499852db16 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -67,8 +67,10 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 
 	/* Invoke PMD device initialization function */
 	retval = devinit(eventdev);
-	if (retval == 0)
+	if (retval == 0) {
+		event_dev_probing_finish(eventdev);
 		return 0;
+	}
 
 	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
 			" failed", pci_drv->driver.name,
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
new file mode 100644
index 0000000000..9084833847
--- /dev/null
+++ b/lib/eventdev/eventdev_private.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "eventdev_pmd.h"
+#include "rte_eventdev.h"
+
+static uint16_t
+dummy_event_enqueue(__rte_unused void *port,
+		    __rte_unused const struct rte_event *ev)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_enqueue_burst(__rte_unused void *port,
+			  __rte_unused const struct rte_event ev[],
+			  __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
+		    __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue_burst(__rte_unused void *port,
+			  __rte_unused struct rte_event ev[],
+			  __rte_unused uint16_t nb_events,
+			  __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue(__rte_unused void *port,
+			       __rte_unused struct rte_event ev[],
+			       __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue_same_dest(__rte_unused void *port,
+					 __rte_unused struct rte_event ev[],
+					 __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue same destination requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_crypto_adapter_enqueue(__rte_unused void *port,
+				   __rte_unused struct rte_event ev[],
+				   __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event crypto adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
+{
+	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+	static const struct rte_event_fp_ops dummy = {
+		.enqueue = dummy_event_enqueue,
+		.enqueue_burst = dummy_event_enqueue_burst,
+		.enqueue_new_burst = dummy_event_enqueue_burst,
+		.enqueue_forward_burst = dummy_event_enqueue_burst,
+		.dequeue = dummy_event_dequeue,
+		.dequeue_burst = dummy_event_dequeue_burst,
+		.txa_enqueue = dummy_event_tx_adapter_enqueue,
+		.txa_enqueue_same_dest =
+			dummy_event_tx_adapter_enqueue_same_dest,
+		.ca_enqueue = dummy_event_crypto_adapter_enqueue,
+		.data = dummy_data,
+	};
+
+	*fp_op = dummy;
+}
+
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
+		     const struct rte_eventdev *dev)
+{
+	fp_op->enqueue = dev->enqueue;
+	fp_op->enqueue_burst = dev->enqueue_burst;
+	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
+	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
+	fp_op->dequeue = dev->dequeue;
+	fp_op->dequeue_burst = dev->dequeue_burst;
+	fp_op->txa_enqueue = dev->txa_enqueue;
+	fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
+	fp_op->ca_enqueue = dev->ca_enqueue;
+	fp_op->data = dev->data->ports;
+}
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 8b51fde361..cb9abe92f6 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -8,24 +8,25 @@ else
 endif
 
 sources = files(
-        'rte_eventdev.c',
-        'rte_event_ring.c',
+        'eventdev_private.c',
         'eventdev_trace_points.c',
-        'rte_event_eth_rx_adapter.c',
-        'rte_event_timer_adapter.c',
         'rte_event_crypto_adapter.c',
+        'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
+        'rte_event_ring.c',
+        'rte_event_timer_adapter.c',
+        'rte_eventdev.c',
 )
 headers = files(
-        'rte_eventdev.h',
-        'rte_eventdev_trace.h',
-        'rte_eventdev_trace_fp.h',
-        'rte_event_ring.h',
+        'rte_event_crypto_adapter.h',
         'rte_event_eth_rx_adapter.h',
+        'rte_event_eth_tx_adapter.h',
+        'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_event_timer_adapter_pmd.h',
-        'rte_event_crypto_adapter.h',
-        'rte_event_eth_tx_adapter.h',
+        'rte_eventdev.h',
+        'rte_eventdev_trace.h',
+        'rte_eventdev_trace_fp.h',
 )
 indirect_headers += files(
         'rte_eventdev_core.h',
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index bfcfa31cd1..4c30a37831 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -46,6 +46,9 @@ static struct rte_eventdev_global eventdev_globals = {
 	.nb_devs		= 0
 };
 
+/* Public fastpath APIs. */
+struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 /* Event dev north bound API implementation */
 
 uint8_t
@@ -300,8 +303,8 @@ int
 rte_event_dev_configure(uint8_t dev_id,
 			const struct rte_event_dev_config *dev_conf)
 {
-	struct rte_eventdev *dev;
 	struct rte_event_dev_info info;
+	struct rte_eventdev *dev;
 	int diag;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -470,10 +473,13 @@ rte_event_dev_configure(uint8_t dev_id,
 		return diag;
 	}
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
+
 	/* Configure the device */
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 		event_dev_queue_config(dev, 0);
 		event_dev_port_config(dev, 0);
 	}
@@ -1244,6 +1250,8 @@ rte_event_dev_start(uint8_t dev_id)
 	else
 		return diag;
 
+	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
+
 	return 0;
 }
 
@@ -1284,6 +1292,7 @@ rte_event_dev_stop(uint8_t dev_id)
 	dev->data->dev_started = 0;
 	(*dev->dev_ops->dev_stop)(dev);
 	rte_eventdev_trace_stop(dev_id);
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 }
 
 int
@@ -1302,6 +1311,7 @@ rte_event_dev_close(uint8_t dev_id)
 		return -EBUSY;
 	}
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 	rte_eventdev_trace_close(dev_id);
 	return (*dev->dev_ops->dev_close)(dev);
 }
@@ -1435,6 +1445,7 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
 	if (eventdev == NULL)
 		return -EINVAL;
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
 	eventdev->attached = RTE_EVENTDEV_DETACHED;
 	eventdev_globals.nb_devs--;
 
@@ -1460,6 +1471,15 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
 	return 0;
 }
 
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev)
+{
+	if (eventdev == NULL)
+		return;
+
+	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
+			     eventdev);
+}
 
 static int
 handle_dev_list(const char *cmd __rte_unused,
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 115b97e431..916023f71f 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -39,6 +39,32 @@ typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
 						   uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */
 
+struct rte_event_fp_ops {
+	void **data;
+	/**< points to array of internal port data pointers */
+	event_enqueue_t enqueue;
+	/**< PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< PMD enqueue burst new function. */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< PMD enqueue burst fwd function. */
+	event_dequeue_t dequeue;
+	/**< PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< PMD Tx adapter enqueue function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< PMD Tx adapter enqueue same destination function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< PMD Crypto adapter enqueue function. */
+	uintptr_t reserved[6];
+} __rte_cache_aligned;
+
+extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
 /**< @internal Max length of name of event PMD */
 
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd72f45d29..e684154bf9 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -85,6 +85,9 @@ DPDK_22 {
 	rte_event_timer_cancel_burst;
 	rte_eventdevs;
 
+	#added in 21.11
+	rte_event_fp_ops;
+
 	local: *;
 };
 
@@ -143,6 +146,9 @@ EXPERIMENTAL {
 INTERNAL {
 	global:
 
+	event_dev_fp_ops_reset;
+	event_dev_fp_ops_set;
+	event_dev_probing_finish;
 	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 05/14] drivers/event: invoke probing finish function
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (2 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 04/14] eventdev: move inline APIs into separate structure pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-17 15:34         ` Hemant Agrawal
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 06/14] eventdev: use new API for inline functions pbhagavatula
                         ` (10 subsequent siblings)
  14 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Hemant Agrawal, Nipun Gupta, Mattias Rönnblom,
	Liang Ma, Peter Mccarthy, Harry van Haaren
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Invoke the event_dev_probing_finish() function at the end of probing;
it sets the function pointers in the fp_ops flat array.
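
Every driver below applies the same pattern; a condensed sketch with a
placeholder my_pmd vdev driver (my_pmd_probe() and struct my_pmd_priv are
placeholders, the event_dev_probing_finish() call is what this patch adds):

#include <eventdev_pmd_vdev.h>	/* assumed include path */

static int
my_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_eventdev *dev;

	dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
				      sizeof(struct my_pmd_priv),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	/* ... set dev->dev_ops and the fast-path function pointers ... */

	event_dev_probing_finish(dev);	/* publish this device's fp_ops */
	return 0;
}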

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/dpaa/dpaa_eventdev.c         | 4 +++-
 drivers/event/dpaa2/dpaa2_eventdev.c       | 4 +++-
 drivers/event/dsw/dsw_evdev.c              | 1 +
 drivers/event/octeontx/ssovf_evdev.c       | 1 +
 drivers/event/opdl/opdl_evdev.c            | 4 +++-
 drivers/event/skeleton/skeleton_eventdev.c | 1 +
 drivers/event/sw/sw_evdev.c                | 2 ++
 7 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 9f14390d28..14ca341829 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -1026,10 +1026,12 @@ dpaa_event_dev_create(const char *name, const char *params)
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;
 
 	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
 
+done:
+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index d577f64824..1d3ad8ffd6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1110,7 +1110,7 @@ dpaa2_eventdev_create(const char *name)
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;
 
 	priv = eventdev->data->dev_private;
 	priv->max_event_queues = 0;
@@ -1139,6 +1139,8 @@ dpaa2_eventdev_create(const char *name)
 
 	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
 
+done:
+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 01f060fff3..17568967be 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -448,6 +448,7 @@ dsw_probe(struct rte_vdev_device *vdev)
 	dsw = dev->data->dev_private;
 	dsw->data = dev->data;
 
+	event_dev_probing_finish(dev);
 	return 0;
 }
 
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 4a8c6a13a5..eb80eeafe1 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -933,6 +933,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
 			edev->max_event_ports);
 
 	ssovf_init_once = 1;
+	event_dev_probing_finish(eventdev);
 	return 0;
 
 error:
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 739dc64c82..5007e9a7bf 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -720,7 +720,7 @@ opdl_probe(struct rte_vdev_device *vdev)
 	dev->dequeue_burst = opdl_event_dequeue_burst;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;
 
 	opdl = dev->data->dev_private;
 	opdl->data = dev->data;
@@ -733,6 +733,8 @@ opdl_probe(struct rte_vdev_device *vdev)
 	if (do_test == 1)
 		test_result =  opdl_selftest();
 
+done:
+	event_dev_probing_finish(dev);
 	return test_result;
 }
 
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index c9e17e7cb1..af0efb3302 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -443,6 +443,7 @@ skeleton_eventdev_create(const char *name, int socket_id)
 	eventdev->dequeue       = skeleton_eventdev_dequeue;
 	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
 
+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 9b72073322..e99b47afbe 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -1124,6 +1124,8 @@ sw_probe(struct rte_vdev_device *vdev)
 	dev->data->service_inited = 1;
 	dev->data->service_id = sw->service_id;
 
+	event_dev_probing_finish(dev);
+
 	return 0;
 }
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 06/14] eventdev: use new API for inline functions
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (3 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 05/14] drivers/event: invoke probing finish function pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 07/14] eventdev: hide event device related structures pbhagavatula
                         ` (9 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Use the new driver interface for the fastpath enqueue/dequeue inline
functions.
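
The lookup pattern used throughout the converted inline functions is, in
essence, the sketch below (illustrative only, not the literal library
code): index rte_event_fp_ops[] by dev_id, fetch the internal port
pointer from fp_ops->data[port_id], and call the stored burst function.

#include <rte_eventdev.h>

static inline uint16_t
example_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		      const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops = &rte_event_fp_ops[dev_id];
	void *port = fp_ops->data[port_id];

	/* Debug builds additionally bound-check dev_id/port_id and port. */
	return fp_ops->enqueue_burst(port, ev, nb_events);
}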

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
---
 lib/eventdev/rte_event_crypto_adapter.h | 15 +++++---
 lib/eventdev/rte_event_eth_tx_adapter.h | 15 ++++----
 lib/eventdev/rte_eventdev.h             | 46 +++++++++++++++----------
 3 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index 1a8ff75384..d90a19e72c 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -569,12 +569,19 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 				struct rte_event ev[],
 				uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
+		rte_errno = EINVAL;
+		return 0;
+	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -582,7 +589,7 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
 		nb_events);
 
-	return dev->ca_enqueue(dev->data->ports[port_id], ev, nb_events);
+	return fp_ops->ca_enqueue(port, ev, nb_events);
 }
 
 #ifdef __cplusplus
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
index 8c59547165..3908c2ded5 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/eventdev/rte_event_eth_tx_adapter.h
@@ -355,16 +355,19 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 				uint16_t nb_events,
 				const uint8_t flags)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
-		!rte_eventdevs[dev_id].attached) {
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -372,11 +375,9 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
 		nb_events, flags);
 	if (flags)
-		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
-						  ev, nb_events);
+		return fp_ops->txa_enqueue_same_dest(port, ev, nb_events);
 	else
-		return dev->txa_enqueue(dev->data->ports[port_id], ev,
-					nb_events);
+		return fp_ops->txa_enqueue(port, ev, nb_events);
 }
 
 /**
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 1b11d4576d..31fa9ac4b8 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1747,15 +1747,19 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			  const struct rte_event ev[], uint16_t nb_events,
 			  const event_enqueue_burst_t fn)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -1766,9 +1770,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+		return (fp_ops->enqueue)(port, ev);
 	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
+		return fn(port, ev, nb_events);
 }
 
 /**
@@ -1818,10 +1822,11 @@ static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_burst);
+					 fp_ops->enqueue_burst);
 }
 
 /**
@@ -1869,10 +1874,11 @@ static inline uint16_t
 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
 			    const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_new_burst);
+					 fp_ops->enqueue_new_burst);
 }
 
 /**
@@ -1920,10 +1926,11 @@ static inline uint16_t
 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
 				const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_forward_burst);
+					 fp_ops->enqueue_forward_burst);
 }
 
 /**
@@ -1996,15 +2003,19 @@ static inline uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 			uint16_t nb_events, uint64_t timeout_ticks)
 {
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -2015,11 +2026,10 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->dequeue)(dev->data->ports[port_id], ev,
-				       timeout_ticks);
+		return (fp_ops->dequeue)(port, ev, timeout_ticks);
 	else
-		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
-					     nb_events, timeout_ticks);
+		return (fp_ops->dequeue_burst)(port, ev, nb_events,
+					       timeout_ticks);
 }
 
 #ifdef __cplusplus
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 07/14] eventdev: hide event device related structures
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (4 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 06/14] eventdev: use new API for inline functions pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-18  7:07         ` Harman Kalra
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 08/14] eventdev: hide timer adapter PMD file pbhagavatula
                         ` (8 subsequent siblings)
  14 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Timothy McDaniel, Mattias Rönnblom, Pavan Nikhilesh,
	Harman Kalra, Ray Kinsella
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the rte_eventdev and rte_eventdev_data structures to eventdev_pmd.h.
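
As a consequence, any driver code that dereferences struct rte_eventdev
now has to include eventdev_pmd.h; a minimal sketch with a hypothetical
my_evdev_priv type:

#include <eventdev_pmd.h>

struct my_evdev_priv;	/* hypothetical driver-private type */

static inline struct my_evdev_priv *
my_evdev_priv_get(const struct rte_eventdev *dev)
{
	/* dev->data is only visible once eventdev_pmd.h is included. */
	return dev->data->dev_private;
}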

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/dlb2/dlb2_inline_fns.h   |  2 +
 drivers/event/dsw/dsw_evdev.h          |  2 +
 drivers/event/octeontx/timvf_worker.h  |  2 +
 drivers/net/octeontx/octeontx_ethdev.c |  3 +-
 lib/eventdev/eventdev_pmd.h            | 92 +++++++++++++++++++++++++
 lib/eventdev/rte_eventdev.c            | 22 ------
 lib/eventdev/rte_eventdev_core.h       | 93 --------------------------
 lib/eventdev/version.map               |  2 +-
 8 files changed, 101 insertions(+), 117 deletions(-)

diff --git a/drivers/event/dlb2/dlb2_inline_fns.h b/drivers/event/dlb2/dlb2_inline_fns.h
index ac8d01aa98..1429281cfd 100644
--- a/drivers/event/dlb2/dlb2_inline_fns.h
+++ b/drivers/event/dlb2/dlb2_inline_fns.h
@@ -5,6 +5,8 @@
 #ifndef _DLB2_INLINE_FNS_H_
 #define _DLB2_INLINE_FNS_H_
 
+#include <eventdev_pmd.h>
+
 /* Inline functions required in more than one source file. */
 
 static inline struct dlb2_eventdev *
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 08889a0990..631daea55c 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -5,6 +5,8 @@
 #ifndef _DSW_EVDEV_H_
 #define _DSW_EVDEV_H_
 
+#include <eventdev_pmd.h>
+
 #include <rte_event_ring.h>
 #include <rte_eventdev.h>
 
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index dede1a4a4f..3f1e77f1d1 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -2,6 +2,8 @@
  * Copyright(c) 2017 Cavium, Inc
  */
 
+#include <eventdev_pmd.h>
+
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 7c91494f0e..ddfce57394 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -9,13 +9,14 @@
 #include <string.h>
 #include <unistd.h>
 
+#include <eventdev_pmd.h>
 #include <rte_alarm.h>
 #include <rte_branch_prediction.h>
 #include <rte_bus_vdev.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
-#include <rte_devargs.h>
 #include <rte_dev.h>
+#include <rte_devargs.h>
 #include <rte_kvargs.h>
 #include <rte_malloc.h>
 #include <rte_mbuf_pool_ops.h>
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 0532b542d4..9aa9943fa5 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -80,6 +80,9 @@
 #define RTE_EVENTDEV_DETACHED  (0)
 #define RTE_EVENTDEV_ATTACHED  (1)
 
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
 struct rte_eth_dev;
 
 /** Global structure used for maintaining state of allocated event devices */
@@ -87,6 +90,95 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
+	/**< Array of queue configuration structures. */
+	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
+			   RTE_EVENT_MAX_QUEUES_PER_DEV];
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 4c30a37831..e55241defd 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -1365,24 +1365,6 @@ eventdev_find_free_device_index(void)
 	return RTE_EVENT_MAX_DEVS;
 }
 
-static uint16_t
-rte_event_tx_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
-static uint16_t
-rte_event_crypto_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id)
 {
@@ -1403,10 +1385,6 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 
 	eventdev = &rte_eventdevs[dev_id];
 
-	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
-	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
-	eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
-
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 916023f71f..61d5ebdc44 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -65,99 +65,6 @@ struct rte_event_fp_ops {
 
 extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
 
-#define RTE_EVENTDEV_NAME_MAX_LEN (64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
-	/**< Array of queue configuration structures. */
-	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
-			   RTE_EVENT_MAX_QUEUES_PER_DEV];
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue_t txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue_t ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index e684154bf9..9f6eb4ba3c 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -83,7 +83,6 @@ DPDK_22 {
 	rte_event_timer_arm_burst;
 	rte_event_timer_arm_tmo_tick_burst;
 	rte_event_timer_cancel_burst;
-	rte_eventdevs;
 
 	#added in 21.11
 	rte_event_fp_ops;
@@ -159,4 +158,5 @@ INTERNAL {
 	rte_event_pmd_release;
 	rte_event_pmd_vdev_init;
 	rte_event_pmd_vdev_uninit;
+	rte_eventdevs;
 };
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 08/14] eventdev: hide timer adapter PMD file
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (5 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 07/14] eventdev: hide event device related structures pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
                         ` (7 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Mattias Rönnblom,
	Harry van Haaren, Erik Gabriel Carrillo
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Hide the rte_event_timer_adapter_pmd.h header as it is internal.
Remove the rte_ prefix from the rte_event_timer_adapter_ops structure.
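
A driver's timer adapter capability callback now references the renamed
type; the sketch below (hypothetical driver, modelled on the dsw change
in this patch) reports no device-specific support so the software timer
adapter is used instead.

#include <event_timer_adapter_pmd.h>
#include <eventdev_pmd.h>
#include <rte_common.h>

static int
my_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
			  uint32_t *caps,
			  const struct event_timer_adapter_ops **ops)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(flags);
	*caps = 0;	/* no device-specific timer support */
	*ops = NULL;	/* fall back to the software timer adapter */
	return 0;
}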

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cnxk_tim_evdev.c           |  5 ++--
 drivers/event/cnxk/cnxk_tim_evdev.h           |  2 +-
 drivers/event/dsw/dsw_evdev.c                 |  4 +--
 drivers/event/octeontx/ssovf_evdev.c          |  2 +-
 drivers/event/octeontx/timvf_evdev.c          | 17 ++++++-----
 drivers/event/octeontx/timvf_evdev.h          |  9 +++---
 drivers/event/octeontx2/otx2_tim_evdev.c      |  5 ++--
 drivers/event/octeontx2/otx2_tim_evdev.h      |  4 +--
 drivers/event/sw/sw_evdev.c                   |  5 ++--
 ...dapter_pmd.h => event_timer_adapter_pmd.h} |  8 ++---
 lib/eventdev/eventdev_pmd.h                   |  8 ++---
 lib/eventdev/meson.build                      |  2 +-
 lib/eventdev/rte_event_timer_adapter.c        | 30 +++++++++----------
 lib/eventdev/rte_event_timer_adapter.h        |  2 +-
 lib/eventdev/rte_eventdev.c                   |  2 +-
 15 files changed, 51 insertions(+), 54 deletions(-)
 rename lib/eventdev/{rte_event_timer_adapter_pmd.h => event_timer_adapter_pmd.h} (95%)

diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index c3e9dc508c..100fafb67e 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -5,7 +5,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops cnxk_tim_ops;
+static struct event_timer_adapter_ops cnxk_tim_ops;
 
 static int
 cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
@@ -353,8 +353,7 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index 9a23952a91..2478a5c1df 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -268,7 +268,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 
 int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void cnxk_tim_init(struct roc_sso *sso);
 void cnxk_tim_fini(void);
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 17568967be..0652d83ad6 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -381,8 +381,8 @@ dsw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
 
 static int
 dsw_timer_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
-			   uint64_t flags  __rte_unused, uint32_t *caps,
-			   const struct rte_event_timer_adapter_ops **ops)
+			   uint64_t flags __rte_unused, uint32_t *caps,
+			   const struct event_timer_adapter_ops **ops)
 {
 	*caps = 0;
 	*ops = NULL;
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index eb80eeafe1..2245599810 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -721,7 +721,7 @@ ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque)
 
 static int
 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+		     uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
 			timvf_enable_stats);
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index 688e9daa66..1f1cda3f7f 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -407,18 +407,19 @@ timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
 	return 0;
 }
 
-static struct rte_event_timer_adapter_ops timvf_ops = {
-	.init		= timvf_ring_create,
-	.uninit		= timvf_ring_free,
-	.start		= timvf_ring_start,
-	.stop		= timvf_ring_stop,
-	.get_info	= timvf_ring_info_get,
+static struct event_timer_adapter_ops timvf_ops = {
+	.init = timvf_ring_create,
+	.uninit = timvf_ring_free,
+	.start = timvf_ring_start,
+	.stop = timvf_ring_stop,
+	.get_info = timvf_ring_info_get,
 };
 
 int
 timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats)
+			     uint32_t *caps,
+			     const struct event_timer_adapter_ops **ops,
+			     uint8_t enable_stats)
 {
 	RTE_SET_USED(dev);
 
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 2977063d66..cef02cd7f9 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -5,13 +5,13 @@
 #ifndef __TIMVF_EVDEV_H__
 #define __TIMVF_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
 #include <rte_eal.h>
-#include <rte_eventdev.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
+#include <rte_eventdev.h>
 #include <rte_io.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
@@ -196,8 +196,9 @@ uint8_t timvf_get_ring(void);
 void timvf_release_ring(uint8_t vfid);
 void *timvf_bar(uint8_t id, uint8_t bar);
 int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats);
+				 uint32_t *caps,
+				 const struct event_timer_adapter_ops **ops,
+				 uint8_t enable_stats);
 uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint16_t nb_timers);
 uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c
index de50c4c76e..7dcb291043 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.c
+++ b/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -9,7 +9,7 @@
 #include "otx2_evdev.h"
 #include "otx2_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops otx2_tim_ops;
+static struct event_timer_adapter_ops otx2_tim_ops;
 
 static inline int
 tim_get_msix_offsets(void)
@@ -497,8 +497,7 @@ otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct otx2_tim_evdev *dev = tim_priv_get();
 
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.h b/drivers/event/octeontx2/otx2_tim_evdev.h
index caa6ad3b3c..dac642e0e1 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.h
+++ b/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -5,8 +5,8 @@
 #ifndef __OTX2_TIM_EVDEV_H__
 #define __OTX2_TIM_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
 #include <rte_reciprocal.h>
 
 #include "otx2_dev.h"
@@ -244,7 +244,7 @@ uint16_t otx2_tim_timer_cancel_burst(
 
 int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev);
 void otx2_tim_fini(void);
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index e99b47afbe..070a4802e9 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -561,10 +561,9 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 }
 
 static int
-sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
-			  uint64_t flags,
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 			  uint32_t *caps,
-			  const struct rte_event_timer_adapter_ops **ops)
+			  const struct event_timer_adapter_ops **ops)
 {
 	RTE_SET_USED(dev);
 	RTE_SET_USED(flags);
diff --git a/lib/eventdev/rte_event_timer_adapter_pmd.h b/lib/eventdev/event_timer_adapter_pmd.h
similarity index 95%
rename from lib/eventdev/rte_event_timer_adapter_pmd.h
rename to lib/eventdev/event_timer_adapter_pmd.h
index cf3509dc6f..189017b5c1 100644
--- a/lib/eventdev/rte_event_timer_adapter_pmd.h
+++ b/lib/eventdev/event_timer_adapter_pmd.h
@@ -3,8 +3,8 @@
  * All rights reserved.
  */
 
-#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
-#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#ifndef __EVENT_TIMER_ADAPTER_PMD_H__
+#define __EVENT_TIMER_ADAPTER_PMD_H__
 
 /**
  * @file
@@ -57,7 +57,7 @@ typedef int (*rte_event_timer_adapter_stats_reset_t)(
  * @internal Structure containing the functions exported by an event timer
  * adapter implementation.
  */
-struct rte_event_timer_adapter_ops {
+struct event_timer_adapter_ops {
 	rte_event_timer_adapter_init_t		init;  /**< Set up adapter */
 	rte_event_timer_adapter_uninit_t	uninit;/**< Tear down adapter */
 	rte_event_timer_adapter_start_t		start; /**< Start adapter */
@@ -111,4 +111,4 @@ struct rte_event_timer_adapter_data {
 }
 #endif
 
-#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
+#endif /* __EVENT_TIMER_ADAPTER_PMD_H__ */
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 9aa9943fa5..d009e24309 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -24,8 +24,8 @@
 #include <rte_mbuf.h>
 #include <rte_mbuf_dyn.h>
 
+#include "event_timer_adapter_pmd.h"
 #include "rte_eventdev.h"
-#include "rte_event_timer_adapter_pmd.h"
 
 /* Logging Macros */
 #define RTE_EDEV_LOG_ERR(...) \
@@ -591,10 +591,8 @@ struct rte_event_eth_rx_adapter_queue_conf;
  *
  */
 typedef int (*eventdev_timer_adapter_caps_get_t)(
-				const struct rte_eventdev *dev,
-				uint64_t flags,
-				uint32_t *caps,
-				const struct rte_event_timer_adapter_ops **ops);
+	const struct rte_eventdev *dev, uint64_t flags, uint32_t *caps,
+	const struct event_timer_adapter_ops **ops);
 
 /**
  * Add ethernet Rx queues to event device. This callback is invoked if
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index cb9abe92f6..22c3289912 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -23,7 +23,6 @@ headers = files(
         'rte_event_eth_tx_adapter.h',
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
-        'rte_event_timer_adapter_pmd.h',
         'rte_eventdev.h',
         'rte_eventdev_trace.h',
         'rte_eventdev_trace_fp.h',
@@ -35,6 +34,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'event_timer_adapter_pmd.h',
 )
 
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ee20b39f4b..ae55407042 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -20,11 +20,11 @@
 #include <rte_service_component.h>
 #include <rte_cycles.h>
 
-#include "rte_eventdev.h"
+#include "event_timer_adapter_pmd.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
 #include "rte_event_timer_adapter.h"
-#include "rte_event_timer_adapter_pmd.h"
+#include "rte_eventdev.h"
+#include "rte_eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
@@ -35,7 +35,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
 
-static const struct rte_event_timer_adapter_ops swtim_ops;
+static const struct event_timer_adapter_ops swtim_ops;
 
 #define EVTIM_LOG(level, logtype, ...) \
 	rte_log(RTE_LOG_ ## level, logtype, \
@@ -1207,15 +1207,15 @@ swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
 	return __swtim_arm_burst(adapter, evtims, nb_evtims);
 }
 
-static const struct rte_event_timer_adapter_ops swtim_ops = {
-	.init			= swtim_init,
-	.uninit			= swtim_uninit,
-	.start			= swtim_start,
-	.stop			= swtim_stop,
-	.get_info		= swtim_get_info,
-	.stats_get		= swtim_stats_get,
-	.stats_reset		= swtim_stats_reset,
-	.arm_burst		= swtim_arm_burst,
-	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
-	.cancel_burst		= swtim_cancel_burst,
+static const struct event_timer_adapter_ops swtim_ops = {
+	.init = swtim_init,
+	.uninit = swtim_uninit,
+	.start = swtim_start,
+	.stop = swtim_stop,
+	.get_info = swtim_get_info,
+	.stats_get = swtim_stats_get,
+	.stats_reset = swtim_stats_reset,
+	.arm_burst = swtim_arm_burst,
+	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
+	.cancel_burst = swtim_cancel_burst,
 };
diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index 4e0d2a819b..cad6d3b4c5 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -523,7 +523,7 @@ struct rte_event_timer_adapter {
 	/**< Pointer to driver cancel function. */
 	struct rte_event_timer_adapter_data *data;
 	/**< Pointer to shared adapter data */
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 	/**< Functions exported by adapter driver */
 
 	RTE_STD_C11
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index e55241defd..de6346194e 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -142,7 +142,7 @@ int
 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
 {
 	struct rte_eventdev *dev;
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 09/14] eventdev: remove rte prefix for internal structs
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (6 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 08/14] eventdev: hide timer adapter PMD file pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 10/14] eventdev: rearrange fields in timer object pbhagavatula
                         ` (6 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Remove the rte_ prefix from rte_eth_event_enqueue_buffer,
rte_event_eth_rx_adapter and rte_event_crypto_adapter,
as they are only used in rte_event_eth_rx_adapter.c and
rte_event_crypto_adapter.c.
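
In short (illustrative, not new code in this patch): internal,
file-local state drops the rte_ prefix while the exported API keeps it,
for example:

struct event_crypto_adapter;			/* internal to rte_event_crypto_adapter.c */
int rte_event_crypto_adapter_start(uint8_t id);	/* public API keeps the rte_ prefix */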

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
---
 lib/eventdev/rte_event_crypto_adapter.c |  66 +++---
 lib/eventdev/rte_event_eth_rx_adapter.c | 258 ++++++++++--------------
 lib/eventdev/rte_eventdev.h             |   2 +-
 3 files changed, 145 insertions(+), 181 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index ebfc8326a8..e9e660a3d2 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -30,7 +30,7 @@
  */
 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
 
-struct rte_event_crypto_adapter {
+struct event_crypto_adapter {
 	/* Event device identifier */
 	uint8_t eventdev_id;
 	/* Event port identifier */
@@ -99,7 +99,7 @@ struct crypto_queue_pair_info {
 	uint8_t len;
 } __rte_cache_aligned;
 
-static struct rte_event_crypto_adapter **event_crypto_adapter;
+static struct event_crypto_adapter **event_crypto_adapter;
 
 /* Macros to check for valid adapter */
 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -141,7 +141,7 @@ eca_init(void)
 	return 0;
 }
 
-static inline struct rte_event_crypto_adapter *
+static inline struct event_crypto_adapter *
 eca_id_to_adapter(uint8_t id)
 {
 	return event_crypto_adapter ?
@@ -158,7 +158,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	int ret;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
 
 	if (adapter == NULL)
 		return -EINVAL;
@@ -202,7 +202,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				enum rte_event_crypto_adapter_mode mode,
 				void *conf_arg)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
 	struct rte_event_dev_info dev_info;
 	int socket_id;
@@ -304,7 +304,7 @@ rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_crypto_adapter_free(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -329,8 +329,8 @@ rte_event_crypto_adapter_free(uint8_t id)
 }
 
 static inline unsigned int
-eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
-		 struct rte_event *ev, unsigned int cnt)
+eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+		     unsigned int cnt)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -420,7 +420,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
 }
 
 static unsigned int
-eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -466,8 +466,8 @@ eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
 }
 
 static int
-eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_enq)
+eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_enq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct rte_event ev[BATCH_SIZE];
@@ -500,8 +500,8 @@ eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline void
-eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
-		  struct rte_crypto_op **ops, uint16_t num)
+eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
+		      struct rte_crypto_op **ops, uint16_t num)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -564,8 +564,8 @@ eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline unsigned int
-eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_deq)
+eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_deq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -627,8 +627,8 @@ eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static void
-eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_ops)
+eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
+		       unsigned int max_ops)
 {
 	while (max_ops) {
 		unsigned int e_cnt, d_cnt;
@@ -648,7 +648,7 @@ eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
 static int
 eca_service_func(void *args)
 {
-	struct rte_event_crypto_adapter *adapter = args;
+	struct event_crypto_adapter *adapter = args;
 
 	if (rte_spinlock_trylock(&adapter->lock) == 0)
 		return 0;
@@ -659,7 +659,7 @@ eca_service_func(void *args)
 }
 
 static int
-eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
 {
 	struct rte_event_crypto_adapter_conf adapter_conf;
 	struct rte_service_spec service;
@@ -699,10 +699,9 @@ eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
 }
 
 static void
-eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
-			struct crypto_device_info *dev_info,
-			int32_t queue_pair_id,
-			uint8_t add)
+eca_update_qp_info(struct event_crypto_adapter *adapter,
+		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
+		   uint8_t add)
 {
 	struct crypto_queue_pair_info *qp_info;
 	int enabled;
@@ -729,9 +728,8 @@ eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
 }
 
 static int
-eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
-		uint8_t cdev_id,
-		int queue_pair_id)
+eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
+		   int queue_pair_id)
 {
 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
 	struct crypto_queue_pair_info *qpairs;
@@ -773,7 +771,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			int32_t queue_pair_id,
 			const struct rte_event *event)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
 	uint32_t cap;
@@ -889,7 +887,7 @@ int
 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 					int32_t queue_pair_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	int ret;
@@ -975,7 +973,7 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 static int
 eca_adapter_ctrl(uint8_t id, int start)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1017,7 +1015,7 @@ eca_adapter_ctrl(uint8_t id, int start)
 int
 rte_event_crypto_adapter_start(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 	adapter = eca_id_to_adapter(id);
@@ -1039,7 +1037,7 @@ int
 rte_event_crypto_adapter_stats_get(uint8_t id,
 				struct rte_event_crypto_adapter_stats *stats)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_crypto_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -1083,7 +1081,7 @@ rte_event_crypto_adapter_stats_get(uint8_t id,
 int
 rte_event_crypto_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1111,7 +1109,7 @@ rte_event_crypto_adapter_stats_reset(uint8_t id)
 int
 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -1128,7 +1126,7 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index bd68b8efe1..7d37456856 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -82,7 +82,7 @@ struct eth_rx_vector_data {
 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
 
 /* Instance per adapter */
-struct rte_eth_event_enqueue_buffer {
+struct eth_event_enqueue_buffer {
 	/* Count of events in this buffer */
 	uint16_t count;
 	/* Array of events in this buffer */
@@ -98,7 +98,7 @@ struct rte_eth_event_enqueue_buffer {
 	uint16_t last_mask;
 };
 
-struct rte_event_eth_rx_adapter {
+struct event_eth_rx_adapter {
 	/* RSS key */
 	uint8_t rss_key_be[RSS_KEY_SIZE];
 	/* Event device identifier */
@@ -124,7 +124,7 @@ struct rte_event_eth_rx_adapter {
 	/* Next entry in wrr[] to begin polling */
 	uint32_t wrr_pos;
 	/* Event burst buffer */
-	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+	struct eth_event_enqueue_buffer event_enqueue_buffer;
 	/* Vector enable flag */
 	uint8_t ena_vector;
 	/* Timestamp of previous vector expiry list traversal */
@@ -244,10 +244,10 @@ struct eth_rx_queue_info {
 	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
 	uint64_t event;
 	struct eth_rx_vector_data vector_data;
-	struct rte_eth_event_enqueue_buffer *event_buf;
+	struct eth_event_enqueue_buffer *event_buf;
 };
 
-static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+static struct event_eth_rx_adapter **event_eth_rx_adapter;
 
 /* Enable dynamic timestamp field in mbuf */
 static uint64_t event_eth_rx_timestamp_dynflag;
@@ -266,9 +266,9 @@ rxa_validate_id(uint8_t id)
 	return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
 }
 
-static inline struct rte_eth_event_enqueue_buffer *
-rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter,
-		  uint16_t eth_dev_id, uint16_t rx_queue_id)
+static inline struct eth_event_enqueue_buffer *
+rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+		  uint16_t rx_queue_id)
 {
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_device_info *dev_info =
@@ -286,7 +286,7 @@ rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter,
 } while (0)
 
 static inline int
-rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
 {
 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
 }
@@ -304,10 +304,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
  */
 static int
-rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
-	 unsigned int n, int *cw,
-	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
-	 uint16_t gcd, int prev)
+rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
+	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+	     uint16_t gcd, int prev)
 {
 	int i = prev;
 	uint16_t w;
@@ -412,10 +411,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
  */
 static void
-rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_intr)
+rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_intr)
 {
 	uint32_t intr_diff;
 
@@ -431,12 +429,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * interrupt queues could currently be poll mode Rx queues
  */
 static void
-rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+			  uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -463,11 +459,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
  * after deleting poll mode rx queues
  */
 static void
-rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
 {
 	uint32_t poll_diff;
 	uint32_t wrr_len_diff;
@@ -488,13 +482,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
 /* Calculate nb_rx_* after adding poll mode rx queues
  */
 static void
-rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint16_t wt,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint16_t wt, uint32_t *nb_rx_poll,
+			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -521,13 +512,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after adding rx_queue_id */
 static void
-rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint16_t wt,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	if (wt != 0)
 		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
@@ -539,12 +527,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after deleting rx_queue_id */
 static void
-rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
 				nb_wrr);
@@ -556,8 +542,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the rx_poll array
  */
 static struct eth_rx_poll_entry *
-rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint32_t num_rx_polled)
+rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
 {
 	size_t len;
 
@@ -573,7 +558,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the WRR array
  */
 static uint32_t *
-rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
 {
 	size_t len;
 
@@ -586,11 +571,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
 }
 
 static int
-rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint32_t nb_poll,
-		uint32_t nb_wrr,
-		struct eth_rx_poll_entry **rx_poll,
-		uint32_t **wrr_sched)
+rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
+		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
+		      uint32_t **wrr_sched)
 {
 
 	if (nb_poll == 0) {
@@ -615,9 +598,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Precalculate WRR polling sequence for all queues in rx_adapter */
 static void
-rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_rx_poll_entry *rx_poll,
-		uint32_t *rx_wrr)
+rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
+		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
 {
 	uint16_t d;
 	uint16_t q;
@@ -744,13 +726,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 }
 
 static inline int
-rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
 {
 	return !!rx_adapter->enq_block_count;
 }
 
 static inline void
-rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->rx_enq_block_start_ts)
 		return;
@@ -763,8 +745,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
-		    struct rte_event_eth_rx_adapter_stats *stats)
+rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
+		     struct rte_event_eth_rx_adapter_stats *stats)
 {
 	if (unlikely(!stats->rx_enq_start_ts))
 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
@@ -783,8 +765,8 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
-		       struct rte_eth_event_enqueue_buffer *buf)
+rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
+		       struct eth_event_enqueue_buffer *buf)
 {
 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
@@ -828,7 +810,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
 		struct eth_rx_vector_data *vec)
 {
 	vec->vector_ev->nb_elem = 0;
@@ -839,9 +821,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline uint16_t
-rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 			struct eth_rx_queue_info *queue_info,
-			struct rte_eth_event_enqueue_buffer *buf,
+			struct eth_event_enqueue_buffer *buf,
 			struct rte_mbuf **mbufs, uint16_t num)
 {
 	struct rte_event *ev = &buf->events[buf->count];
@@ -899,12 +881,9 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		uint16_t rx_queue_id,
-		struct rte_mbuf **mbufs,
-		uint16_t num,
-		struct rte_eth_event_enqueue_buffer *buf)
+rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
+		 struct eth_event_enqueue_buffer *buf)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
@@ -983,7 +962,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline bool
-rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
+rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 {
 	uint32_t nb_req = buf->tail + BATCH_SIZE;
 
@@ -1004,13 +983,9 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
 
 /* Enqueue packets from  <port, q>  to event buffer */
 static inline uint32_t
-rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint16_t port_id,
-	uint16_t queue_id,
-	uint32_t rx_count,
-	uint32_t max_rx,
-	int *rxq_empty,
-	struct rte_eth_event_enqueue_buffer *buf)
+rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
+	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
+	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
 	struct rte_event_eth_rx_adapter_stats *stats =
@@ -1047,8 +1022,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
-		void *data)
+rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
 {
 	uint16_t port_id;
 	uint16_t queue;
@@ -1088,8 +1062,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
-			uint32_t num_intr_vec)
+rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
+			  uint32_t num_intr_vec)
 {
 	if (rx_adapter->num_intr_vec + num_intr_vec >
 				RTE_EVENT_ETH_INTR_RING_SIZE) {
@@ -1104,9 +1078,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Delete entries for (dev, queue) from the interrupt ring */
 static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			uint16_t rx_queue_id)
+rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info,
+			  uint16_t rx_queue_id)
 {
 	int i, n;
 	union queue_data qd;
@@ -1139,7 +1113,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
 static void *
 rxa_intr_thread(void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
+	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
 	int n, i;
 
@@ -1162,12 +1136,12 @@ rxa_intr_thread(void *arg)
  * mbufs to eventdev
  */
 static inline uint32_t
-rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
@@ -1282,11 +1256,11 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
  * it.
  */
 static inline uint32_t
-rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
-	struct rte_eth_event_enqueue_buffer *buf = NULL;
+	struct eth_event_enqueue_buffer *buf = NULL;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1333,8 +1307,8 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
-	struct rte_eth_event_enqueue_buffer *buf = NULL;
+	struct event_eth_rx_adapter *rx_adapter = arg;
+	struct eth_event_enqueue_buffer *buf = NULL;
 	struct rte_event *ev;
 
 	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
@@ -1358,7 +1332,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 static int
 rxa_service_func(void *args)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = args;
+	struct event_eth_rx_adapter *rx_adapter = args;
 	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
@@ -1434,7 +1408,7 @@ rxa_memzone_lookup(void)
 	return 0;
 }
 
-static inline struct rte_event_eth_rx_adapter *
+static inline struct event_eth_rx_adapter *
 rxa_id_to_adapter(uint8_t id)
 {
 	return event_eth_rx_adapter ?
@@ -1451,7 +1425,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	uint8_t port_id;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	dev_conf = dev->data->dev_conf;
@@ -1500,7 +1474,7 @@ rxa_epoll_create1(void)
 }
 
 static int
-rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->epd != INIT_FD)
 		return 0;
@@ -1517,7 +1491,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1561,7 +1535,7 @@ rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 
@@ -1582,7 +1556,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
 {
 	int ret;
 
@@ -1600,9 +1574,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1630,9 +1603,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id)
+rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 {
 	int err;
 	int i;
@@ -1689,9 +1661,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
+		struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err, err1;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1779,9 +1750,8 @@ rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int rx_queue_id)
+rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 
 {
 	int i, j, err;
@@ -1829,9 +1799,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	return err;
 }
 
-
 static int
-rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
 {
 	int ret;
 	struct rte_service_spec service;
@@ -1874,10 +1843,9 @@ rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
 }
 
 static void
-rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int32_t rx_queue_id,
-		uint8_t add)
+rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, int32_t rx_queue_id,
+		 uint8_t add)
 {
 	struct eth_rx_queue_info *queue_info;
 	int enabled;
@@ -1927,9 +1895,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
 }
 
 static void
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id)
+rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
+	   struct eth_device_info *dev_info, int32_t rx_queue_id)
 {
 	struct eth_rx_vector_data *vec;
 	int pollq;
@@ -1968,7 +1935,7 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 	dev_info->nb_rx_intr -= intrq;
 	dev_info->nb_shared_intr -= intrq && sintrq;
 	if (rx_adapter->use_queue_event_buf) {
-		struct rte_eth_event_enqueue_buffer *event_buf =
+		struct eth_event_enqueue_buffer *event_buf =
 			dev_info->rx_queue[rx_queue_id].event_buf;
 		rte_free(event_buf->events);
 		rte_free(event_buf);
@@ -1977,10 +1944,9 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id,
-	const struct rte_event_eth_rx_adapter_queue_conf *conf)
+rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
+	      struct eth_device_info *dev_info, int32_t rx_queue_id,
+	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
 {
 	struct eth_rx_queue_info *queue_info;
 	const struct rte_event *ev = &conf->ev;
@@ -1988,7 +1954,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	int intrq;
 	int sintrq;
 	struct rte_event *qi_ev;
-	struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL;
+	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
 	int ret;
 
@@ -2098,10 +2064,10 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	return 0;
 }
 
-static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		int rx_queue_id,
-		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+static int
+rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	   int rx_queue_id,
+	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
@@ -2242,7 +2208,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 static int
 rxa_ctrl(uint8_t id, int start)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2290,8 +2256,8 @@ rxa_create(uint8_t id, uint8_t dev_id,
 	   rte_event_eth_rx_adapter_conf_cb conf_cb,
 	   void *conf_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_event_enqueue_buffer *buf;
 	struct rte_event *events;
 	int ret;
 	int socket_id;
@@ -2488,7 +2454,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_eth_rx_adapter_free(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2522,7 +2488,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
 {
 	int ret;
 	uint32_t cap;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	struct rte_event_eth_rx_adapter_vector_limits limits;
@@ -2682,7 +2648,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 {
 	int ret = 0;
 	struct rte_eventdev *dev;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	uint32_t nb_rx_poll = 0;
@@ -2852,8 +2818,8 @@ int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_event_enqueue_buffer *buf;
 	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -2907,7 +2873,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 int
 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2938,7 +2904,7 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 int
 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	if (rxa_memzone_lookup())
 		return -ENOMEM;
@@ -2961,7 +2927,7 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
 					rte_event_eth_rx_adapter_cb_fn cb_fn,
 					void *cb_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	int ret;
@@ -3007,7 +2973,7 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct rte_eventdev *dev;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	struct eth_rx_queue_info *queue_info;
 	struct rte_event *qi_ev;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 31fa9ac4b8..f1fcd6ce3d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1193,7 +1193,7 @@ struct rte_event {
 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
 /**< The application can override the adapter generated flow ID in the
  * event. This flow ID can be specified when adding an ethdev Rx queue
- * to the adapter using the ev member of struct rte_event_eth_rx_adapter
+ * to the adapter using the ev.flow_id member.
  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
  */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 10/14] eventdev: rearrange fields in timer object
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (7 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
                         ` (5 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Rearrange fields in rte_event_timer data structure to remove holes.
Also, remove use of volatile from rte_event_timer.
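
As an aside, here is a minimal standalone sketch (not the actual
rte_event_timer definition; the member names are illustrative) of why
moving a 4-byte enum from between two 8-byte members to just before the
trailing zero-length array removes a padding hole on a typical LP64 ABI:

  #include <stddef.h>
  #include <stdint.h>

  enum tim_state { TIM_ARMED, TIM_CANCELED };     /* 4 bytes */

  struct before {                 /* enum between 8-byte members */
          uint64_t ticks;         /* offset 0 */
          enum tim_state state;   /* offset 8, then a 4-byte hole */
          uint64_t opaque;        /* offset 16 */
          uint8_t user_meta[];    /* offset 24 */
  };

  struct after {                  /* 4-byte member moved to the tail */
          uint64_t ticks;         /* offset 0 */
          uint64_t opaque;        /* offset 8 */
          enum tim_state state;   /* offset 16 */
          uint8_t user_meta[];    /* offset 20, 4 bytes earlier */
  };

  _Static_assert(offsetof(struct before, user_meta) == 24, "hole");
  _Static_assert(offsetof(struct after, user_meta) == 20, "no hole");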

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_timer_adapter.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index cad6d3b4c5..1551741820 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -475,8 +475,6 @@ struct rte_event_timer {
 	 *  - op: RTE_EVENT_OP_NEW
 	 *  - event_type: RTE_EVENT_TYPE_TIMER
 	 */
-	volatile enum rte_event_timer_state state;
-	/**< State of the event timer. */
 	uint64_t timeout_ticks;
 	/**< Expiry timer ticks expressed in number of *timer_ticks_ns* from
 	 * now.
@@ -488,6 +486,8 @@ struct rte_event_timer {
 	 * implementation specific values to share between the arm and cancel
 	 * operations.  The application should not modify this field.
 	 */
+	enum rte_event_timer_state state;
+	/**< State of the event timer. */
 	uint8_t user_meta[0];
 	/**< Memory to store user specific metadata.
 	 * The event timer adapter implementation should not modify this area.
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 11/14] eventdev: move timer adapters memory to hugepage
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (8 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 10/14] eventdev: rearrange fields in timer object pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 12/14] eventdev: promote event vector API to stable pbhagavatula
                         ` (4 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move memory used by timer adapters to hugepage.
Allocate the memory on the first adapter create or lookup so that both
primary and secondary process use cases are covered.
This avoids potential TLB misses and aligns with the memory layout
of other subsystems.
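
For readers skimming the diff below: the allocation added to both
rte_event_timer_adapter_create_ext() and rte_event_timer_adapter_lookup()
boils down to the pattern sketched here (the helper name
timer_adapters_alloc() is illustrative and does not exist in the patch):

  #include <errno.h>
  #include <rte_errno.h>
  #include <rte_malloc.h>
  #include <rte_event_timer_adapter.h>

  static struct rte_event_timer_adapter *adapters;

  static int
  timer_adapters_alloc(void)
  {
          if (adapters != NULL)   /* already set up by an earlier call */
                  return 0;

          /* rte_zmalloc() serves the array from the DPDK heap, which is
           * hugepage backed, instead of the static BSS array used before.
           */
          adapters = rte_zmalloc("Eventdev",
                                 sizeof(*adapters) *
                                         RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
                                 RTE_CACHE_LINE_SIZE);
          if (adapters == NULL) {
                  rte_errno = ENOMEM;
                  return -ENOMEM;
          }
          return 0;
  }

Running the same check in both entry points covers whichever of create or
lookup happens first, in the primary as well as in a secondary process.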

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_timer_adapter.c | 36 ++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ae55407042..894f532ef0 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
-static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
+static struct rte_event_timer_adapter *adapters;
 
 static const struct event_timer_adapter_ops swtim_ops;
 
@@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
 	int n, ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (conf == NULL) {
 		rte_errno = EINVAL;
 		return NULL;
@@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
 	int ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (adapters[adapter_id].allocated)
 		return &adapters[adapter_id]; /* Adapter is already loaded */
 
@@ -358,7 +380,7 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
 int
 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
 {
-	int ret;
+	int i, ret;
 
 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
 	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
@@ -382,6 +404,16 @@ rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
 	adapter->data = NULL;
 	adapter->allocated = 0;
 
+	ret = 0;
+	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
+		if (adapters[i].allocated)
+			ret = adapters[i].allocated;
+
+	if (!ret) {
+		rte_free(adapters);
+		adapters = NULL;
+	}
+
 	rte_eventdev_trace_timer_adapter_free(adapter);
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 12/14] eventdev: promote event vector API to stable
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (9 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 13/14] eventdev: make trace APIs internal pbhagavatula
                         ` (3 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Jay Jayatheerthan, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Promote event vector configuration APIs to stable.
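
As a usage sketch, both rte_event_vector_pool_create() and
rte_event_eth_rx_adapter_vector_limits_get() can now be called without
building with the experimental-API opt-in; the pool geometry below
(16383 vectors, 128 cache entries, 32 events per vector) is only a
placeholder:

  #include <rte_eventdev.h>
  #include <rte_lcore.h>
  #include <rte_mempool.h>

  static struct rte_mempool *
  vector_pool_create(void)
  {
          /* Pool of event vectors, each able to carry up to 32 pointers. */
          return rte_event_vector_pool_create("evt_vec_pool", 16383, 128,
                                              32, rte_socket_id());
  }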

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/rte_event_eth_rx_adapter.h | 1 -
 lib/eventdev/rte_eventdev.h             | 1 -
 lib/eventdev/version.map                | 4 ++--
 3 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index c4257e750d..ab625f7273 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -588,7 +588,6 @@ int rte_event_eth_rx_adapter_cb_register(uint8_t id, uint16_t eth_dev_id,
  *  - 0: Success.
  *  - <0: Error code on failure.
  */
-__rte_experimental
 int rte_event_eth_rx_adapter_vector_limits_get(
 	uint8_t dev_id, uint16_t eth_port_id,
 	struct rte_event_eth_rx_adapter_vector_limits *limits);
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index f1fcd6ce3d..14d4d9ec81 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1734,7 +1734,6 @@ int rte_event_dev_selftest(uint8_t dev_id);
  *    - ENOMEM - no appropriate memory area found in which to create memzone
  *    - ENAMETOOLONG - mempool name requested is too long.
  */
-__rte_experimental
 struct rte_mempool *
 rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 9f6eb4ba3c..8f2fb0cf14 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -42,6 +42,7 @@ DPDK_22 {
 	rte_event_eth_rx_adapter_start;
 	rte_event_eth_rx_adapter_stats_get;
 	rte_event_eth_rx_adapter_stats_reset;
+	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_rx_adapter_stop;
 	rte_event_eth_tx_adapter_caps_get;
 	rte_event_eth_tx_adapter_create;
@@ -83,6 +84,7 @@ DPDK_22 {
 	rte_event_timer_arm_burst;
 	rte_event_timer_arm_tmo_tick_burst;
 	rte_event_timer_cancel_burst;
+	rte_event_vector_pool_create;
 
 	#added in 21.11
 	rte_event_fp_ops;
@@ -136,8 +138,6 @@ EXPERIMENTAL {
 	rte_event_eth_rx_adapter_create_with_params;
 
 	#added in 21.05
-	rte_event_vector_pool_create;
-	rte_event_eth_rx_adapter_vector_limits_get;
 	__rte_eventdev_trace_crypto_adapter_enqueue;
 	rte_event_eth_rx_adapter_queue_conf_get;
 };
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 13/14] eventdev: make trace APIs internal
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (10 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 12/14] eventdev: promote event vector API to stable pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 14/14] eventdev: mark trace variables as internal pbhagavatula
                         ` (2 subsequent siblings)
  14 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan, Erik Gabriel Carrillo
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Slowpath trace APIs are only used in rte_eventdev.c, so make them
internal.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
---
 lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} | 0
 lib/eventdev/eventdev_trace_points.c                    | 2 +-
 lib/eventdev/meson.build                                | 2 +-
 lib/eventdev/rte_event_crypto_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_rx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_tx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_timer_adapter.c                  | 2 +-
 lib/eventdev/rte_eventdev.c                             | 2 +-
 8 files changed, 7 insertions(+), 7 deletions(-)
 rename lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} (100%)

diff --git a/lib/eventdev/rte_eventdev_trace.h b/lib/eventdev/eventdev_trace.h
similarity index 100%
rename from lib/eventdev/rte_eventdev_trace.h
rename to lib/eventdev/eventdev_trace.h
diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
index 3867ec8008..237d9383fd 100644
--- a/lib/eventdev/eventdev_trace_points.c
+++ b/lib/eventdev/eventdev_trace_points.c
@@ -4,7 +4,7 @@
 
 #include <rte_trace_point_register.h>
 
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 /* Eventdev trace points */
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_configure,
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 22c3289912..abe88f733a 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -24,7 +24,6 @@ headers = files(
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_eventdev.h',
-        'rte_eventdev_trace.h',
         'rte_eventdev_trace_fp.h',
 )
 indirect_headers += files(
@@ -34,6 +33,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'eventdev_trace.h',
         'event_timer_adapter_pmd.h',
 )
 
diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index e9e660a3d2..ae1151fb75 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -16,7 +16,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_crypto_adapter.h"
 
 #define BATCH_SIZE 32
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 7d37456856..106b68c2f4 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -22,7 +22,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_rx_adapter.h"
 
 #define BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
index 18c0359db7..ee3631bced 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.c
+++ b/lib/eventdev/rte_event_eth_tx_adapter.c
@@ -6,7 +6,7 @@
 #include <rte_ethdev.h>
 
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_tx_adapter.h"
 
 #define TXA_BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index 894f532ef0..86b6c3fc6f 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -24,7 +24,7 @@
 #include "eventdev_pmd.h"
 #include "rte_event_timer_adapter.h"
 #include "rte_eventdev.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index de6346194e..f881b7cc35 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -36,7 +36,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v4 14/14] eventdev: mark trace variables as internal
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (11 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 13/14] eventdev: make trace APIs internal pbhagavatula
@ 2021-10-15 19:02       ` pbhagavatula
  2021-10-17  5:58         ` Jerin Jacob
  2021-10-17 15:35       ` [dpdk-dev] [PATCH v4 01/14] eventdev: make driver interface " Hemant Agrawal
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
  14 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-15 19:02 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark rte_trace global variables as internal, i.e. remove them
from the experimental section of the version map.
Some of them are used in inline APIs, so mark those as global.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 doc/guides/rel_notes/release_21_11.rst | 12 +++++
 lib/eventdev/version.map               | 71 ++++++++++++--------------
 2 files changed, 44 insertions(+), 39 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 38e601c236..5b4a05c3ae 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -226,6 +226,9 @@ API Changes
   the crypto/security operation. This field will be used to communicate
   events such as soft expiry with IPsec in lookaside mode.
 
+* eventdev: Event vector configuration APIs have been made stable.
+  Move memory used by timer adapters to hugepage. This will prevent TLB misses
+  if any and aligns to memory structure of other subsystems.
 
 ABI Changes
 -----------
@@ -277,6 +280,15 @@ ABI Changes
   were added in structure ``rte_event_eth_rx_adapter_stats`` to get additional
   status.
 
+* eventdev: A new structure ``rte_event_fp_ops`` has been added which is now used
+  by the fastpath inline functions. The structures ``rte_eventdev``,
+  ``rte_eventdev_data`` have been made internal. ``rte_eventdevs[]`` can't be
+  accessed directly by user any more. This change is transparent to both
+  applications and PMDs.
+
+* eventdev: Re-arrange fields in ``rte_event_timer`` to remove holes.
+  ``rte_event_timer_adapter_pmd.h`` has been made internal.
+
 
 Known Issues
 ------------
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 8f2fb0cf14..cd37164141 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -1,6 +1,13 @@
 DPDK_22 {
 	global:
 
+	__rte_eventdev_trace_crypto_adapter_enqueue;
+	__rte_eventdev_trace_deq_burst;
+	__rte_eventdev_trace_enq_burst;
+	__rte_eventdev_trace_eth_tx_adapter_enqueue;
+	__rte_eventdev_trace_timer_arm_burst;
+	__rte_eventdev_trace_timer_arm_tmo_tick_burst;
+	__rte_eventdev_trace_timer_cancel_burst;
 	rte_event_crypto_adapter_caps_get;
 	rte_event_crypto_adapter_create;
 	rte_event_crypto_adapter_create_ext;
@@ -42,8 +49,8 @@ DPDK_22 {
 	rte_event_eth_rx_adapter_start;
 	rte_event_eth_rx_adapter_stats_get;
 	rte_event_eth_rx_adapter_stats_reset;
-	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_rx_adapter_stop;
+	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_tx_adapter_caps_get;
 	rte_event_eth_tx_adapter_create;
 	rte_event_eth_tx_adapter_create_ext;
@@ -56,6 +63,7 @@ DPDK_22 {
 	rte_event_eth_tx_adapter_stats_get;
 	rte_event_eth_tx_adapter_stats_reset;
 	rte_event_eth_tx_adapter_stop;
+	rte_event_fp_ops;
 	rte_event_port_attr_get;
 	rte_event_port_default_conf_get;
 	rte_event_port_link;
@@ -86,25 +94,28 @@ DPDK_22 {
 	rte_event_timer_cancel_burst;
 	rte_event_vector_pool_create;
 
-	#added in 21.11
-	rte_event_fp_ops;
-
 	local: *;
 };
 
 EXPERIMENTAL {
 	global:
 
-	# added in 20.05
-	__rte_eventdev_trace_configure;
-	__rte_eventdev_trace_queue_setup;
-	__rte_eventdev_trace_port_link;
-	__rte_eventdev_trace_port_unlink;
-	__rte_eventdev_trace_start;
-	__rte_eventdev_trace_stop;
+	# added in 21.11
+	rte_event_eth_rx_adapter_create_with_params;
+	rte_event_eth_rx_adapter_queue_conf_get;
+};
+
+INTERNAL {
+	global:
+
 	__rte_eventdev_trace_close;
-	__rte_eventdev_trace_deq_burst;
-	__rte_eventdev_trace_enq_burst;
+	__rte_eventdev_trace_configure;
+	__rte_eventdev_trace_crypto_adapter_create;
+	__rte_eventdev_trace_crypto_adapter_free;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
+	__rte_eventdev_trace_crypto_adapter_start;
+	__rte_eventdev_trace_crypto_adapter_stop;
 	__rte_eventdev_trace_eth_rx_adapter_create;
 	__rte_eventdev_trace_eth_rx_adapter_free;
 	__rte_eventdev_trace_eth_rx_adapter_queue_add;
@@ -117,38 +128,19 @@ EXPERIMENTAL {
 	__rte_eventdev_trace_eth_tx_adapter_queue_del;
 	__rte_eventdev_trace_eth_tx_adapter_start;
 	__rte_eventdev_trace_eth_tx_adapter_stop;
-	__rte_eventdev_trace_eth_tx_adapter_enqueue;
+	__rte_eventdev_trace_port_link;
+	__rte_eventdev_trace_port_setup;
+	__rte_eventdev_trace_port_unlink;
+	__rte_eventdev_trace_queue_setup;
+	__rte_eventdev_trace_start;
+	__rte_eventdev_trace_stop;
 	__rte_eventdev_trace_timer_adapter_create;
+	__rte_eventdev_trace_timer_adapter_free;
 	__rte_eventdev_trace_timer_adapter_start;
 	__rte_eventdev_trace_timer_adapter_stop;
-	__rte_eventdev_trace_timer_adapter_free;
-	__rte_eventdev_trace_timer_arm_burst;
-	__rte_eventdev_trace_timer_arm_tmo_tick_burst;
-	__rte_eventdev_trace_timer_cancel_burst;
-	__rte_eventdev_trace_crypto_adapter_create;
-	__rte_eventdev_trace_crypto_adapter_free;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
-	__rte_eventdev_trace_crypto_adapter_start;
-	__rte_eventdev_trace_crypto_adapter_stop;
-
-	# changed in 20.11
-	__rte_eventdev_trace_port_setup;
-	# added in 21.11
-	rte_event_eth_rx_adapter_create_with_params;
-
-	#added in 21.05
-	__rte_eventdev_trace_crypto_adapter_enqueue;
-	rte_event_eth_rx_adapter_queue_conf_get;
-};
-
-INTERNAL {
-	global:
-
 	event_dev_fp_ops_reset;
 	event_dev_fp_ops_set;
 	event_dev_probing_finish;
-	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;
 	rte_event_pmd_is_valid_dev;
@@ -156,6 +148,7 @@ INTERNAL {
 	rte_event_pmd_pci_probe_named;
 	rte_event_pmd_pci_remove;
 	rte_event_pmd_release;
+	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_vdev_init;
 	rte_event_pmd_vdev_uninit;
 	rte_eventdevs;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v4 14/14] eventdev: mark trace variables as internal
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 14/14] eventdev: mark trace variables as internal pbhagavatula
@ 2021-10-17  5:58         ` Jerin Jacob
  2021-10-18 15:06           ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
  0 siblings, 1 reply; 119+ messages in thread
From: Jerin Jacob @ 2021-10-17  5:58 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: Jerin Jacob, Ray Kinsella, dpdk-dev

On Sat, Oct 16, 2021 at 12:34 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Mark rte_trace global variables as internal i.e. remove them
> from experimental section of version map.
> Some of them are used in inline APIs, mark those as global.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Ray Kinsella <mdr@ashroe.eu>
> ---
>  doc/guides/rel_notes/release_21_11.rst | 12 +++++
>  lib/eventdev/version.map               | 71 ++++++++++++--------------
>  2 files changed, 44 insertions(+), 39 deletions(-)
>
> diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
> index 38e601c236..5b4a05c3ae 100644
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -226,6 +226,9 @@ API Changes
>    the crypto/security operation. This field will be used to communicate
>    events such as soft expiry with IPsec in lookaside mode.
>
> +* eventdev: Event vector configuration APIs have been made stable.
> +  Move memory used by timer adapters to hugepage. This will prevent TLB misses
> +  if any and aligns to memory structure of other subsystems.
>
>  ABI Changes
>  -----------
> @@ -277,6 +280,15 @@ ABI Changes
>    were added in structure ``rte_event_eth_rx_adapter_stats`` to get additional
>    status.
>
> +* eventdev: A new structure ``rte_event_fp_ops`` has been added which is now used
> +  by the fastpath inline functions. The structures ``rte_eventdev``,
> +  ``rte_eventdev_data`` have been made internal. ``rte_eventdevs[]`` can't be
> +  accessed directly by user any more. This change is transparent to both
> +  applications and PMDs.
> +
> +* eventdev: Re-arrange fields in ``rte_event_timer`` to remove holes.
> +  ``rte_event_timer_adapter_pmd.h`` has been made internal.

Looks good. Please fix the following. If there are no objections, I
will merge the next version.

1) Please move the doc update to respective patches
2) Following checkpath issue
[for-main]dell[dpdk-next-eventdev] $ ./devtools/checkpatches.sh -n 14

### eventdev: move inline APIs into separate structure

INFO: symbol event_dev_fp_ops_reset has been added to the INTERNAL
section of the version map
INFO: symbol event_dev_fp_ops_set has been added to the INTERNAL
section of the version map
INFO: symbol event_dev_probing_finish has been added to the INTERNAL
section of the version map
ERROR: symbol rte_event_fp_ops is added in the DPDK_22 section, but is
expected to be added in the EXPERIMENTAL section of the version map

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v4 05/14] drivers/event: invoke probing finish function
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 05/14] drivers/event: invoke probing finish function pbhagavatula
@ 2021-10-17 15:34         ` Hemant Agrawal
  0 siblings, 0 replies; 119+ messages in thread
From: Hemant Agrawal @ 2021-10-17 15:34 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Nipun Gupta, Mattias Rönnblom,
	Liang Ma, Peter Mccarthy, Harry van Haaren
  Cc: dev

Acked-by:  Hemant Agrawal <hemant.agrawal@nxp.com>

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v4 01/14] eventdev: make driver interface as internal
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (12 preceding siblings ...)
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 14/14] eventdev: mark trace variables as internal pbhagavatula
@ 2021-10-17 15:35       ` Hemant Agrawal
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
  14 siblings, 0 replies; 119+ messages in thread
From: Hemant Agrawal @ 2021-10-17 15:35 UTC (permalink / raw)
  To: pbhagavatula, jerinj, Shijith Thotton, Timothy McDaniel,
	Nipun Gupta, Mattias Rönnblom, Liang Ma, Peter Mccarthy,
	Harry van Haaren, Abhinandan Gujjar, Ray Kinsella
  Cc: dev

For dpaax
Acked-by:  Hemant Agrawal <hemant.agrawal@nxp.com>

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v4 07/14] eventdev: hide event device related structures
  2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 07/14] eventdev: hide event device related structures pbhagavatula
@ 2021-10-18  7:07         ` Harman Kalra
  0 siblings, 0 replies; 119+ messages in thread
From: Harman Kalra @ 2021-10-18  7:07 UTC (permalink / raw)
  To: Pavan Nikhilesh Bhagavatula, Jerin Jacob Kollanukkaran,
	Timothy McDaniel, Mattias Rönnblom,
	Pavan Nikhilesh Bhagavatula, Ray Kinsella
  Cc: dev



> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Saturday, October 16, 2021 12:32 AM
> To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Timothy McDaniel
> <timothy.mcdaniel@intel.com>; Mattias Rönnblom
> <mattias.ronnblom@ericsson.com>; Pavan Nikhilesh Bhagavatula
> <pbhagavatula@marvell.com>; Harman Kalra <hkalra@marvell.com>; Ray
> Kinsella <mdr@ashroe.eu>
> Cc: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v4 07/14] eventdev: hide event device related
> structures
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Move rte_eventdev, rte_eventdev_data structures to eventdev_pmd.h.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>

LGTM from octeontx POV

Acked-by: Harman Kalra <hkalra@marvell.com>


Thanks
Harman


> ---
>  drivers/event/dlb2/dlb2_inline_fns.h   |  2 +
>  drivers/event/dsw/dsw_evdev.h          |  2 +
>  drivers/event/octeontx/timvf_worker.h  |  2 +
> drivers/net/octeontx/octeontx_ethdev.c |  3 +-
>  lib/eventdev/eventdev_pmd.h            | 92 +++++++++++++++++++++++++
>  lib/eventdev/rte_eventdev.c            | 22 ------
>  lib/eventdev/rte_eventdev_core.h       | 93 --------------------------
>  lib/eventdev/version.map               |  2 +-
>  8 files changed, 101 insertions(+), 117 deletions(-)
> 
> diff --git a/drivers/event/dlb2/dlb2_inline_fns.h
> b/drivers/event/dlb2/dlb2_inline_fns.h
> index ac8d01aa98..1429281cfd 100644
> --- a/drivers/event/dlb2/dlb2_inline_fns.h
> +++ b/drivers/event/dlb2/dlb2_inline_fns.h
> @@ -5,6 +5,8 @@
>  #ifndef _DLB2_INLINE_FNS_H_
>  #define _DLB2_INLINE_FNS_H_
> 
> +#include <eventdev_pmd.h>
> +
>  /* Inline functions required in more than one source file. */
> 
>  static inline struct dlb2_eventdev *
> diff --git a/drivers/event/dsw/dsw_evdev.h
> b/drivers/event/dsw/dsw_evdev.h index 08889a0990..631daea55c 100644
> --- a/drivers/event/dsw/dsw_evdev.h
> +++ b/drivers/event/dsw/dsw_evdev.h
> @@ -5,6 +5,8 @@
>  #ifndef _DSW_EVDEV_H_
>  #define _DSW_EVDEV_H_
> 
> +#include <eventdev_pmd.h>
> +
>  #include <rte_event_ring.h>
>  #include <rte_eventdev.h>
> 
> diff --git a/drivers/event/octeontx/timvf_worker.h
> b/drivers/event/octeontx/timvf_worker.h
> index dede1a4a4f..3f1e77f1d1 100644
> --- a/drivers/event/octeontx/timvf_worker.h
> +++ b/drivers/event/octeontx/timvf_worker.h
> @@ -2,6 +2,8 @@
>   * Copyright(c) 2017 Cavium, Inc
>   */
> 
> +#include <eventdev_pmd.h>
> +
>  #include <rte_common.h>
>  #include <rte_branch_prediction.h>
> 
> diff --git a/drivers/net/octeontx/octeontx_ethdev.c
> b/drivers/net/octeontx/octeontx_ethdev.c
> index 7c91494f0e..ddfce57394 100644
> --- a/drivers/net/octeontx/octeontx_ethdev.c
> +++ b/drivers/net/octeontx/octeontx_ethdev.c
> @@ -9,13 +9,14 @@
>  #include <string.h>
>  #include <unistd.h>
> 
> +#include <eventdev_pmd.h>
>  #include <rte_alarm.h>
>  #include <rte_branch_prediction.h>
>  #include <rte_bus_vdev.h>
>  #include <rte_cycles.h>
>  #include <rte_debug.h>
> -#include <rte_devargs.h>
>  #include <rte_dev.h>
> +#include <rte_devargs.h>
>  #include <rte_kvargs.h>
>  #include <rte_malloc.h>
>  #include <rte_mbuf_pool_ops.h>
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index 0532b542d4..9aa9943fa5 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -80,6 +80,9 @@
>  #define RTE_EVENTDEV_DETACHED  (0)
>  #define RTE_EVENTDEV_ATTACHED  (1)
> 
> +#define RTE_EVENTDEV_NAME_MAX_LEN (64)
> +/**< @internal Max length of name of event PMD */
> +
>  struct rte_eth_dev;
> 
>  /** Global structure used for maintaining state of allocated event devices */
> @@ -87,6 +90,95 @@ struct rte_eventdev_global {
>  	uint8_t nb_devs;	/**< Number of devices found */
>  };
> 
> +/**
> + * @internal
> + * The data part, with no function pointers, associated with each device.
> + *
> + * This structure is safe to place in shared memory to be common among
> + * different processes in a multi-process configuration.
> + */
> +struct rte_eventdev_data {
> +	int socket_id;
> +	/**< Socket ID where memory is allocated */
> +	uint8_t dev_id;
> +	/**< Device ID for this instance */
> +	uint8_t nb_queues;
> +	/**< Number of event queues. */
> +	uint8_t nb_ports;
> +	/**< Number of event ports. */
> +	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
> +	/**< Array of pointers to ports. */
> +	struct rte_event_port_conf
> ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
> +	/**< Array of port configuration structures. */
> +	struct rte_event_queue_conf
> queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
> +	/**< Array of queue configuration structures. */
> +	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
> +			   RTE_EVENT_MAX_QUEUES_PER_DEV];
> +	/**< Memory to store queues to port connections. */
> +	void *dev_private;
> +	/**< PMD-specific private data */
> +	uint32_t event_dev_cap;
> +	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
> +	struct rte_event_dev_config dev_conf;
> +	/**< Configuration applied to device. */
> +	uint8_t service_inited;
> +	/* Service initialization state */
> +	uint32_t service_id;
> +	/* Service ID*/
> +	void *dev_stop_flush_arg;
> +	/**< User-provided argument for event flush function */
> +
> +	RTE_STD_C11
> +	uint8_t dev_started : 1;
> +	/**< Device state: STARTED(1)/STOPPED(0) */
> +
> +	char name[RTE_EVENTDEV_NAME_MAX_LEN];
> +	/**< Unique identifier name */
> +
> +	uint64_t reserved_64s[4]; /**< Reserved for future fields */
> +	void *reserved_ptrs[4];	  /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +/** @internal The data structure associated with each event device. */
> +struct rte_eventdev {
> +	struct rte_eventdev_data *data;
> +	/**< Pointer to device data */
> +	struct eventdev_ops *dev_ops;
> +	/**< Functions exported by PMD */
> +	struct rte_device *dev;
> +	/**< Device info. supplied by probing */
> +
> +	RTE_STD_C11
> +	uint8_t attached : 1;
> +	/**< Flag indicating the device is attached */
> +
> +	event_enqueue_t enqueue;
> +	/**< Pointer to PMD enqueue function. */
> +	event_enqueue_burst_t enqueue_burst;
> +	/**< Pointer to PMD enqueue burst function. */
> +	event_enqueue_burst_t enqueue_new_burst;
> +	/**< Pointer to PMD enqueue burst function(op new variant) */
> +	event_enqueue_burst_t enqueue_forward_burst;
> +	/**< Pointer to PMD enqueue burst function(op forward variant) */
> +	event_dequeue_t dequeue;
> +	/**< Pointer to PMD dequeue function. */
> +	event_dequeue_burst_t dequeue_burst;
> +	/**< Pointer to PMD dequeue burst function. */
> +	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
> +	/**< Pointer to PMD eth Tx adapter burst enqueue function with
> +	 * events destined to same Eth port & Tx queue.
> +	 */
> +	event_tx_adapter_enqueue_t txa_enqueue;
> +	/**< Pointer to PMD eth Tx adapter enqueue function. */
> +	event_crypto_adapter_enqueue_t ca_enqueue;
> +
> +	uint64_t reserved_64s[4]; /**< Reserved for future fields */
> +	void *reserved_ptrs[3];	  /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +extern struct rte_eventdev *rte_eventdevs;
> +/** @internal The pool of rte_eventdev structures. */
> +
>  /**
>   * Get the rte_eventdev structure device pointer for the named device.
>   *
> diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c index
> 4c30a37831..e55241defd 100644
> --- a/lib/eventdev/rte_eventdev.c
> +++ b/lib/eventdev/rte_eventdev.c
> @@ -1365,24 +1365,6 @@ eventdev_find_free_device_index(void)
>  	return RTE_EVENT_MAX_DEVS;
>  }
> 
> -static uint16_t
> -rte_event_tx_adapter_enqueue(__rte_unused void *port,
> -			__rte_unused struct rte_event ev[],
> -			__rte_unused uint16_t nb_events)
> -{
> -	rte_errno = ENOTSUP;
> -	return 0;
> -}
> -
> -static uint16_t
> -rte_event_crypto_adapter_enqueue(__rte_unused void *port,
> -			__rte_unused struct rte_event ev[],
> -			__rte_unused uint16_t nb_events)
> -{
> -	rte_errno = ENOTSUP;
> -	return 0;
> -}
> -
>  struct rte_eventdev *
>  rte_event_pmd_allocate(const char *name, int socket_id)  { @@ -1403,10
> +1385,6 @@ rte_event_pmd_allocate(const char *name, int socket_id)
> 
>  	eventdev = &rte_eventdevs[dev_id];
> 
> -	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
> -	eventdev->txa_enqueue_same_dest =
> rte_event_tx_adapter_enqueue;
> -	eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
> -
>  	if (eventdev->data == NULL) {
>  		struct rte_eventdev_data *eventdev_data = NULL;
> 
> diff --git a/lib/eventdev/rte_eventdev_core.h
> b/lib/eventdev/rte_eventdev_core.h
> index 916023f71f..61d5ebdc44 100644
> --- a/lib/eventdev/rte_eventdev_core.h
> +++ b/lib/eventdev/rte_eventdev_core.h
> @@ -65,99 +65,6 @@ struct rte_event_fp_ops {
> 
>  extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
> 
> -#define RTE_EVENTDEV_NAME_MAX_LEN (64)
> -/**< @internal Max length of name of event PMD */
> -
> -/**
> - * @internal
> - * The data part, with no function pointers, associated with each device.
> - *
> - * This structure is safe to place in shared memory to be common among
> - * different processes in a multi-process configuration.
> - */
> -struct rte_eventdev_data {
> -	int socket_id;
> -	/**< Socket ID where memory is allocated */
> -	uint8_t dev_id;
> -	/**< Device ID for this instance */
> -	uint8_t nb_queues;
> -	/**< Number of event queues. */
> -	uint8_t nb_ports;
> -	/**< Number of event ports. */
> -	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
> -	/**< Array of pointers to ports. */
> -	struct rte_event_port_conf
> ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
> -	/**< Array of port configuration structures. */
> -	struct rte_event_queue_conf
> queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
> -	/**< Array of queue configuration structures. */
> -	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
> -			   RTE_EVENT_MAX_QUEUES_PER_DEV];
> -	/**< Memory to store queues to port connections. */
> -	void *dev_private;
> -	/**< PMD-specific private data */
> -	uint32_t event_dev_cap;
> -	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
> -	struct rte_event_dev_config dev_conf;
> -	/**< Configuration applied to device. */
> -	uint8_t service_inited;
> -	/* Service initialization state */
> -	uint32_t service_id;
> -	/* Service ID*/
> -	void *dev_stop_flush_arg;
> -	/**< User-provided argument for event flush function */
> -
> -	RTE_STD_C11
> -	uint8_t dev_started : 1;
> -	/**< Device state: STARTED(1)/STOPPED(0) */
> -
> -	char name[RTE_EVENTDEV_NAME_MAX_LEN];
> -	/**< Unique identifier name */
> -
> -	uint64_t reserved_64s[4]; /**< Reserved for future fields */
> -	void *reserved_ptrs[4];	  /**< Reserved for future fields */
> -} __rte_cache_aligned;
> -
> -/** @internal The data structure associated with each event device. */ -
> struct rte_eventdev {
> -	event_enqueue_t enqueue;
> -	/**< Pointer to PMD enqueue function. */
> -	event_enqueue_burst_t enqueue_burst;
> -	/**< Pointer to PMD enqueue burst function. */
> -	event_enqueue_burst_t enqueue_new_burst;
> -	/**< Pointer to PMD enqueue burst function(op new variant) */
> -	event_enqueue_burst_t enqueue_forward_burst;
> -	/**< Pointer to PMD enqueue burst function(op forward variant) */
> -	event_dequeue_t dequeue;
> -	/**< Pointer to PMD dequeue function. */
> -	event_dequeue_burst_t dequeue_burst;
> -	/**< Pointer to PMD dequeue burst function. */
> -	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
> -	/**< Pointer to PMD eth Tx adapter burst enqueue function with
> -	 * events destined to same Eth port & Tx queue.
> -	 */
> -	event_tx_adapter_enqueue_t txa_enqueue;
> -	/**< Pointer to PMD eth Tx adapter enqueue function. */
> -	struct rte_eventdev_data *data;
> -	/**< Pointer to device data */
> -	struct eventdev_ops *dev_ops;
> -	/**< Functions exported by PMD */
> -	struct rte_device *dev;
> -	/**< Device info. supplied by probing */
> -
> -	RTE_STD_C11
> -	uint8_t attached : 1;
> -	/**< Flag indicating the device is attached */
> -
> -	event_crypto_adapter_enqueue_t ca_enqueue;
> -	/**< Pointer to PMD crypto adapter enqueue function. */
> -
> -	uint64_t reserved_64s[4]; /**< Reserved for future fields */
> -	void *reserved_ptrs[3];	  /**< Reserved for future fields */
> -} __rte_cache_aligned;
> -
> -extern struct rte_eventdev *rte_eventdevs;
> -/** @internal The pool of rte_eventdev structures. */
> -
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index
> e684154bf9..9f6eb4ba3c 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -83,7 +83,6 @@ DPDK_22 {
>  	rte_event_timer_arm_burst;
>  	rte_event_timer_arm_tmo_tick_burst;
>  	rte_event_timer_cancel_burst;
> -	rte_eventdevs;
> 
>  	#added in 21.11
>  	rte_event_fp_ops;
> @@ -159,4 +158,5 @@ INTERNAL {
>  	rte_event_pmd_release;
>  	rte_event_pmd_vdev_init;
>  	rte_event_pmd_vdev_uninit;
> +	rte_eventdevs;
>  };
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v4 14/14] eventdev: mark trace variables as internal
  2021-10-17  5:58         ` Jerin Jacob
@ 2021-10-18 15:06           ` Pavan Nikhilesh Bhagavatula
  2021-10-19  7:01             ` David Marchand
  0 siblings, 1 reply; 119+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2021-10-18 15:06 UTC (permalink / raw)
  To: Jerin Jacob, Ray Kinsella, David Marchand
  Cc: Jerin Jacob Kollanukkaran, dpdk-dev

>On Sat, Oct 16, 2021 at 12:34 AM <pbhagavatula@marvell.com> wrote:
>>
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Mark rte_trace global variables as internal i.e. remove them
>> from experimental section of version map.
>> Some of them are used in inline APIs, mark those as global.
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>> Acked-by: Ray Kinsella <mdr@ashroe.eu>
>> ---
>>  doc/guides/rel_notes/release_21_11.rst | 12 +++++
>>  lib/eventdev/version.map               | 71 ++++++++++++--------------
>>  2 files changed, 44 insertions(+), 39 deletions(-)
>>
>> diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
>> index 38e601c236..5b4a05c3ae 100644
>> --- a/doc/guides/rel_notes/release_21_11.rst
>> +++ b/doc/guides/rel_notes/release_21_11.rst
>> @@ -226,6 +226,9 @@ API Changes
>>    the crypto/security operation. This field will be used to communicate
>>    events such as soft expiry with IPsec in lookaside mode.
>>
>> +* eventdev: Event vector configuration APIs have been made stable.
>> +  Move memory used by timer adapters to hugepage. This will prevent TLB misses
>> +  if any and aligns to memory structure of other subsystems.
>>
>>  ABI Changes
>>  -----------
>> @@ -277,6 +280,15 @@ ABI Changes
>>    were added in structure ``rte_event_eth_rx_adapter_stats`` to get additional
>>    status.
>>
>> +* eventdev: A new structure ``rte_event_fp_ops`` has been added which is now used
>> +  by the fastpath inline functions. The structures ``rte_eventdev``,
>> +  ``rte_eventdev_data`` have been made internal. ``rte_eventdevs[]`` can't be
>> +  accessed directly by user any more. This change is transparent to both
>> +  applications and PMDs.
>> +
>> +* eventdev: Re-arrange fields in ``rte_event_timer`` to remove holes.
>> +  ``rte_event_timer_adapter_pmd.h`` has been made internal.
>
>Looks good. Please fix the following. If there are no objections, I will
>merge the next version.
>
>1) Please move the doc update to respective patches

Ack, will move in next version.

>2) Following checkpatch issue:
>[for-main]dell[dpdk-next-eventdev] $ ./devtools/checkpatches.sh -n 14
>
>### eventdev: move inline APIs into separate structure
>
>INFO: symbol event_dev_fp_ops_reset has been added to the INTERNAL
>section of the version map
>INFO: symbol event_dev_fp_ops_set has been added to the INTERNAL
>section of the version map
>INFO: symbol event_dev_probing_finish has been added to the INTERNAL
>section of the version map

These can be ignored as they are internal.

>ERROR: symbol rte_event_fp_ops is added in the DPDK_22 section, but is
>expected to be added in the EXPERIMENTAL section of the version map

This is a replacement for rte_eventdevs; the ethdev rework also doesn't mark
it as experimental. @David Marchand @Ray Kinsella, any opinions?
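
For context, a rough sketch of the version.map layout under discussion
(section names as used in this series; unrelated entries elided with "..."):

DPDK_22 {
	...
	# kept in the stable section, mirroring the ethdev rework
	rte_event_fp_ops;
	...
};

INTERNAL {
	global:

	...
	event_dev_fp_ops_reset;
	event_dev_fp_ops_set;
	event_dev_probing_finish;
	...
};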


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 01/14] eventdev: make driver interface as internal
  2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
                         ` (13 preceding siblings ...)
  2021-10-17 15:35       ` [dpdk-dev] [PATCH v4 01/14] eventdev: make driver interface " Hemant Agrawal
@ 2021-10-18 23:35       ` pbhagavatula
  2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 02/14] eventdev: separate internal structures pbhagavatula
                           ` (13 more replies)
  14 siblings, 14 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:35 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Timothy McDaniel,
	Hemant Agrawal, Nipun Gupta, Mattias Rönnblom, Liang Ma,
	Peter Mccarthy, Harry van Haaren, Abhinandan Gujjar,
	Ray Kinsella
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark all the driver specific functions as internal, remove
`rte` prefix from `struct rte_eventdev_ops`.
Remove experimental tag from internal functions.
Remove `eventdev_pmd.h` from non-internal header files.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 v5 Changes:
 - Move doc updates to respective patches. (Jerin)

 v4 Changes:
 - Update release notes. (Jerin)
 - Rearrange fp_ops fields. (Jerin)
 - Free timer array memory when freeing the last adapter. (Erik)
 - Rebase onto next-event.
 - Fix spell checks.
 - Rearrange version.map (David)

 v3 Changes:
 - Reset fp_ops when the device is torn down.
 - Add `event_dev_probing_finish()`; this function is used for
   post-initialization processing. In the current use case we use it to
   initialize fastpath ops (see the sketch after this changelog).

 v2 Changes:
 - Rework inline flat array by adding port data into it.
 - Rearrange rte_event_timer elements.
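
 For illustration only (not part of the patch), a rough sketch of how a vdev
 PMD's probe tail is expected to use `event_dev_probing_finish()`. The `my_*`
 names are placeholders, and the exact argument and call site shown here are
 assumptions made for this sketch rather than code taken from the series.

#include <rte_bus_vdev.h>
#include <eventdev_pmd_vdev.h>

struct my_evdev_priv {	/* placeholder private data */
	int dummy;
};

static struct eventdev_ops my_dev_ops;	/* control-path ops, filled elsewhere */

static int
my_evdev_probe(struct rte_vdev_device *vdev)
{
	struct rte_eventdev *dev;

	dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
				      sizeof(struct my_evdev_priv),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	dev->dev_ops = &my_dev_ops;
	/* ... fill dev->enqueue/dequeue fast-path pointers here ... */

	/* Post-initialization hook: lets the library snapshot the just-set
	 * fast-path pointers once the device is fully initialized. */
	event_dev_probing_finish(dev);
	return 0;
}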


 drivers/event/cnxk/cn10k_eventdev.c        |  6 ++---
 drivers/event/cnxk/cn9k_eventdev.c         | 10 ++++-----
 drivers/event/dlb2/dlb2.c                  |  2 +-
 drivers/event/dpaa/dpaa_eventdev.c         |  2 +-
 drivers/event/dpaa2/dpaa2_eventdev.c       |  2 +-
 drivers/event/dsw/dsw_evdev.c              |  2 +-
 drivers/event/octeontx/ssovf_evdev.c       |  2 +-
 drivers/event/octeontx/ssovf_worker.c      |  4 ++--
 drivers/event/octeontx2/otx2_evdev.c       | 26 +++++++++++-----------
 drivers/event/opdl/opdl_evdev.c            |  2 +-
 drivers/event/skeleton/skeleton_eventdev.c |  2 +-
 drivers/event/sw/sw_evdev.c                |  2 +-
 lib/eventdev/eventdev_pmd.h                |  6 ++++-
 lib/eventdev/eventdev_pmd_pci.h            |  4 +++-
 lib/eventdev/eventdev_pmd_vdev.h           |  2 ++
 lib/eventdev/meson.build                   |  6 +++++
 lib/eventdev/rte_event_crypto_adapter.h    |  1 -
 lib/eventdev/rte_eventdev.h                | 25 ++++++++++++---------
 lib/eventdev/version.map                   | 17 +++++++-------
 19 files changed, 70 insertions(+), 53 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index bfd470cffd..612c299b59 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -380,7 +380,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
@@ -388,7 +388,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
@@ -788,7 +788,7 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }

-static struct rte_eventdev_ops cn10k_sso_dev_ops = {
+static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 806dcb0a45..d757da7c37 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -514,7 +514,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
@@ -522,7 +522,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
@@ -530,7 +530,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
@@ -538,7 +538,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 #undef T
 		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	[f6][f5][f4][f3][f2][f1][f0] =                                         \
@@ -1060,7 +1060,7 @@ cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }

-static struct rte_eventdev_ops cn9k_sso_dev_ops = {
+static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
 	.queue_def_conf = cnxk_sso_queue_def_conf,
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 252bbd8d5e..c8742ddb2c 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -4384,7 +4384,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
 	struct dlb2_eventdev *dlb2;

 	/* Expose PMD's eventdev interface */
-	static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
+	static struct eventdev_ops dlb2_eventdev_entry_ops = {
 		.dev_infos_get    = dlb2_eventdev_info_get,
 		.dev_configure    = dlb2_eventdev_configure,
 		.dev_start        = dlb2_eventdev_start,
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index ec74160325..9f14390d28 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -925,7 +925,7 @@ dpaa_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }

-static struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct eventdev_ops dpaa_eventdev_ops = {
 	.dev_infos_get    = dpaa_event_dev_info_get,
 	.dev_configure    = dpaa_event_dev_configure,
 	.dev_start        = dpaa_event_dev_start,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 5ccf22f77f..d577f64824 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1015,7 +1015,7 @@ dpaa2_eventdev_txa_enqueue(void *port,
 	return nb_events;
 }

-static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static struct eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
 	.dev_configure    = dpaa2_eventdev_configure,
 	.dev_start        = dpaa2_eventdev_start,
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 2301a4b7a0..01f060fff3 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -398,7 +398,7 @@ dsw_crypto_adapter_caps_get(const struct rte_eventdev *dev  __rte_unused,
 	return 0;
 }

-static struct rte_eventdev_ops dsw_evdev_ops = {
+static struct eventdev_ops dsw_evdev_ops = {
 	.port_setup = dsw_port_setup,
 	.port_def_conf = dsw_port_def_conf,
 	.port_release = dsw_port_release,
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index b93f6ec8c6..4a8c6a13a5 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -790,7 +790,7 @@ ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev,
 }

 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops ssovf_ops = {
+static struct eventdev_ops ssovf_ops = {
 	.dev_infos_get    = ssovf_info_get,
 	.dev_configure    = ssovf_configure,
 	.queue_def_conf   = ssovf_queue_def_conf,
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 8b056ddc5a..2df940f0f1 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -343,11 +343,11 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)

 	dev->ca_enqueue = ssow_crypto_adapter_enqueue;

-	const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+	const event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
 #define T(name, f3, f2, f1, f0, sz, flags)				\
 	[f3][f2][f1][f0] =  sso_event_tx_adapter_enqueue_ ##name,

-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+		SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 	};

diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 38a6b651d9..f26bed334f 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -178,41 +178,41 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 	};

 	/* Tx modes */
-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_tx_adptr_enq_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_tx_adptr_enq_seg_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_tx_adptr_enq_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

-	const event_tx_adapter_enqueue
+	const event_tx_adapter_enqueue_t
 		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
 		[f6][f5][f4][f3][f2][f1][f0] =				\
 			otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
-SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+			SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-	};
+		};

 	event_dev->enqueue			= otx2_ssogws_enq;
 	event_dev->enqueue_burst		= otx2_ssogws_enq_burst;
@@ -1596,7 +1596,7 @@ otx2_sso_close(struct rte_eventdev *event_dev)
 }

 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops otx2_sso_ops = {
+static struct eventdev_ops otx2_sso_ops = {
 	.dev_infos_get    = otx2_sso_info_get,
 	.dev_configure    = otx2_sso_configure,
 	.queue_def_conf   = otx2_sso_queue_def_conf,
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index cfa9733b64..739dc64c82 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -609,7 +609,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
 static int
 opdl_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_opdl_ops = {
+	static struct eventdev_ops evdev_opdl_ops = {
 		.dev_configure = opdl_dev_configure,
 		.dev_infos_get = opdl_info_get,
 		.dev_close = opdl_close,
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 6fd1102596..c9e17e7cb1 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -320,7 +320,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)


 /* Initialize and register event driver with DPDK Application */
-static struct rte_eventdev_ops skeleton_eventdev_ops = {
+static struct eventdev_ops skeleton_eventdev_ops = {
 	.dev_infos_get    = skeleton_eventdev_info_get,
 	.dev_configure    = skeleton_eventdev_configure,
 	.dev_start        = skeleton_eventdev_start,
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a5e6ca22e8..9b72073322 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -945,7 +945,7 @@ static int32_t sw_sched_service_func(void *args)
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
-	static struct rte_eventdev_ops evdev_sw_ops = {
+	static struct eventdev_ops evdev_sw_ops = {
 			.dev_configure = sw_dev_configure,
 			.dev_infos_get = sw_info_get,
 			.dev_close = sw_close,
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 7ac31e9f92..688f30d45e 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -99,6 +99,7 @@ extern struct rte_eventdev *rte_eventdevs;
  * @return
  *   - The rte_eventdev structure pointer for the given device ID.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_get_named_dev(const char *name)
 {
@@ -127,6 +128,7 @@ rte_event_pmd_get_named_dev(const char *name)
  * @return
  *   - If the device index is valid (1) or not (0).
  */
+__rte_internal
 static inline unsigned
 rte_event_pmd_is_valid_dev(uint8_t dev_id)
 {
@@ -1056,7 +1058,7 @@ typedef int (*eventdev_eth_tx_adapter_stats_reset_t)(uint8_t id,
 					const struct rte_eventdev *dev);

 /** Event device operations function pointer table */
-struct rte_eventdev_ops {
+struct eventdev_ops {
 	eventdev_info_get_t dev_infos_get;	/**< Get device info. */
 	eventdev_configure_t dev_configure;	/**< Configure device. */
 	eventdev_start_t dev_start;		/**< Start device. */
@@ -1173,6 +1175,7 @@ struct rte_eventdev_ops {
  * @return
  *   - Slot in the rte_dev_devices array for a new device;
  */
+__rte_internal
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id);

@@ -1184,6 +1187,7 @@ rte_event_pmd_allocate(const char *name, int socket_id);
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);

diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index 1545b240f2..2f12a5eb24 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -31,7 +31,7 @@ typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
  * interface.  Same as rte_event_pmd_pci_probe, except caller can specify
  * the name.
  */
-__rte_experimental
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 			      struct rte_pci_device *pci_dev,
@@ -85,6 +85,7 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .probe function to attach to a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
 			    struct rte_pci_device *pci_dev,
@@ -108,6 +109,7 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
  * Wrapper for use by pci drivers as a .remove function to detach a event
  * interface.
  */
+__rte_internal
 static inline int
 rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
 			     eventdev_pmd_pci_callback_t devuninit)
diff --git a/lib/eventdev/eventdev_pmd_vdev.h b/lib/eventdev/eventdev_pmd_vdev.h
index 2d33924e6c..d9ee7277dd 100644
--- a/lib/eventdev/eventdev_pmd_vdev.h
+++ b/lib/eventdev/eventdev_pmd_vdev.h
@@ -37,6 +37,7 @@
  *   - Eventdev pointer if device is successfully created.
  *   - NULL if device cannot be created.
  */
+__rte_internal
 static inline struct rte_eventdev *
 rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
 		int socket_id)
@@ -74,6 +75,7 @@ rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
  * @return
  *   - 0 on success, negative on error
  */
+__rte_internal
 static inline int
 rte_event_pmd_vdev_uninit(const char *name)
 {
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 32abeba794..523ea9ccae 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,5 +27,11 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+driver_sdk_headers += files(
+        'eventdev_pmd.h',
+        'eventdev_pmd_pci.h',
+        'eventdev_pmd_vdev.h',
+)
+
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
 deps += ['telemetry']
diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index edbd5c61a3..1a8ff75384 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -171,7 +171,6 @@ extern "C" {
 #include <stdint.h>

 #include "rte_eventdev.h"
-#include "eventdev_pmd.h"

 /**
  * Crypto event adapter mode
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index a9c496fb62..0c701888d5 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,7 +1324,7 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);

-struct rte_eventdev_ops;
+struct eventdev_ops;
 struct rte_eventdev;

 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
@@ -1342,18 +1342,21 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
 /**< @internal Dequeue burst of events from port of a device */

-typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
+					       struct rte_event ev[],
+					       uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */

-typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
-		struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
+							 struct rte_event ev[],
+							 uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device supporting
  * burst having same destination Ethernet port & Tx queue.
  */

-typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
-				struct rte_event ev[], uint16_t nb_events);
+typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */

 #define RTE_EVENTDEV_NAME_MAX_LEN	(64)
@@ -1421,15 +1424,15 @@ struct rte_eventdev {
 	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
+	event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
 	/**< Pointer to PMD eth Tx adapter burst enqueue function with
 	 * events destined to same Eth port & Tx queue.
 	 */
-	event_tx_adapter_enqueue txa_enqueue;
+	event_tx_adapter_enqueue_t txa_enqueue;
 	/**< Pointer to PMD eth Tx adapter enqueue function. */
 	struct rte_eventdev_data *data;
 	/**< Pointer to device data */
-	struct rte_eventdev_ops *dev_ops;
+	struct eventdev_ops *dev_ops;
 	/**< Functions exported by PMD */
 	struct rte_device *dev;
 	/**< Device info. supplied by probing */
@@ -1438,7 +1441,7 @@ struct rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */

-	event_crypto_adapter_enqueue ca_enqueue;
+	event_crypto_adapter_enqueue_t ca_enqueue;
 	/**< Pointer to PMD crypto adapter enqueue function. */

 	uint64_t reserved_64s[4]; /**< Reserved for future fields */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 7de18497a6..cd72f45d29 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -55,12 +55,6 @@ DPDK_22 {
 	rte_event_eth_tx_adapter_stats_get;
 	rte_event_eth_tx_adapter_stats_reset;
 	rte_event_eth_tx_adapter_stop;
-	rte_event_pmd_allocate;
-	rte_event_pmd_pci_probe;
-	rte_event_pmd_pci_remove;
-	rte_event_pmd_release;
-	rte_event_pmd_vdev_init;
-	rte_event_pmd_vdev_uninit;
 	rte_event_port_attr_get;
 	rte_event_port_default_conf_get;
 	rte_event_port_link;
@@ -136,8 +130,6 @@ EXPERIMENTAL {

 	# changed in 20.11
 	__rte_eventdev_trace_port_setup;
-	# added in 20.11
-	rte_event_pmd_pci_probe_named;
 	# added in 21.11
 	rte_event_eth_rx_adapter_create_with_params;

@@ -152,4 +144,13 @@ INTERNAL {
 	global:

 	rte_event_pmd_selftest_seqn_dynfield_offset;
+	rte_event_pmd_allocate;
+	rte_event_pmd_get_named_dev;
+	rte_event_pmd_is_valid_dev;
+	rte_event_pmd_pci_probe;
+	rte_event_pmd_pci_probe_named;
+	rte_event_pmd_pci_remove;
+	rte_event_pmd_release;
+	rte_event_pmd_vdev_init;
+	rte_event_pmd_vdev_uninit;
 };
--
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 02/14] eventdev: separate internal structures
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
@ 2021-10-18 23:35         ` pbhagavatula
  2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 03/14] eventdev: allocate max space for internal arrays pbhagavatula
                           ` (12 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:35 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Create rte_eventdev_core.h and move all the internal data structures
to this file. These structures are mostly used by drivers, but they
need to be in the public header file as they are accessed by datapath
inline functions for performance reasons.
The accessibility of these data structures is not changed.
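
As a quick illustration (not part of the patch): application code built on the
inline fast-path APIs keeps compiling unchanged, because rte_eventdev.h now
pulls these definitions in through the new indirect header. A minimal sketch,
where forward_one() is a made-up helper name:

#include <rte_eventdev.h>	/* internally includes rte_eventdev_core.h */

/* Forward a single event; the inline rte_event_enqueue_burst() below still
 * dereferences rte_eventdevs[] as before, only the structure definitions it
 * relies on now live in rte_eventdev_core.h. */
static inline uint16_t
forward_one(uint8_t dev_id, uint8_t port_id, struct rte_event *ev)
{
	ev->op = RTE_EVENT_OP_FORWARD;
	return rte_event_enqueue_burst(dev_id, port_id, ev, 1);
}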

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/eventdev_pmd.h      |   3 -
 lib/eventdev/meson.build         |   3 +
 lib/eventdev/rte_eventdev.h      | 718 +++++++++++++------------------
 lib/eventdev/rte_eventdev_core.h | 138 ++++++
 4 files changed, 437 insertions(+), 425 deletions(-)
 create mode 100644 lib/eventdev/rte_eventdev_core.h

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 688f30d45e..9b2aec8371 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -87,9 +87,6 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
-extern struct rte_eventdev *rte_eventdevs;
-/** The pool of rte_eventdev structures. */
-
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 523ea9ccae..8b51fde361 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -27,6 +27,9 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_tx_adapter.h',
 )
+indirect_headers += files(
+        'rte_eventdev_core.h',
+)
 driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 0c701888d5..1b11d4576d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1324,317 +1324,6 @@ int
 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps);
 
-struct eventdev_ops;
-struct rte_eventdev;
-
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
-/**< @internal Enqueue event on port of a device */
-
-typedef uint16_t (*event_enqueue_burst_t)(void *port,
-			const struct rte_event ev[], uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-		uint64_t timeout_ticks);
-/**< @internal Dequeue event from port of a device */
-
-typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-/**< @internal Dequeue burst of events from port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
-					       struct rte_event ev[],
-					       uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device */
-
-typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
-							 struct rte_event ev[],
-							 uint16_t nb_events);
-/**< @internal Enqueue burst of events on port of a device supporting
- * burst having same destination Ethernet port & Tx queue.
- */
-
-typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
-						   struct rte_event ev[],
-						   uint16_t nb_events);
-/**< @internal Enqueue burst of events on crypto adapter */
-
-#define RTE_EVENTDEV_NAME_MAX_LEN	(64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void **ports;
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
-	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue_t txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue_t ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];   /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
-static __rte_always_inline uint16_t
-__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events,
-			const event_enqueue_burst_t fn)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
-	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
-}
-
-/**
- * Enqueue a burst of events objects or an event object supplied in *rte_event*
- * structure on an  event device designated by its *dev_id* through the event
- * port specified by *port_id*. Each event object specifies the event queue on
- * which it will be enqueued.
- *
- * The *nb_events* parameter is the number of event objects to enqueue which are
- * supplied in the *ev* array of *rte_event* structure.
- *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
- *
- * The rte_event_enqueue_burst() function returns the number of
- * events objects it actually enqueued. A return value equal to *nb_events*
- * means that all event objects have been enqueued.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- */
-static inline uint16_t
-rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
- * an event device designated by its *dev_id* through the event port specified
- * by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_NEW.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_new_burst);
-}
-
-/**
- * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
- * on an event device designated by its *dev_id* through the event port
- * specified by *port_id*.
- *
- * Provides the same functionality as rte_event_enqueue_burst(), expect that
- * application can use this API when the all objects in the burst contains
- * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
- * function can provide the additional hint to the PMD and optimize if possible.
- *
- * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
- * has event object of operation type != RTE_EVENT_OP_FORWARD.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param port_id
- *   The identifier of the event port.
- * @param ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   which contain the event object enqueue operations to be processed.
- * @param nb_events
- *   The number of event objects to enqueue, typically number of
- *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
- *   available for this port.
- *
- * @return
- *   The number of event objects actually enqueued on the event device. The
- *   return value can be less than the value of the *nb_events* parameter when
- *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If the return value is less than *nb_events*, the remaining
- *   events at the end of ev[] are not consumed and the caller has to take care
- *   of them, and rte_errno is set accordingly. Possible errno values include:
- *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
- *              ID is invalid, or an event's sched type doesn't match the
- *              capabilities of the destination queue.
- *   - ENOSPC   The event port was backpressured and unable to enqueue
- *              one or more events. This error code is only applicable to
- *              closed systems.
- * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
- * @see rte_event_enqueue_burst()
- */
-static inline uint16_t
-rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
-			const struct rte_event ev[], uint16_t nb_events)
-{
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-			dev->enqueue_forward_burst);
-}
-
 /**
  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
  *
@@ -1665,124 +1354,27 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 					uint64_t *timeout_ticks);
 
 /**
- * Dequeue a burst of events objects or an event object from the event port
- * designated by its *event_port_id*, on an event device designated
- * by its *dev_id*.
- *
- * rte_event_dequeue_burst() does not dictate the specifics of scheduling
- * algorithm as each eventdev driver may have different criteria to schedule
- * an event. However, in general, from an application perspective scheduler may
- * use the following scheme to dispatch an event to the port.
- *
- * 1) Selection of event queue based on
- *   a) The list of event queues are linked to the event port.
- *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
- *   queue selection from list is based on event queue priority relative to
- *   other event queue supplied as *priority* in rte_event_queue_setup()
- *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
- *   queue selection from the list is based on event priority supplied as
- *   *priority* in rte_event_enqueue_burst()
- * 2) Selection of event
- *   a) The number of flows available in selected event queue.
- *   b) Schedule type method associated with the event
- *
- * The *nb_events* parameter is the maximum number of event objects to dequeue
- * which are returned in the *ev* array of *rte_event* structure.
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated service priority
+ * supplied in *priorities* on the event device designated by its *dev_id*.
  *
- * The rte_event_dequeue_burst() function returns the number of events objects
- * it actually dequeued. A return value equal to *nb_events* means that all
- * event objects have been dequeued.
+ * The link establishment shall enable the event port *port_id* from
+ * receiving events from the specified event queue(s) supplied in *queues*
  *
- * The number of events dequeued is the number of scheduler contexts held by
- * this port. These contexts are automatically released in the next
- * rte_event_dequeue_burst() invocation if the port supports implicit
- * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
- * operation can be used to release the contexts early.
+ * An event queue may link to one or more event ports.
+ * The number of links can be established from an event queue to event port is
+ * implementation defined.
  *
- * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
- * enqueued to the same port that their associated events were dequeued from.
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
  *
  * @param dev_id
  *   The identifier of the device.
+ *
  * @param port_id
- *   The identifier of the event port.
- * @param[out] ev
- *   Points to an array of *nb_events* objects of type *rte_event* structure
- *   for output to be populated with the dequeued event objects.
- * @param nb_events
- *   The maximum number of event objects to dequeue, typically number of
- *   rte_event_port_dequeue_depth() available for this port.
- *
- * @param timeout_ticks
- *   - 0 no-wait, returns immediately if there is no event.
- *   - >0 wait for the event, if the device is configured with
- *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- *   at least one event is available or *timeout_ticks* time.
- *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
- *   then this function will wait until the event available or
- *   *dequeue_timeout_ns* ns which was previously supplied to
- *   rte_event_dev_configure()
- *
- * @return
- * The number of event objects actually dequeued from the port. The return
- * value can be less than the value of the *nb_events* parameter when the
- * event port's queue is not full.
- *
- * @see rte_event_port_dequeue_depth()
- */
-static inline uint16_t
-rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-			uint16_t nb_events, uint64_t timeout_ticks)
-{
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-
-	if (port_id >= dev->data->nb_ports) {
-		rte_errno = EINVAL;
-		return 0;
-	}
-#endif
-	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->dequeue)(
-			dev->data->ports[port_id], ev, timeout_ticks);
-	else
-		return (*dev->dequeue_burst)(
-			dev->data->ports[port_id], ev, nb_events,
-				timeout_ticks);
-}
-
-/**
- * Link multiple source event queues supplied in *queues* to the destination
- * event port designated by its *port_id* with associated service priority
- * supplied in *priorities* on the event device designated by its *dev_id*.
- *
- * The link establishment shall enable the event port *port_id* from
- * receiving events from the specified event queue(s) supplied in *queues*
- *
- * An event queue may link to one or more event ports.
- * The number of links can be established from an event queue to event port is
- * implementation defined.
- *
- * Event queue(s) to event port link establishment can be changed at runtime
- * without re-configuring the device to support scaling and to reduce the
- * latency of critical work by establishing the link with more event ports
- * at runtime.
- *
- * @param dev_id
- *   The identifier of the device.
- *
- * @param port_id
- *   Event port identifier to select the destination port to link.
+ *   Event port identifier to select the destination port to link.
  *
  * @param queues
  *   Points to an array of *nb_links* event queues to be linked
@@ -2148,6 +1740,288 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
 			     int socket_id);
 
+#include <rte_eventdev_core.h>
+
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			  const struct rte_event ev[], uint16_t nb_events,
+			  const event_enqueue_burst_t fn)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+	else
+		return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
+/**
+ * Enqueue a burst of events objects or an event object supplied in *rte_event*
+ * structure on an  event device designated by its *dev_id* through the event
+ * port specified by *port_id*. Each event object specifies the event queue on
+ * which it will be enqueued.
+ *
+ * The *nb_events* parameter is the number of event objects to enqueue which are
+ * supplied in the *ev* array of *rte_event* structure.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * The rte_event_enqueue_burst() function returns the number of
+ * events objects it actually enqueued. A return value equal to *nb_events*
+ * means that all event objects have been enqueued.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ */
+static inline uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), expect that
+ * application can use this API when the all objects in the burst contains
+ * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
+ * function can provide the additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has event object of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+			    const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), expect that
+ * application can use this API when the all objects in the burst contains
+ * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
+ * function can provide the additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has event object of operation type != RTE_EVENT_OP_FORWARD.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+				const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+					 dev->enqueue_forward_burst);
+}
+
+/**
+ * Dequeue a burst of events objects or an event object from the event port
+ * designated by its *event_port_id*, on an event device designated
+ * by its *dev_id*.
+ *
+ * rte_event_dequeue_burst() does not dictate the specifics of scheduling
+ * algorithm as each eventdev driver may have different criteria to schedule
+ * an event. However, in general, from an application perspective scheduler may
+ * use the following scheme to dispatch an event to the port.
+ *
+ * 1) Selection of event queue based on
+ *   a) The list of event queues are linked to the event port.
+ *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
+ *   queue selection from list is based on event queue priority relative to
+ *   other event queue supplied as *priority* in rte_event_queue_setup()
+ *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
+ *   queue selection from the list is based on event priority supplied as
+ *   *priority* in rte_event_enqueue_burst()
+ * 2) Selection of event
+ *   a) The number of flows available in selected event queue.
+ *   b) Schedule type method associated with the event
+ *
+ * The *nb_events* parameter is the maximum number of event objects to dequeue
+ * which are returned in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_dequeue_burst() function returns the number of events objects
+ * it actually dequeued. A return value equal to *nb_events* means that all
+ * event objects have been dequeued.
+ *
+ * The number of events dequeued is the number of scheduler contexts held by
+ * this port. These contexts are automatically released in the next
+ * rte_event_dequeue_burst() invocation if the port supports implicit
+ * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
+ * operation can be used to release the contexts early.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param[out] ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   for output to be populated with the dequeued event objects.
+ * @param nb_events
+ *   The maximum number of event objects to dequeue, typically number of
+ *   rte_event_port_dequeue_depth() available for this port.
+ *
+ * @param timeout_ticks
+ *   - 0 no-wait, returns immediately if there is no event.
+ *   - >0 wait for the event, if the device is configured with
+ *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
+ *   at least one event is available or *timeout_ticks* time.
+ *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ *   then this function will wait until the event available or
+ *   *dequeue_timeout_ns* ns which was previously supplied to
+ *   rte_event_dev_configure()
+ *
+ * @return
+ * The number of event objects actually dequeued from the port. The return
+ * value can be less than the value of the *nb_events* parameter when the
+ * event port's queue is not full.
+ *
+ * @see rte_event_port_dequeue_depth()
+ */
+static inline uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+			uint16_t nb_events, uint64_t timeout_ticks)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->dequeue)(dev->data->ports[port_id], ev,
+				       timeout_ticks);
+	else
+		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
+					     nb_events, timeout_ticks);
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
new file mode 100644
index 0000000000..b97cdf84fe
--- /dev/null
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(C) 2021 Marvell.
+ * Copyright 2016 NXP
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENTDEV_CORE_H_
+#define _RTE_EVENTDEV_CORE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
+/**< @internal Enqueue event on port of a device */
+
+typedef uint16_t (*event_enqueue_burst_t)(void *port,
+					  const struct rte_event ev[],
+					  uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
+				    uint64_t timeout_ticks);
+/**< @internal Dequeue event from port of a device */
+
+typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
+					  uint16_t nb_events,
+					  uint64_t timeout_ticks);
+/**< @internal Dequeue burst of events from port of a device */
+
+typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
+					       struct rte_event ev[],
+					       uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
+						   struct rte_event ev[],
+						   uint16_t nb_events);
+/**< @internal Enqueue burst of events on crypto adapter */
+
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void **ports;
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf *ports_cfg;
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf *queues_cfg;
+	/**< Array of queue configuration structures. */
+	uint16_t *links_map;
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< Pointer to PMD crypto adapter enqueue function. */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_RTE_EVENTDEV_CORE_H_*/
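
As a complement to the rte_event_dequeue_burst() documentation above, a
minimal worker-loop sketch may help; it assumes an already configured and
started event device, and dev_id, port_id, next_queue and the burst size
of 32 are placeholders chosen only for illustration:

#include <rte_common.h>
#include <rte_eventdev.h>

static void
worker_loop(uint8_t dev_id, uint8_t port_id, uint8_t next_queue)
{
	struct rte_event ev[32];
	uint16_t i, nb, sent;

	for (;;) {
		/* timeout_ticks = 0: return immediately if no event. */
		nb = rte_event_dequeue_burst(dev_id, port_id, ev,
					     RTE_DIM(ev), 0);
		for (i = 0; i < nb; i++) {
			/* ... process ev[i] ... */
			ev[i].op = RTE_EVENT_OP_FORWARD;
			ev[i].queue_id = next_queue;
		}
		/* Forwarding the dequeued events also releases the
		 * scheduler contexts held by this port for them.
		 */
		sent = 0;
		while (sent < nb)
			sent += rte_event_enqueue_burst(dev_id, port_id,
							&ev[sent], nb - sent);
	}
}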
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 03/14] eventdev: allocate max space for internal arrays
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
  2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 02/14] eventdev: separate internal structures pbhagavatula
@ 2021-10-18 23:35         ` pbhagavatula
  2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 04/14] eventdev: move inline APIs into separate structure pbhagavatula
                           ` (11 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:35 UTC (permalink / raw)
  To: jerinj, Bruce Richardson, Anatoly Burakov; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Allocate the maximum space for the internal port, port config, queue
config and link map arrays.
Introduce a new macro RTE_EVENT_MAX_PORTS_PER_DEV and set it to the
maximum possible value.
This simplifies the port and queue reconfiguration scenarios and also
allows inline functions to reference internal port data pointers
without extra checks against the current number of configured queues.
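
As an illustration of what the fixed-size arrays enable, the sketch below
shows a links_map lookup; links_map_get() is a hypothetical helper used
only for illustration and is not part of this patch:

#include <rte_eventdev.h>

/* The statically sized links_map is a flat array of
 * RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV entries,
 * indexed as port * RTE_EVENT_MAX_QUEUES_PER_DEV + queue, so a lookup
 * needs no reallocation or size bookkeeping across reconfigurations.
 */
static inline uint16_t
links_map_get(const struct rte_eventdev_data *data, uint8_t port_id,
	      uint8_t queue_id)
{
	return data->links_map[port_id * RTE_EVENT_MAX_QUEUES_PER_DEV +
			       queue_id];
}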

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 config/rte_config.h              |   1 +
 lib/eventdev/rte_eventdev.c      | 154 +++++++------------------------
 lib/eventdev/rte_eventdev_core.h |   9 +-
 3 files changed, 38 insertions(+), 126 deletions(-)

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c07d..e0ead8b251 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -72,6 +72,7 @@
 
 /* eventdev defines */
 #define RTE_EVENT_MAX_DEVS 16
+#define RTE_EVENT_MAX_PORTS_PER_DEV 255
 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255
 #define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32
 #define RTE_EVENT_ETH_INTR_RING_SIZE 1024
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index e347d6dfd5..bfcfa31cd1 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -209,7 +209,7 @@ rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 }
 
 static inline int
-rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
+event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 {
 	uint8_t old_nb_queues = dev->data->nb_queues;
 	struct rte_event_queue_conf *queues_cfg;
@@ -218,37 +218,13 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
 			 dev->data->dev_id);
 
-	/* First time configuration */
-	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
-		/* Allocate memory to store queue configuration */
-		dev->data->queues_cfg = rte_zmalloc_socket(
-				"eventdev->data->queues_cfg",
-				sizeof(dev->data->queues_cfg[0]) * nb_queues,
-				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->queues_cfg == NULL) {
-			dev->data->nb_queues = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
-					"nb_queues %u", nb_queues);
-			return -(ENOMEM);
-		}
-	/* Re-configure */
-	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
+	if (nb_queues != 0) {
+		queues_cfg = dev->data->queues_cfg;
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
 			(*dev->dev_ops->queue_release)(dev, i);
 
-		/* Re allocate memory to store queue configuration */
-		queues_cfg = dev->data->queues_cfg;
-		queues_cfg = rte_realloc(queues_cfg,
-				sizeof(queues_cfg[0]) * nb_queues,
-				RTE_CACHE_LINE_SIZE);
-		if (queues_cfg == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
-						" nb_queues %u", nb_queues);
-			return -(ENOMEM);
-		}
-		dev->data->queues_cfg = queues_cfg;
 
 		if (nb_queues > old_nb_queues) {
 			uint8_t new_qs = nb_queues - old_nb_queues;
@@ -256,7 +232,7 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 			memset(queues_cfg + old_nb_queues, 0,
 				sizeof(queues_cfg[0]) * new_qs);
 		}
-	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
+	} else {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
@@ -270,7 +246,7 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
 
 static inline int
-rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
+event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 {
 	uint8_t old_nb_ports = dev->data->nb_ports;
 	void **ports;
@@ -281,46 +257,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
 			 dev->data->dev_id);
 
-	/* First time configuration */
-	if (dev->data->ports == NULL && nb_ports != 0) {
-		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
-				sizeof(dev->data->ports[0]) * nb_ports,
-				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->ports == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Allocate memory to store port configurations */
-		dev->data->ports_cfg =
-			rte_zmalloc_socket("eventdev->ports_cfg",
-			sizeof(dev->data->ports_cfg[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->ports_cfg == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Allocate memory to store queue to port link connection */
-		dev->data->links_map =
-			rte_zmalloc_socket("eventdev->links_map",
-			sizeof(dev->data->links_map[0]) * nb_ports *
-			RTE_EVENT_MAX_QUEUES_PER_DEV,
-			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->links_map == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
-			dev->data->links_map[i] =
-				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
-	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
+	if (nb_ports != 0) { /* re-config */
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
@@ -330,37 +267,6 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 		for (i = nb_ports; i < old_nb_ports; i++)
 			(*dev->dev_ops->port_release)(ports[i]);
 
-		/* Realloc memory for ports */
-		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
-				RTE_CACHE_LINE_SIZE);
-		if (ports == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
-						" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory for ports_cfg */
-		ports_cfg = rte_realloc(ports_cfg,
-			sizeof(ports_cfg[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE);
-		if (ports_cfg == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
-						" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory to store queue to port link connection */
-		links_map = rte_realloc(links_map,
-			sizeof(dev->data->links_map[0]) * nb_ports *
-			RTE_EVENT_MAX_QUEUES_PER_DEV,
-			RTE_CACHE_LINE_SIZE);
-		if (links_map == NULL) {
-			dev->data->nb_ports = 0;
-			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
-					"nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
 		if (nb_ports > old_nb_ports) {
 			uint8_t new_ps = nb_ports - old_nb_ports;
 			unsigned int old_links_map_end =
@@ -376,16 +282,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 				links_map[i] =
 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 		}
-
-		dev->data->ports = ports;
-		dev->data->ports_cfg = ports_cfg;
-		dev->data->links_map = links_map;
-	} else if (dev->data->ports != NULL && nb_ports == 0) {
+	} else {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
-		for (i = nb_ports; i < old_nb_ports; i++)
+		for (i = nb_ports; i < old_nb_ports; i++) {
 			(*dev->dev_ops->port_release)(ports[i]);
+			ports[i] = NULL;
+		}
 	}
 
 	dev->data->nb_ports = nb_ports;
@@ -550,19 +454,19 @@ rte_event_dev_configure(uint8_t dev_id,
 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
 
 	/* Setup new number of queues and reconfigure device. */
-	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
+	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
 	if (diag != 0) {
-		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
-				dev_id, diag);
+		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
+				 diag);
 		return diag;
 	}
 
 	/* Setup new number of ports and reconfigure device. */
-	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
+	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
 	if (diag != 0) {
-		rte_event_dev_queue_config(dev, 0);
-		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
-				dev_id, diag);
+		event_dev_queue_config(dev, 0);
+		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
+				 diag);
 		return diag;
 	}
 
@@ -570,8 +474,8 @@ rte_event_dev_configure(uint8_t dev_id,
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
-		rte_event_dev_queue_config(dev, 0);
-		rte_event_dev_port_config(dev, 0);
+		event_dev_queue_config(dev, 0);
+		event_dev_port_config(dev, 0);
 	}
 
 	dev->data->event_dev_cap = info.event_dev_cap;
@@ -1403,8 +1307,8 @@ rte_event_dev_close(uint8_t dev_id)
 }
 
 static inline int
-rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
-		int socket_id)
+eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+		    int socket_id)
 {
 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
 	const struct rte_memzone *mz;
@@ -1426,14 +1330,20 @@ rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
 		return -ENOMEM;
 
 	*data = mz->addr;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		memset(*data, 0, sizeof(struct rte_eventdev_data));
+		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
+					RTE_EVENT_MAX_QUEUES_PER_DEV;
+		     n++)
+			(*data)->links_map[n] =
+				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+	}
 
 	return 0;
 }
 
 static inline uint8_t
-rte_eventdev_find_free_device_index(void)
+eventdev_find_free_device_index(void)
 {
 	uint8_t dev_id;
 
@@ -1475,7 +1385,7 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 		return NULL;
 	}
 
-	dev_id = rte_eventdev_find_free_device_index();
+	dev_id = eventdev_find_free_device_index();
 	if (dev_id == RTE_EVENT_MAX_DEVS) {
 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
 		return NULL;
@@ -1490,8 +1400,8 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
-		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
-				socket_id);
+		int retval =
+			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
 
 		if (retval < 0 || eventdev_data == NULL)
 			return NULL;
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index b97cdf84fe..115b97e431 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -58,13 +58,14 @@ struct rte_eventdev_data {
 	/**< Number of event queues. */
 	uint8_t nb_ports;
 	/**< Number of event ports. */
-	void **ports;
+	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
 	/**< Array of pointers to ports. */
-	struct rte_event_port_conf *ports_cfg;
+	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
 	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf *queues_cfg;
+	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/**< Array of queue configuration structures. */
-	uint16_t *links_map;
+	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
+			   RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/**< Memory to store queues to port connections. */
 	void *dev_private;
 	/**< PMD-specific private data */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 04/14] eventdev: move inline APIs into separate structure
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
  2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 02/14] eventdev: separate internal structures pbhagavatula
  2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 03/14] eventdev: allocate max space for internal arrays pbhagavatula
@ 2021-10-18 23:35         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 05/14] drivers/event: invoke probing finish function pbhagavatula
                           ` (10 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:35 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the fastpath inline function pointers from rte_eventdev into a
separate structure accessed via a flat array.
The intention is to make rte_eventdev and related structures private
to avoid future API/ABI breakages.
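
A simplified sketch of the resulting dispatch path (debug checks and
tracing omitted), mirroring the inline-API changes made later in this
series:

#include <rte_eventdev.h>

/* The inline API indexes the flat rte_event_fp_ops[] array instead of
 * dereferencing rte_eventdevs[], so struct rte_eventdev itself no
 * longer needs to be visible to applications.
 */
static inline uint16_t
sketch_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops = &rte_event_fp_ops[dev_id];
	void *port = fp_ops->data[port_id];

	return (*fp_ops->enqueue_burst)(port, ev, nb_events);
}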

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 doc/guides/rel_notes/release_21_11.rst |   6 ++
 lib/eventdev/eventdev_pmd.h            |  38 +++++++++
 lib/eventdev/eventdev_pmd_pci.h        |   4 +-
 lib/eventdev/eventdev_private.c        | 112 +++++++++++++++++++++++++
 lib/eventdev/meson.build               |  21 ++---
 lib/eventdev/rte_eventdev.c            |  22 ++++-
 lib/eventdev/rte_eventdev_core.h       |  26 ++++++
 lib/eventdev/version.map               |   6 ++
 8 files changed, 223 insertions(+), 12 deletions(-)
 create mode 100644 lib/eventdev/eventdev_private.c

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 38e601c236..b4e1770d4d 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -277,6 +277,12 @@ ABI Changes
   were added in structure ``rte_event_eth_rx_adapter_stats`` to get additional
   status.
 
+* eventdev: A new structure ``rte_event_fp_ops`` has been added, which is now
+  used by the fastpath inline functions. The structures ``rte_eventdev`` and
+  ``rte_eventdev_data`` have been made internal. ``rte_eventdevs[]`` can no
+  longer be accessed directly by the user. This change is transparent to both
+  applications and PMDs.
+
 
 Known Issues
 ------------
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 9b2aec8371..0532b542d4 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -1188,4 +1188,42 @@ __rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);
 
+/**
+ *
+ * @internal
+ * This is the last step of device probing.
+ * It must be called after the event device is allocated and initialized.
+ *
+ * @param eventdev
+ *  New event device.
+ */
+__rte_internal
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev);
+
+/**
+ * Reset eventdevice fastpath APIs to dummy values.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to reset.
+ */
+__rte_internal
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op);
+
+/**
+ * Set eventdevice fastpath APIs to event device values.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to set.
+ */
+__rte_internal
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_ops,
+		     const struct rte_eventdev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _RTE_EVENTDEV_PMD_H_ */
diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
index 2f12a5eb24..499852db16 100644
--- a/lib/eventdev/eventdev_pmd_pci.h
+++ b/lib/eventdev/eventdev_pmd_pci.h
@@ -67,8 +67,10 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 
 	/* Invoke PMD device initialization function */
 	retval = devinit(eventdev);
-	if (retval == 0)
+	if (retval == 0) {
+		event_dev_probing_finish(eventdev);
 		return 0;
+	}
 
 	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
 			" failed", pci_drv->driver.name,
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
new file mode 100644
index 0000000000..9084833847
--- /dev/null
+++ b/lib/eventdev/eventdev_private.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "eventdev_pmd.h"
+#include "rte_eventdev.h"
+
+static uint16_t
+dummy_event_enqueue(__rte_unused void *port,
+		    __rte_unused const struct rte_event *ev)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_enqueue_burst(__rte_unused void *port,
+			  __rte_unused const struct rte_event ev[],
+			  __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
+		    __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue_burst(__rte_unused void *port,
+			  __rte_unused struct rte_event ev[],
+			  __rte_unused uint16_t nb_events,
+			  __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue(__rte_unused void *port,
+			       __rte_unused struct rte_event ev[],
+			       __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue_same_dest(__rte_unused void *port,
+					 __rte_unused struct rte_event ev[],
+					 __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue same destination requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_crypto_adapter_enqueue(__rte_unused void *port,
+				   __rte_unused struct rte_event ev[],
+				   __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event crypto adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
+{
+	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+	static const struct rte_event_fp_ops dummy = {
+		.enqueue = dummy_event_enqueue,
+		.enqueue_burst = dummy_event_enqueue_burst,
+		.enqueue_new_burst = dummy_event_enqueue_burst,
+		.enqueue_forward_burst = dummy_event_enqueue_burst,
+		.dequeue = dummy_event_dequeue,
+		.dequeue_burst = dummy_event_dequeue_burst,
+		.txa_enqueue = dummy_event_tx_adapter_enqueue,
+		.txa_enqueue_same_dest =
+			dummy_event_tx_adapter_enqueue_same_dest,
+		.ca_enqueue = dummy_event_crypto_adapter_enqueue,
+		.data = dummy_data,
+	};
+
+	*fp_op = dummy;
+}
+
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
+		     const struct rte_eventdev *dev)
+{
+	fp_op->enqueue = dev->enqueue;
+	fp_op->enqueue_burst = dev->enqueue_burst;
+	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
+	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
+	fp_op->dequeue = dev->dequeue;
+	fp_op->dequeue_burst = dev->dequeue_burst;
+	fp_op->txa_enqueue = dev->txa_enqueue;
+	fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
+	fp_op->ca_enqueue = dev->ca_enqueue;
+	fp_op->data = dev->data->ports;
+}
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 8b51fde361..cb9abe92f6 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -8,24 +8,25 @@ else
 endif
 
 sources = files(
-        'rte_eventdev.c',
-        'rte_event_ring.c',
+        'eventdev_private.c',
         'eventdev_trace_points.c',
-        'rte_event_eth_rx_adapter.c',
-        'rte_event_timer_adapter.c',
         'rte_event_crypto_adapter.c',
+        'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
+        'rte_event_ring.c',
+        'rte_event_timer_adapter.c',
+        'rte_eventdev.c',
 )
 headers = files(
-        'rte_eventdev.h',
-        'rte_eventdev_trace.h',
-        'rte_eventdev_trace_fp.h',
-        'rte_event_ring.h',
+        'rte_event_crypto_adapter.h',
         'rte_event_eth_rx_adapter.h',
+        'rte_event_eth_tx_adapter.h',
+        'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_event_timer_adapter_pmd.h',
-        'rte_event_crypto_adapter.h',
-        'rte_event_eth_tx_adapter.h',
+        'rte_eventdev.h',
+        'rte_eventdev_trace.h',
+        'rte_eventdev_trace_fp.h',
 )
 indirect_headers += files(
         'rte_eventdev_core.h',
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index bfcfa31cd1..4c30a37831 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -46,6 +46,9 @@ static struct rte_eventdev_global eventdev_globals = {
 	.nb_devs		= 0
 };
 
+/* Public fastpath APIs. */
+struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 /* Event dev north bound API implementation */
 
 uint8_t
@@ -300,8 +303,8 @@ int
 rte_event_dev_configure(uint8_t dev_id,
 			const struct rte_event_dev_config *dev_conf)
 {
-	struct rte_eventdev *dev;
 	struct rte_event_dev_info info;
+	struct rte_eventdev *dev;
 	int diag;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -470,10 +473,13 @@ rte_event_dev_configure(uint8_t dev_id,
 		return diag;
 	}
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
+
 	/* Configure the device */
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 		event_dev_queue_config(dev, 0);
 		event_dev_port_config(dev, 0);
 	}
@@ -1244,6 +1250,8 @@ rte_event_dev_start(uint8_t dev_id)
 	else
 		return diag;
 
+	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
+
 	return 0;
 }
 
@@ -1284,6 +1292,7 @@ rte_event_dev_stop(uint8_t dev_id)
 	dev->data->dev_started = 0;
 	(*dev->dev_ops->dev_stop)(dev);
 	rte_eventdev_trace_stop(dev_id);
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 }
 
 int
@@ -1302,6 +1311,7 @@ rte_event_dev_close(uint8_t dev_id)
 		return -EBUSY;
 	}
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 	rte_eventdev_trace_close(dev_id);
 	return (*dev->dev_ops->dev_close)(dev);
 }
@@ -1435,6 +1445,7 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
 	if (eventdev == NULL)
 		return -EINVAL;
 
+	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
 	eventdev->attached = RTE_EVENTDEV_DETACHED;
 	eventdev_globals.nb_devs--;
 
@@ -1460,6 +1471,15 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
 	return 0;
 }
 
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev)
+{
+	if (eventdev == NULL)
+		return;
+
+	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
+			     eventdev);
+}
 
 static int
 handle_dev_list(const char *cmd __rte_unused,
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 115b97e431..916023f71f 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -39,6 +39,32 @@ typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
 						   uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */
 
+struct rte_event_fp_ops {
+	void **data;
+	/**< points to array of internal port data pointers */
+	event_enqueue_t enqueue;
+	/**< PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< PMD enqueue burst new function. */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< PMD enqueue burst fwd function. */
+	event_dequeue_t dequeue;
+	/**< PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< PMD Tx adapter enqueue function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< PMD Tx adapter enqueue same destination function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< PMD Crypto adapter enqueue function. */
+	uintptr_t reserved[6];
+} __rte_cache_aligned;
+
+extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
 /**< @internal Max length of name of event PMD */
 
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd72f45d29..e684154bf9 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -85,6 +85,9 @@ DPDK_22 {
 	rte_event_timer_cancel_burst;
 	rte_eventdevs;
 
+	#added in 21.11
+	rte_event_fp_ops;
+
 	local: *;
 };
 
@@ -143,6 +146,9 @@ EXPERIMENTAL {
 INTERNAL {
 	global:
 
+	event_dev_fp_ops_reset;
+	event_dev_fp_ops_set;
+	event_dev_probing_finish;
 	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 05/14] drivers/event: invoke probing finish function
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (2 preceding siblings ...)
  2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 04/14] eventdev: move inline APIs into separate structure pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 06/14] eventdev: use new API for inline functions pbhagavatula
                           ` (9 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Hemant Agrawal, Nipun Gupta, Mattias Rönnblom,
	Liang Ma, Peter Mccarthy, Harry van Haaren
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Invoke the event_dev_probing_finish() function at the end of probing;
this function sets the function pointers in the fp_ops flat array.
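
A sketch of the probe path for a hypothetical "event_example" vdev PMD
(example_evdev, example_ops and example_probe are placeholders, not part
of this patch):

#include <errno.h>

#include <rte_bus_vdev.h>
#include <rte_lcore.h>

#include <eventdev_pmd_vdev.h>

struct example_evdev { uint8_t dev_id; };  /* hypothetical private data */
static struct eventdev_ops example_ops;    /* hypothetical, left empty */

static int
example_probe(struct rte_vdev_device *vdev)
{
	struct rte_eventdev *dev;

	dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
				      sizeof(struct example_evdev),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	dev->dev_ops = &example_ops;
	/* ... set dev->enqueue, dev->dequeue_burst, etc. here ... */

	/* Publish the fastpath pointers to the rte_event_fp_ops[] flat
	 * array so the inline APIs can dispatch to this device.
	 */
	event_dev_probing_finish(dev);
	return 0;
}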

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa/dpaa_eventdev.c         | 4 +++-
 drivers/event/dpaa2/dpaa2_eventdev.c       | 4 +++-
 drivers/event/dsw/dsw_evdev.c              | 1 +
 drivers/event/octeontx/ssovf_evdev.c       | 1 +
 drivers/event/opdl/opdl_evdev.c            | 4 +++-
 drivers/event/skeleton/skeleton_eventdev.c | 1 +
 drivers/event/sw/sw_evdev.c                | 2 ++
 7 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 9f14390d28..14ca341829 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -1026,10 +1026,12 @@ dpaa_event_dev_create(const char *name, const char *params)

 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;

 	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

+done:
+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index d577f64824..1d3ad8ffd6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1110,7 +1110,7 @@ dpaa2_eventdev_create(const char *name)

 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;

 	priv = eventdev->data->dev_private;
 	priv->max_event_queues = 0;
@@ -1139,6 +1139,8 @@ dpaa2_eventdev_create(const char *name)

 	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

+done:
+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 01f060fff3..17568967be 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -448,6 +448,7 @@ dsw_probe(struct rte_vdev_device *vdev)
 	dsw = dev->data->dev_private;
 	dsw->data = dev->data;

+	event_dev_probing_finish(dev);
 	return 0;
 }

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 4a8c6a13a5..eb80eeafe1 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -933,6 +933,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
 			edev->max_event_ports);

 	ssovf_init_once = 1;
+	event_dev_probing_finish(eventdev);
 	return 0;

 error:
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 739dc64c82..5007e9a7bf 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -720,7 +720,7 @@ opdl_probe(struct rte_vdev_device *vdev)
 	dev->dequeue_burst = opdl_event_dequeue_burst;

 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
+		goto done;

 	opdl = dev->data->dev_private;
 	opdl->data = dev->data;
@@ -733,6 +733,8 @@ opdl_probe(struct rte_vdev_device *vdev)
 	if (do_test == 1)
 		test_result =  opdl_selftest();

+done:
+	event_dev_probing_finish(dev);
 	return test_result;
 }

diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index c9e17e7cb1..af0efb3302 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -443,6 +443,7 @@ skeleton_eventdev_create(const char *name, int socket_id)
 	eventdev->dequeue       = skeleton_eventdev_dequeue;
 	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

+	event_dev_probing_finish(eventdev);
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 9b72073322..e99b47afbe 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -1124,6 +1124,8 @@ sw_probe(struct rte_vdev_device *vdev)
 	dev->data->service_inited = 1;
 	dev->data->service_id = sw->service_id;

+	event_dev_probing_finish(dev);
+
 	return 0;
 }

--
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 06/14] eventdev: use new API for inline functions
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (3 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 05/14] drivers/event: invoke probing finish function pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 07/14] eventdev: hide event device related structures pbhagavatula
                           ` (8 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Use the new driver interface for the fastpath enqueue/dequeue inline
functions.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
---
 lib/eventdev/rte_event_crypto_adapter.h | 15 +++++---
 lib/eventdev/rte_event_eth_tx_adapter.h | 15 ++++----
 lib/eventdev/rte_eventdev.h             | 46 +++++++++++++++----------
 3 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index 1a8ff75384..d90a19e72c 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -569,12 +569,19 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 				struct rte_event ev[],
 				uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
+		rte_errno = EINVAL;
+		return 0;
+	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -582,7 +589,7 @@ rte_event_crypto_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
 		nb_events);
 
-	return dev->ca_enqueue(dev->data->ports[port_id], ev, nb_events);
+	return fp_ops->ca_enqueue(port, ev, nb_events);
 }
 
 #ifdef __cplusplus
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.h b/lib/eventdev/rte_event_eth_tx_adapter.h
index 8c59547165..3908c2ded5 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/eventdev/rte_event_eth_tx_adapter.h
@@ -355,16 +355,19 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 				uint16_t nb_events,
 				const uint8_t flags)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
-		!rte_eventdevs[dev_id].attached) {
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -372,11 +375,9 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
 	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
 		nb_events, flags);
 	if (flags)
-		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
-						  ev, nb_events);
+		return fp_ops->txa_enqueue_same_dest(port, ev, nb_events);
 	else
-		return dev->txa_enqueue(dev->data->ports[port_id], ev,
-					nb_events);
+		return fp_ops->txa_enqueue(port, ev, nb_events);
 }
 
 /**
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 1b11d4576d..31fa9ac4b8 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1747,15 +1747,19 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			  const struct rte_event ev[], uint16_t nb_events,
 			  const event_enqueue_burst_t fn)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -1766,9 +1770,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+		return (fp_ops->enqueue)(port, ev);
 	else
-		return fn(dev->data->ports[port_id], ev, nb_events);
+		return fn(port, ev, nb_events);
 }
 
 /**
@@ -1818,10 +1822,11 @@ static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_burst);
+					 fp_ops->enqueue_burst);
 }
 
 /**
@@ -1869,10 +1874,11 @@ static inline uint16_t
 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
 			    const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_new_burst);
+					 fp_ops->enqueue_new_burst);
 }
 
 /**
@@ -1920,10 +1926,11 @@ static inline uint16_t
 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
 				const struct rte_event ev[], uint16_t nb_events)
 {
-	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
-					 dev->enqueue_forward_burst);
+					 fp_ops->enqueue_forward_burst);
 }
 
 /**
@@ -1996,15 +2003,19 @@ static inline uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 			uint16_t nb_events, uint64_t timeout_ticks)
 {
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
 
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
-	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (port_id >= dev->data->nb_ports) {
+	if (port == NULL) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -2015,11 +2026,10 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	 * requests nb_events as const one
 	 */
 	if (nb_events == 1)
-		return (*dev->dequeue)(dev->data->ports[port_id], ev,
-				       timeout_ticks);
+		return (fp_ops->dequeue)(port, ev, timeout_ticks);
 	else
-		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
-					     nb_events, timeout_ticks);
+		return (fp_ops->dequeue_burst)(port, ev, nb_events,
+					       timeout_ticks);
 }
 
 #ifdef __cplusplus
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 07/14] eventdev: hide event device related structures
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (4 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 06/14] eventdev: use new API for inline functions pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 08/14] eventdev: hide timer adapter PMD file pbhagavatula
                           ` (7 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Timothy McDaniel, Mattias Rönnblom, Pavan Nikhilesh,
	Harman Kalra, Ray Kinsella
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the rte_eventdev and rte_eventdev_data structures to eventdev_pmd.h.
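
For drivers, the practical effect is an include change; a minimal sketch
(example_evdev and example_pmd_priv are hypothetical, modeled on the
dlb2/dsw updates below):

#include <eventdev_pmd.h>

struct example_evdev { uint8_t dev_id; };  /* hypothetical private data */

/* With rte_eventdev now defined in eventdev_pmd.h, driver helpers that
 * walk dev->data (here, to reach the private data) must include the PMD
 * header rather than relying on rte_eventdev.h.
 */
static inline struct example_evdev *
example_pmd_priv(const struct rte_eventdev *dev)
{
	return dev->data->dev_private;
}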

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/event/dlb2/dlb2_inline_fns.h   |  2 +
 drivers/event/dsw/dsw_evdev.h          |  2 +
 drivers/event/octeontx/timvf_worker.h  |  2 +
 drivers/net/octeontx/octeontx_ethdev.c |  3 +-
 lib/eventdev/eventdev_pmd.h            | 92 +++++++++++++++++++++++++
 lib/eventdev/rte_eventdev.c            | 22 ------
 lib/eventdev/rte_eventdev_core.h       | 93 --------------------------
 lib/eventdev/version.map               |  2 +-
 8 files changed, 101 insertions(+), 117 deletions(-)

diff --git a/drivers/event/dlb2/dlb2_inline_fns.h b/drivers/event/dlb2/dlb2_inline_fns.h
index ac8d01aa98..1429281cfd 100644
--- a/drivers/event/dlb2/dlb2_inline_fns.h
+++ b/drivers/event/dlb2/dlb2_inline_fns.h
@@ -5,6 +5,8 @@
 #ifndef _DLB2_INLINE_FNS_H_
 #define _DLB2_INLINE_FNS_H_
 
+#include <eventdev_pmd.h>
+
 /* Inline functions required in more than one source file. */
 
 static inline struct dlb2_eventdev *
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 08889a0990..631daea55c 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -5,6 +5,8 @@
 #ifndef _DSW_EVDEV_H_
 #define _DSW_EVDEV_H_
 
+#include <eventdev_pmd.h>
+
 #include <rte_event_ring.h>
 #include <rte_eventdev.h>
 
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index dede1a4a4f..3f1e77f1d1 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -2,6 +2,8 @@
  * Copyright(c) 2017 Cavium, Inc
  */
 
+#include <eventdev_pmd.h>
+
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 7c91494f0e..ddfce57394 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -9,13 +9,14 @@
 #include <string.h>
 #include <unistd.h>
 
+#include <eventdev_pmd.h>
 #include <rte_alarm.h>
 #include <rte_branch_prediction.h>
 #include <rte_bus_vdev.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
-#include <rte_devargs.h>
 #include <rte_dev.h>
+#include <rte_devargs.h>
 #include <rte_kvargs.h>
 #include <rte_malloc.h>
 #include <rte_mbuf_pool_ops.h>
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 0532b542d4..9aa9943fa5 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -80,6 +80,9 @@
 #define RTE_EVENTDEV_DETACHED  (0)
 #define RTE_EVENTDEV_ATTACHED  (1)
 
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
 struct rte_eth_dev;
 
 /** Global structure used for maintaining state of allocated event devices */
@@ -87,6 +90,95 @@ struct rte_eventdev_global {
 	uint8_t nb_devs;	/**< Number of devices found */
 };
 
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
+	/**< Array of pointers to ports. */
+	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
+	/**< Array of queue configuration structures. */
+	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
+			   RTE_EVENT_MAX_QUEUES_PER_DEV];
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
+	void *dev_stop_flush_arg;
+	/**< User-provided argument for event flush function */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	struct eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+
+	event_enqueue_t enqueue;
+	/**< Pointer to PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
+	event_dequeue_t dequeue;
+	/**< Pointer to PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< Pointer to PMD eth Tx adapter burst enqueue function with
+	 * events destined to same Eth port & Tx queue.
+	 */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< Pointer to PMD eth Tx adapter enqueue function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3];	  /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
 /**
  * Get the rte_eventdev structure device pointer for the named device.
  *
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 4c30a37831..e55241defd 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -1365,24 +1365,6 @@ eventdev_find_free_device_index(void)
 	return RTE_EVENT_MAX_DEVS;
 }
 
-static uint16_t
-rte_event_tx_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
-static uint16_t
-rte_event_crypto_adapter_enqueue(__rte_unused void *port,
-			__rte_unused struct rte_event ev[],
-			__rte_unused uint16_t nb_events)
-{
-	rte_errno = ENOTSUP;
-	return 0;
-}
-
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id)
 {
@@ -1403,10 +1385,6 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 
 	eventdev = &rte_eventdevs[dev_id];
 
-	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
-	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
-	eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
-
 	if (eventdev->data == NULL) {
 		struct rte_eventdev_data *eventdev_data = NULL;
 
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 916023f71f..61d5ebdc44 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -65,99 +65,6 @@ struct rte_event_fp_ops {
 
 extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
 
-#define RTE_EVENTDEV_NAME_MAX_LEN (64)
-/**< @internal Max length of name of event PMD */
-
-/**
- * @internal
- * The data part, with no function pointers, associated with each device.
- *
- * This structure is safe to place in shared memory to be common among
- * different processes in a multi-process configuration.
- */
-struct rte_eventdev_data {
-	int socket_id;
-	/**< Socket ID where memory is allocated */
-	uint8_t dev_id;
-	/**< Device ID for this instance */
-	uint8_t nb_queues;
-	/**< Number of event queues. */
-	uint8_t nb_ports;
-	/**< Number of event ports. */
-	void *ports[RTE_EVENT_MAX_PORTS_PER_DEV];
-	/**< Array of pointers to ports. */
-	struct rte_event_port_conf ports_cfg[RTE_EVENT_MAX_PORTS_PER_DEV];
-	/**< Array of port configuration structures. */
-	struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
-	/**< Array of queue configuration structures. */
-	uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
-			   RTE_EVENT_MAX_QUEUES_PER_DEV];
-	/**< Memory to store queues to port connections. */
-	void *dev_private;
-	/**< PMD-specific private data */
-	uint32_t event_dev_cap;
-	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
-	struct rte_event_dev_config dev_conf;
-	/**< Configuration applied to device. */
-	uint8_t service_inited;
-	/* Service initialization state */
-	uint32_t service_id;
-	/* Service ID*/
-	void *dev_stop_flush_arg;
-	/**< User-provided argument for event flush function */
-
-	RTE_STD_C11
-	uint8_t dev_started : 1;
-	/**< Device state: STARTED(1)/STOPPED(0) */
-
-	char name[RTE_EVENTDEV_NAME_MAX_LEN];
-	/**< Unique identifier name */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
-	event_enqueue_burst_t enqueue_burst;
-	/**< Pointer to PMD enqueue burst function. */
-	event_enqueue_burst_t enqueue_new_burst;
-	/**< Pointer to PMD enqueue burst function(op new variant) */
-	event_enqueue_burst_t enqueue_forward_burst;
-	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
-	event_dequeue_burst_t dequeue_burst;
-	/**< Pointer to PMD dequeue burst function. */
-	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
-	/**< Pointer to PMD eth Tx adapter burst enqueue function with
-	 * events destined to same Eth port & Tx queue.
-	 */
-	event_tx_adapter_enqueue_t txa_enqueue;
-	/**< Pointer to PMD eth Tx adapter enqueue function. */
-	struct rte_eventdev_data *data;
-	/**< Pointer to device data */
-	struct eventdev_ops *dev_ops;
-	/**< Functions exported by PMD */
-	struct rte_device *dev;
-	/**< Device info. supplied by probing */
-
-	RTE_STD_C11
-	uint8_t attached : 1;
-	/**< Flag indicating the device is attached */
-
-	event_crypto_adapter_enqueue_t ca_enqueue;
-	/**< Pointer to PMD crypto adapter enqueue function. */
-
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[3];	  /**< Reserved for future fields */
-} __rte_cache_aligned;
-
-extern struct rte_eventdev *rte_eventdevs;
-/** @internal The pool of rte_eventdev structures. */
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index e684154bf9..9f6eb4ba3c 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -83,7 +83,6 @@ DPDK_22 {
 	rte_event_timer_arm_burst;
 	rte_event_timer_arm_tmo_tick_burst;
 	rte_event_timer_cancel_burst;
-	rte_eventdevs;
 
 	#added in 21.11
 	rte_event_fp_ops;
@@ -159,4 +158,5 @@ INTERNAL {
 	rte_event_pmd_release;
 	rte_event_pmd_vdev_init;
 	rte_event_pmd_vdev_uninit;
+	rte_eventdevs;
 };
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 08/14] eventdev: hide timer adapter PMD file
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (5 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 07/14] eventdev: hide event device related structures pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
                           ` (6 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton, Mattias Rönnblom,
	Harry van Haaren, Erik Gabriel Carrillo
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Hide the rte_event_timer_adapter_pmd.h file as it is an internal file.
Remove the rte_ prefix from the rte_event_timer_adapter_ops structure.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cnxk_tim_evdev.c           |  5 ++--
 drivers/event/cnxk/cnxk_tim_evdev.h           |  2 +-
 drivers/event/dsw/dsw_evdev.c                 |  4 +--
 drivers/event/octeontx/ssovf_evdev.c          |  2 +-
 drivers/event/octeontx/timvf_evdev.c          | 17 ++++++-----
 drivers/event/octeontx/timvf_evdev.h          |  9 +++---
 drivers/event/octeontx2/otx2_tim_evdev.c      |  5 ++--
 drivers/event/octeontx2/otx2_tim_evdev.h      |  4 +--
 drivers/event/sw/sw_evdev.c                   |  5 ++--
 ...dapter_pmd.h => event_timer_adapter_pmd.h} |  8 ++---
 lib/eventdev/eventdev_pmd.h                   |  8 ++---
 lib/eventdev/meson.build                      |  2 +-
 lib/eventdev/rte_event_timer_adapter.c        | 30 +++++++++----------
 lib/eventdev/rte_event_timer_adapter.h        |  2 +-
 lib/eventdev/rte_eventdev.c                   |  2 +-
 15 files changed, 51 insertions(+), 54 deletions(-)
 rename lib/eventdev/{rte_event_timer_adapter_pmd.h => event_timer_adapter_pmd.h} (95%)

diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index c3e9dc508c..100fafb67e 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -5,7 +5,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops cnxk_tim_ops;
+static struct event_timer_adapter_ops cnxk_tim_ops;
 
 static int
 cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
@@ -353,8 +353,7 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index 9a23952a91..2478a5c1df 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -268,7 +268,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 
 int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void cnxk_tim_init(struct roc_sso *sso);
 void cnxk_tim_fini(void);
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 17568967be..0652d83ad6 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -381,8 +381,8 @@ dsw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
 
 static int
 dsw_timer_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
-			   uint64_t flags  __rte_unused, uint32_t *caps,
-			   const struct rte_event_timer_adapter_ops **ops)
+			   uint64_t flags __rte_unused, uint32_t *caps,
+			   const struct event_timer_adapter_ops **ops)
 {
 	*caps = 0;
 	*ops = NULL;
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index eb80eeafe1..2245599810 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -721,7 +721,7 @@ ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque)
 
 static int
 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+		     uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
 			timvf_enable_stats);
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index 688e9daa66..1f1cda3f7f 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -407,18 +407,19 @@ timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
 	return 0;
 }
 
-static struct rte_event_timer_adapter_ops timvf_ops = {
-	.init		= timvf_ring_create,
-	.uninit		= timvf_ring_free,
-	.start		= timvf_ring_start,
-	.stop		= timvf_ring_stop,
-	.get_info	= timvf_ring_info_get,
+static struct event_timer_adapter_ops timvf_ops = {
+	.init = timvf_ring_create,
+	.uninit = timvf_ring_free,
+	.start = timvf_ring_start,
+	.stop = timvf_ring_stop,
+	.get_info = timvf_ring_info_get,
 };
 
 int
 timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats)
+			     uint32_t *caps,
+			     const struct event_timer_adapter_ops **ops,
+			     uint8_t enable_stats)
 {
 	RTE_SET_USED(dev);
 
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 2977063d66..cef02cd7f9 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -5,13 +5,13 @@
 #ifndef __TIMVF_EVDEV_H__
 #define __TIMVF_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
 #include <rte_eal.h>
-#include <rte_eventdev.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
+#include <rte_eventdev.h>
 #include <rte_io.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
@@ -196,8 +196,9 @@ uint8_t timvf_get_ring(void);
 void timvf_release_ring(uint8_t vfid);
 void *timvf_bar(uint8_t id, uint8_t bar);
 int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
-		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
-		uint8_t enable_stats);
+				 uint32_t *caps,
+				 const struct event_timer_adapter_ops **ops,
+				 uint8_t enable_stats);
 uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint16_t nb_timers);
 uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c
index de50c4c76e..7dcb291043 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.c
+++ b/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -9,7 +9,7 @@
 #include "otx2_evdev.h"
 #include "otx2_tim_evdev.h"
 
-static struct rte_event_timer_adapter_ops otx2_tim_ops;
+static struct event_timer_adapter_ops otx2_tim_ops;
 
 static inline int
 tim_get_msix_offsets(void)
@@ -497,8 +497,7 @@ otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 
 int
 otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
-		  uint32_t *caps,
-		  const struct rte_event_timer_adapter_ops **ops)
+		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
 {
 	struct otx2_tim_evdev *dev = tim_priv_get();
 
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.h b/drivers/event/octeontx2/otx2_tim_evdev.h
index caa6ad3b3c..dac642e0e1 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.h
+++ b/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -5,8 +5,8 @@
 #ifndef __OTX2_TIM_EVDEV_H__
 #define __OTX2_TIM_EVDEV_H__
 
+#include <event_timer_adapter_pmd.h>
 #include <rte_event_timer_adapter.h>
-#include <rte_event_timer_adapter_pmd.h>
 #include <rte_reciprocal.h>
 
 #include "otx2_dev.h"
@@ -244,7 +244,7 @@ uint16_t otx2_tim_timer_cancel_burst(
 
 int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
-		      const struct rte_event_timer_adapter_ops **ops);
+		      const struct event_timer_adapter_ops **ops);
 
 void otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev);
 void otx2_tim_fini(void);
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index e99b47afbe..070a4802e9 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -561,10 +561,9 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 }
 
 static int
-sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
-			  uint64_t flags,
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 			  uint32_t *caps,
-			  const struct rte_event_timer_adapter_ops **ops)
+			  const struct event_timer_adapter_ops **ops)
 {
 	RTE_SET_USED(dev);
 	RTE_SET_USED(flags);
diff --git a/lib/eventdev/rte_event_timer_adapter_pmd.h b/lib/eventdev/event_timer_adapter_pmd.h
similarity index 95%
rename from lib/eventdev/rte_event_timer_adapter_pmd.h
rename to lib/eventdev/event_timer_adapter_pmd.h
index cf3509dc6f..189017b5c1 100644
--- a/lib/eventdev/rte_event_timer_adapter_pmd.h
+++ b/lib/eventdev/event_timer_adapter_pmd.h
@@ -3,8 +3,8 @@
  * All rights reserved.
  */
 
-#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
-#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#ifndef __EVENT_TIMER_ADAPTER_PMD_H__
+#define __EVENT_TIMER_ADAPTER_PMD_H__
 
 /**
  * @file
@@ -57,7 +57,7 @@ typedef int (*rte_event_timer_adapter_stats_reset_t)(
  * @internal Structure containing the functions exported by an event timer
  * adapter implementation.
  */
-struct rte_event_timer_adapter_ops {
+struct event_timer_adapter_ops {
 	rte_event_timer_adapter_init_t		init;  /**< Set up adapter */
 	rte_event_timer_adapter_uninit_t	uninit;/**< Tear down adapter */
 	rte_event_timer_adapter_start_t		start; /**< Start adapter */
@@ -111,4 +111,4 @@ struct rte_event_timer_adapter_data {
 }
 #endif
 
-#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
+#endif /* __EVENT_TIMER_ADAPTER_PMD_H__ */
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 9aa9943fa5..d009e24309 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -24,8 +24,8 @@
 #include <rte_mbuf.h>
 #include <rte_mbuf_dyn.h>
 
+#include "event_timer_adapter_pmd.h"
 #include "rte_eventdev.h"
-#include "rte_event_timer_adapter_pmd.h"
 
 /* Logging Macros */
 #define RTE_EDEV_LOG_ERR(...) \
@@ -591,10 +591,8 @@ struct rte_event_eth_rx_adapter_queue_conf;
  *
  */
 typedef int (*eventdev_timer_adapter_caps_get_t)(
-				const struct rte_eventdev *dev,
-				uint64_t flags,
-				uint32_t *caps,
-				const struct rte_event_timer_adapter_ops **ops);
+	const struct rte_eventdev *dev, uint64_t flags, uint32_t *caps,
+	const struct event_timer_adapter_ops **ops);
 
 /**
  * Add ethernet Rx queues to event device. This callback is invoked if
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index cb9abe92f6..22c3289912 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -23,7 +23,6 @@ headers = files(
         'rte_event_eth_tx_adapter.h',
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
-        'rte_event_timer_adapter_pmd.h',
         'rte_eventdev.h',
         'rte_eventdev_trace.h',
         'rte_eventdev_trace_fp.h',
@@ -35,6 +34,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'event_timer_adapter_pmd.h',
 )
 
 deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ee20b39f4b..ae55407042 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -20,11 +20,11 @@
 #include <rte_service_component.h>
 #include <rte_cycles.h>
 
-#include "rte_eventdev.h"
+#include "event_timer_adapter_pmd.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
 #include "rte_event_timer_adapter.h"
-#include "rte_event_timer_adapter_pmd.h"
+#include "rte_eventdev.h"
+#include "rte_eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
@@ -35,7 +35,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
 
-static const struct rte_event_timer_adapter_ops swtim_ops;
+static const struct event_timer_adapter_ops swtim_ops;
 
 #define EVTIM_LOG(level, logtype, ...) \
 	rte_log(RTE_LOG_ ## level, logtype, \
@@ -1207,15 +1207,15 @@ swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
 	return __swtim_arm_burst(adapter, evtims, nb_evtims);
 }
 
-static const struct rte_event_timer_adapter_ops swtim_ops = {
-	.init			= swtim_init,
-	.uninit			= swtim_uninit,
-	.start			= swtim_start,
-	.stop			= swtim_stop,
-	.get_info		= swtim_get_info,
-	.stats_get		= swtim_stats_get,
-	.stats_reset		= swtim_stats_reset,
-	.arm_burst		= swtim_arm_burst,
-	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
-	.cancel_burst		= swtim_cancel_burst,
+static const struct event_timer_adapter_ops swtim_ops = {
+	.init = swtim_init,
+	.uninit = swtim_uninit,
+	.start = swtim_start,
+	.stop = swtim_stop,
+	.get_info = swtim_get_info,
+	.stats_get = swtim_stats_get,
+	.stats_reset = swtim_stats_reset,
+	.arm_burst = swtim_arm_burst,
+	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
+	.cancel_burst = swtim_cancel_burst,
 };
diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index 4e0d2a819b..cad6d3b4c5 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -523,7 +523,7 @@ struct rte_event_timer_adapter {
 	/**< Pointer to driver cancel function. */
 	struct rte_event_timer_adapter_data *data;
 	/**< Pointer to shared adapter data */
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 	/**< Functions exported by adapter driver */
 
 	RTE_STD_C11
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index e55241defd..de6346194e 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -142,7 +142,7 @@ int
 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
 {
 	struct rte_eventdev *dev;
-	const struct rte_event_timer_adapter_ops *ops;
+	const struct event_timer_adapter_ops *ops;
 
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 09/14] eventdev: remove rte prefix for internal structs
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (6 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 08/14] eventdev: hide timer adapter PMD file pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 10/14] eventdev: rearrange fields in timer object pbhagavatula
                           ` (5 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Remove the rte_ prefix from rte_eth_event_enqueue_buffer,
rte_event_eth_rx_adapter and rte_event_crypto_adapter,
as these structures are only used internally in
rte_event_eth_rx_adapter.c and rte_event_crypto_adapter.c.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
---
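Note: a minimal sketch (not part of this patch, all names hypothetical) of the
pattern this change relies on: the structure stays file-local to the adapter
.c file, so dropping the rte_ prefix cannot affect any exported symbol, while
the public entry points keep their rte_-prefixed names.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct event_foo_adapter {	/* internal, never exposed in a header */
	uint8_t eventdev_id;
	uint8_t started;
};

static struct event_foo_adapter **event_foo_adapter_list;

static inline struct event_foo_adapter *
foo_id_to_adapter(uint8_t id)
{
	return event_foo_adapter_list ? event_foo_adapter_list[id] : NULL;
}

int rte_event_foo_adapter_start(uint8_t id);	/* public name keeps rte_ */

int
rte_event_foo_adapter_start(uint8_t id)
{
	struct event_foo_adapter *adapter = foo_id_to_adapter(id);

	if (adapter == NULL)
		return -EINVAL;
	adapter->started = 1;
	return 0;
}
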
 lib/eventdev/rte_event_crypto_adapter.c |  66 +++---
 lib/eventdev/rte_event_eth_rx_adapter.c | 258 ++++++++++--------------
 lib/eventdev/rte_eventdev.h             |   2 +-
 3 files changed, 145 insertions(+), 181 deletions(-)

diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index ebfc8326a8..e9e660a3d2 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -30,7 +30,7 @@
  */
 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
 
-struct rte_event_crypto_adapter {
+struct event_crypto_adapter {
 	/* Event device identifier */
 	uint8_t eventdev_id;
 	/* Event port identifier */
@@ -99,7 +99,7 @@ struct crypto_queue_pair_info {
 	uint8_t len;
 } __rte_cache_aligned;
 
-static struct rte_event_crypto_adapter **event_crypto_adapter;
+static struct event_crypto_adapter **event_crypto_adapter;
 
 /* Macros to check for valid adapter */
 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -141,7 +141,7 @@ eca_init(void)
 	return 0;
 }
 
-static inline struct rte_event_crypto_adapter *
+static inline struct event_crypto_adapter *
 eca_id_to_adapter(uint8_t id)
 {
 	return event_crypto_adapter ?
@@ -158,7 +158,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	int ret;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
 
 	if (adapter == NULL)
 		return -EINVAL;
@@ -202,7 +202,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				enum rte_event_crypto_adapter_mode mode,
 				void *conf_arg)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
 	struct rte_event_dev_info dev_info;
 	int socket_id;
@@ -304,7 +304,7 @@ rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_crypto_adapter_free(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -329,8 +329,8 @@ rte_event_crypto_adapter_free(uint8_t id)
 }
 
 static inline unsigned int
-eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
-		 struct rte_event *ev, unsigned int cnt)
+eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+		     unsigned int cnt)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -420,7 +420,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
 }
 
 static unsigned int
-eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -466,8 +466,8 @@ eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
 }
 
 static int
-eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_enq)
+eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_enq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct rte_event ev[BATCH_SIZE];
@@ -500,8 +500,8 @@ eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline void
-eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
-		  struct rte_crypto_op **ops, uint16_t num)
+eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
+		      struct rte_crypto_op **ops, uint16_t num)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -564,8 +564,8 @@ eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline unsigned int
-eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_deq)
+eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_deq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -627,8 +627,8 @@ eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static void
-eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_ops)
+eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
+		       unsigned int max_ops)
 {
 	while (max_ops) {
 		unsigned int e_cnt, d_cnt;
@@ -648,7 +648,7 @@ eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
 static int
 eca_service_func(void *args)
 {
-	struct rte_event_crypto_adapter *adapter = args;
+	struct event_crypto_adapter *adapter = args;
 
 	if (rte_spinlock_trylock(&adapter->lock) == 0)
 		return 0;
@@ -659,7 +659,7 @@ eca_service_func(void *args)
 }
 
 static int
-eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
 {
 	struct rte_event_crypto_adapter_conf adapter_conf;
 	struct rte_service_spec service;
@@ -699,10 +699,9 @@ eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
 }
 
 static void
-eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
-			struct crypto_device_info *dev_info,
-			int32_t queue_pair_id,
-			uint8_t add)
+eca_update_qp_info(struct event_crypto_adapter *adapter,
+		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
+		   uint8_t add)
 {
 	struct crypto_queue_pair_info *qp_info;
 	int enabled;
@@ -729,9 +728,8 @@ eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
 }
 
 static int
-eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
-		uint8_t cdev_id,
-		int queue_pair_id)
+eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
+		   int queue_pair_id)
 {
 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
 	struct crypto_queue_pair_info *qpairs;
@@ -773,7 +771,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			int32_t queue_pair_id,
 			const struct rte_event *event)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
 	uint32_t cap;
@@ -889,7 +887,7 @@ int
 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 					int32_t queue_pair_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	int ret;
@@ -975,7 +973,7 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 static int
 eca_adapter_ctrl(uint8_t id, int start)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1017,7 +1015,7 @@ eca_adapter_ctrl(uint8_t id, int start)
 int
 rte_event_crypto_adapter_start(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 	adapter = eca_id_to_adapter(id);
@@ -1039,7 +1037,7 @@ int
 rte_event_crypto_adapter_stats_get(uint8_t id,
 				struct rte_event_crypto_adapter_stats *stats)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_crypto_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -1083,7 +1081,7 @@ rte_event_crypto_adapter_stats_get(uint8_t id,
 int
 rte_event_crypto_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1111,7 +1109,7 @@ rte_event_crypto_adapter_stats_reset(uint8_t id)
 int
 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -1128,7 +1126,7 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index bd68b8efe1..7d37456856 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -82,7 +82,7 @@ struct eth_rx_vector_data {
 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
 
 /* Instance per adapter */
-struct rte_eth_event_enqueue_buffer {
+struct eth_event_enqueue_buffer {
 	/* Count of events in this buffer */
 	uint16_t count;
 	/* Array of events in this buffer */
@@ -98,7 +98,7 @@ struct rte_eth_event_enqueue_buffer {
 	uint16_t last_mask;
 };
 
-struct rte_event_eth_rx_adapter {
+struct event_eth_rx_adapter {
 	/* RSS key */
 	uint8_t rss_key_be[RSS_KEY_SIZE];
 	/* Event device identifier */
@@ -124,7 +124,7 @@ struct rte_event_eth_rx_adapter {
 	/* Next entry in wrr[] to begin polling */
 	uint32_t wrr_pos;
 	/* Event burst buffer */
-	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+	struct eth_event_enqueue_buffer event_enqueue_buffer;
 	/* Vector enable flag */
 	uint8_t ena_vector;
 	/* Timestamp of previous vector expiry list traversal */
@@ -244,10 +244,10 @@ struct eth_rx_queue_info {
 	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
 	uint64_t event;
 	struct eth_rx_vector_data vector_data;
-	struct rte_eth_event_enqueue_buffer *event_buf;
+	struct eth_event_enqueue_buffer *event_buf;
 };
 
-static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+static struct event_eth_rx_adapter **event_eth_rx_adapter;
 
 /* Enable dynamic timestamp field in mbuf */
 static uint64_t event_eth_rx_timestamp_dynflag;
@@ -266,9 +266,9 @@ rxa_validate_id(uint8_t id)
 	return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
 }
 
-static inline struct rte_eth_event_enqueue_buffer *
-rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter,
-		  uint16_t eth_dev_id, uint16_t rx_queue_id)
+static inline struct eth_event_enqueue_buffer *
+rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+		  uint16_t rx_queue_id)
 {
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_device_info *dev_info =
@@ -286,7 +286,7 @@ rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter,
 } while (0)
 
 static inline int
-rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
 {
 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
 }
@@ -304,10 +304,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
  */
 static int
-rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
-	 unsigned int n, int *cw,
-	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
-	 uint16_t gcd, int prev)
+rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
+	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+	     uint16_t gcd, int prev)
 {
 	int i = prev;
 	uint16_t w;
@@ -412,10 +411,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
  */
 static void
-rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_intr)
+rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_intr)
 {
 	uint32_t intr_diff;
 
@@ -431,12 +429,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * interrupt queues could currently be poll mode Rx queues
  */
 static void
-rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+			  uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -463,11 +459,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
  * after deleting poll mode rx queues
  */
 static void
-rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
 {
 	uint32_t poll_diff;
 	uint32_t wrr_len_diff;
@@ -488,13 +482,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
 /* Calculate nb_rx_* after adding poll mode rx queues
  */
 static void
-rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint16_t wt,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint16_t wt, uint32_t *nb_rx_poll,
+			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -521,13 +512,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after adding rx_queue_id */
 static void
-rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint16_t wt,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	if (wt != 0)
 		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
@@ -539,12 +527,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after deleting rx_queue_id */
 static void
-rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
 				nb_wrr);
@@ -556,8 +542,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the rx_poll array
  */
 static struct eth_rx_poll_entry *
-rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint32_t num_rx_polled)
+rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
 {
 	size_t len;
 
@@ -573,7 +558,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the WRR array
  */
 static uint32_t *
-rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
 {
 	size_t len;
 
@@ -586,11 +571,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
 }
 
 static int
-rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint32_t nb_poll,
-		uint32_t nb_wrr,
-		struct eth_rx_poll_entry **rx_poll,
-		uint32_t **wrr_sched)
+rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
+		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
+		      uint32_t **wrr_sched)
 {
 
 	if (nb_poll == 0) {
@@ -615,9 +598,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Precalculate WRR polling sequence for all queues in rx_adapter */
 static void
-rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_rx_poll_entry *rx_poll,
-		uint32_t *rx_wrr)
+rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
+		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
 {
 	uint16_t d;
 	uint16_t q;
@@ -744,13 +726,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 }
 
 static inline int
-rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
 {
 	return !!rx_adapter->enq_block_count;
 }
 
 static inline void
-rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->rx_enq_block_start_ts)
 		return;
@@ -763,8 +745,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
-		    struct rte_event_eth_rx_adapter_stats *stats)
+rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
+		     struct rte_event_eth_rx_adapter_stats *stats)
 {
 	if (unlikely(!stats->rx_enq_start_ts))
 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
@@ -783,8 +765,8 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
-		       struct rte_eth_event_enqueue_buffer *buf)
+rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
+		       struct eth_event_enqueue_buffer *buf)
 {
 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
@@ -828,7 +810,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
 		struct eth_rx_vector_data *vec)
 {
 	vec->vector_ev->nb_elem = 0;
@@ -839,9 +821,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline uint16_t
-rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 			struct eth_rx_queue_info *queue_info,
-			struct rte_eth_event_enqueue_buffer *buf,
+			struct eth_event_enqueue_buffer *buf,
 			struct rte_mbuf **mbufs, uint16_t num)
 {
 	struct rte_event *ev = &buf->events[buf->count];
@@ -899,12 +881,9 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		uint16_t rx_queue_id,
-		struct rte_mbuf **mbufs,
-		uint16_t num,
-		struct rte_eth_event_enqueue_buffer *buf)
+rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
+		 struct eth_event_enqueue_buffer *buf)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
@@ -983,7 +962,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline bool
-rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
+rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 {
 	uint32_t nb_req = buf->tail + BATCH_SIZE;
 
@@ -1004,13 +983,9 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
 
 /* Enqueue packets from  <port, q>  to event buffer */
 static inline uint32_t
-rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint16_t port_id,
-	uint16_t queue_id,
-	uint32_t rx_count,
-	uint32_t max_rx,
-	int *rxq_empty,
-	struct rte_eth_event_enqueue_buffer *buf)
+rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
+	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
+	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
 	struct rte_event_eth_rx_adapter_stats *stats =
@@ -1047,8 +1022,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
-		void *data)
+rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
 {
 	uint16_t port_id;
 	uint16_t queue;
@@ -1088,8 +1062,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
-			uint32_t num_intr_vec)
+rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
+			  uint32_t num_intr_vec)
 {
 	if (rx_adapter->num_intr_vec + num_intr_vec >
 				RTE_EVENT_ETH_INTR_RING_SIZE) {
@@ -1104,9 +1078,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Delete entries for (dev, queue) from the interrupt ring */
 static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			uint16_t rx_queue_id)
+rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info,
+			  uint16_t rx_queue_id)
 {
 	int i, n;
 	union queue_data qd;
@@ -1139,7 +1113,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
 static void *
 rxa_intr_thread(void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
+	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
 	int n, i;
 
@@ -1162,12 +1136,12 @@ rxa_intr_thread(void *arg)
  * mbufs to eventdev
  */
 static inline uint32_t
-rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
@@ -1282,11 +1256,11 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
  * it.
  */
 static inline uint32_t
-rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
-	struct rte_eth_event_enqueue_buffer *buf = NULL;
+	struct eth_event_enqueue_buffer *buf = NULL;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1333,8 +1307,8 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
-	struct rte_eth_event_enqueue_buffer *buf = NULL;
+	struct event_eth_rx_adapter *rx_adapter = arg;
+	struct eth_event_enqueue_buffer *buf = NULL;
 	struct rte_event *ev;
 
 	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
@@ -1358,7 +1332,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 static int
 rxa_service_func(void *args)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = args;
+	struct event_eth_rx_adapter *rx_adapter = args;
 	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
@@ -1434,7 +1408,7 @@ rxa_memzone_lookup(void)
 	return 0;
 }
 
-static inline struct rte_event_eth_rx_adapter *
+static inline struct event_eth_rx_adapter *
 rxa_id_to_adapter(uint8_t id)
 {
 	return event_eth_rx_adapter ?
@@ -1451,7 +1425,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	uint8_t port_id;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	dev_conf = dev->data->dev_conf;
@@ -1500,7 +1474,7 @@ rxa_epoll_create1(void)
 }
 
 static int
-rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->epd != INIT_FD)
 		return 0;
@@ -1517,7 +1491,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1561,7 +1535,7 @@ rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 
@@ -1582,7 +1556,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
 {
 	int ret;
 
@@ -1600,9 +1574,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1630,9 +1603,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id)
+rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 {
 	int err;
 	int i;
@@ -1689,9 +1661,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
+		struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err, err1;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1779,9 +1750,8 @@ rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int rx_queue_id)
+rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 
 {
 	int i, j, err;
@@ -1829,9 +1799,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	return err;
 }
 
-
 static int
-rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
 {
 	int ret;
 	struct rte_service_spec service;
@@ -1874,10 +1843,9 @@ rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
 }
 
 static void
-rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int32_t rx_queue_id,
-		uint8_t add)
+rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, int32_t rx_queue_id,
+		 uint8_t add)
 {
 	struct eth_rx_queue_info *queue_info;
 	int enabled;
@@ -1927,9 +1895,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
 }
 
 static void
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id)
+rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
+	   struct eth_device_info *dev_info, int32_t rx_queue_id)
 {
 	struct eth_rx_vector_data *vec;
 	int pollq;
@@ -1968,7 +1935,7 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 	dev_info->nb_rx_intr -= intrq;
 	dev_info->nb_shared_intr -= intrq && sintrq;
 	if (rx_adapter->use_queue_event_buf) {
-		struct rte_eth_event_enqueue_buffer *event_buf =
+		struct eth_event_enqueue_buffer *event_buf =
 			dev_info->rx_queue[rx_queue_id].event_buf;
 		rte_free(event_buf->events);
 		rte_free(event_buf);
@@ -1977,10 +1944,9 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id,
-	const struct rte_event_eth_rx_adapter_queue_conf *conf)
+rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
+	      struct eth_device_info *dev_info, int32_t rx_queue_id,
+	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
 {
 	struct eth_rx_queue_info *queue_info;
 	const struct rte_event *ev = &conf->ev;
@@ -1988,7 +1954,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	int intrq;
 	int sintrq;
 	struct rte_event *qi_ev;
-	struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL;
+	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
 	int ret;
 
@@ -2098,10 +2064,10 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	return 0;
 }
 
-static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		int rx_queue_id,
-		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+static int
+rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	   int rx_queue_id,
+	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
@@ -2242,7 +2208,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 static int
 rxa_ctrl(uint8_t id, int start)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2290,8 +2256,8 @@ rxa_create(uint8_t id, uint8_t dev_id,
 	   rte_event_eth_rx_adapter_conf_cb conf_cb,
 	   void *conf_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_event_enqueue_buffer *buf;
 	struct rte_event *events;
 	int ret;
 	int socket_id;
@@ -2488,7 +2454,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_eth_rx_adapter_free(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2522,7 +2488,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
 {
 	int ret;
 	uint32_t cap;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	struct rte_event_eth_rx_adapter_vector_limits limits;
@@ -2682,7 +2648,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 {
 	int ret = 0;
 	struct rte_eventdev *dev;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	uint32_t nb_rx_poll = 0;
@@ -2852,8 +2818,8 @@ int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_event_enqueue_buffer *buf;
 	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -2907,7 +2873,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 int
 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2938,7 +2904,7 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 int
 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	if (rxa_memzone_lookup())
 		return -ENOMEM;
@@ -2961,7 +2927,7 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
 					rte_event_eth_rx_adapter_cb_fn cb_fn,
 					void *cb_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	int ret;
@@ -3007,7 +2973,7 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct rte_eventdev *dev;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	struct eth_rx_queue_info *queue_info;
 	struct rte_event *qi_ev;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 31fa9ac4b8..f1fcd6ce3d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1193,7 +1193,7 @@ struct rte_event {
 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
 /**< The application can override the adapter generated flow ID in the
  * event. This flow ID can be specified when adding an ethdev Rx queue
- * to the adapter using the ev member of struct rte_event_eth_rx_adapter
+ * to the adapter using the ev.flow_id member.
  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
  */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 10/14] eventdev: rearrange fields in timer object
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (7 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
                           ` (4 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Rearrange fields in rte_event_timer data structure to remove holes.
Also, remove use of volatile from rte_event_timer.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
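Note: an illustrative, self-contained sketch of why the reorder removes a
hole. It is not the literal rte_event_timer definition; the stand-in types
and the 48/44 numbers in the comment assume a typical LP64 ABI where an enum
is 4 bytes.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ev16 { uint64_t a; uint64_t b; };	/* stand-in for 16-byte rte_event */
enum tim_state { TIM_NOT_ARMED };		/* stand-in for rte_event_timer_state */

struct timer_before {
	struct ev16 ev;
	enum tim_state state;		/* 4-byte hole follows on LP64 */
	uint64_t timeout_ticks;
	uint64_t impl_opaque[2];
	uint8_t user_meta[0];
};

struct timer_after {
	struct ev16 ev;
	uint64_t timeout_ticks;
	uint64_t impl_opaque[2];
	enum tim_state state;		/* interior hole is gone */
	uint8_t user_meta[0];
};

int main(void)
{
	/* Typically prints user_meta at offset 48 before vs 44 after. */
	printf("before: size %zu, user_meta @ %zu\n",
	       sizeof(struct timer_before),
	       offsetof(struct timer_before, user_meta));
	printf("after:  size %zu, user_meta @ %zu\n",
	       sizeof(struct timer_after),
	       offsetof(struct timer_after, user_meta));
	return 0;
}
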
 doc/guides/rel_notes/release_21_11.rst | 3 +++
 lib/eventdev/rte_event_timer_adapter.h | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index b4e1770d4d..6442c79977 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -283,6 +283,9 @@ ABI Changes
   accessed directly by user any more. This change is transparent to both
   applications and PMDs.
 
+* eventdev: Re-arrange fields in ``rte_event_timer`` to remove holes.
+  ``rte_event_timer_adapter_pmd.h`` has been made internal.
+
 
 Known Issues
 ------------
diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index cad6d3b4c5..1551741820 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -475,8 +475,6 @@ struct rte_event_timer {
 	 *  - op: RTE_EVENT_OP_NEW
 	 *  - event_type: RTE_EVENT_TYPE_TIMER
 	 */
-	volatile enum rte_event_timer_state state;
-	/**< State of the event timer. */
 	uint64_t timeout_ticks;
 	/**< Expiry timer ticks expressed in number of *timer_ticks_ns* from
 	 * now.
@@ -488,6 +486,8 @@ struct rte_event_timer {
 	 * implementation specific values to share between the arm and cancel
 	 * operations.  The application should not modify this field.
 	 */
+	enum rte_event_timer_state state;
+	/**< State of the event timer. */
 	uint8_t user_meta[0];
 	/**< Memory to store user specific metadata.
 	 * The event timer adapter implementation should not modify this area.
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 11/14] eventdev: move timer adapters memory to hugepage
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (8 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 10/14] eventdev: rearrange fields in timer object pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-20 20:24           ` Carrillo, Erik G
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 12/14] eventdev: promote event vector API to stable pbhagavatula
                           ` (3 subsequent siblings)
  13 siblings, 1 reply; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Erik Gabriel Carrillo; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the memory used by timer adapters to hugepages.
Allocate the memory on the first adapter create or lookup so that both
primary and secondary process use cases are covered.
This prevents TLB misses, if any, and aligns with the memory layout of
other subsystems.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
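Note: a sketch of the allocation pattern added below, factored into a
hypothetical helper (the patch itself open-codes it in both
rte_event_timer_adapter_create_ext() and rte_event_timer_adapter_lookup()).
The rte_zmalloc() arguments are the ones used in the patch;
RTE_EVENT_TIMER_ADAPTER_NUM_MAX comes from the build configuration.

#include <errno.h>
#include <rte_errno.h>
#include <rte_event_timer_adapter.h>
#include <rte_malloc.h>

static struct rte_event_timer_adapter *adapters;

static int
timer_adapters_alloc(void)	/* hypothetical helper */
{
	if (adapters != NULL)
		return 0;

	/* rte_zmalloc() draws from the hugepage-backed DPDK heap, which is
	 * what moves the adapter array off the BSS.
	 */
	adapters = rte_zmalloc("Eventdev",
			       sizeof(struct rte_event_timer_adapter) *
				       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
			       RTE_CACHE_LINE_SIZE);
	if (adapters == NULL) {
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	return 0;
}
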
 doc/guides/rel_notes/release_21_11.rst |  2 ++
 lib/eventdev/rte_event_timer_adapter.c | 36 ++++++++++++++++++++++++--
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 6442c79977..9694b32002 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -226,6 +226,8 @@ API Changes
   the crypto/security operation. This field will be used to communicate
   events such as soft expiry with IPsec in lookaside mode.
 
+* eventdev: Move memory used by timer adapters to hugepage. This will prevent
+  TLB misses if any and aligns to memory structure of other subsystems.
 
 ABI Changes
 -----------
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index ae55407042..894f532ef0 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
 
-static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
+static struct rte_event_timer_adapter *adapters;
 
 static const struct event_timer_adapter_ops swtim_ops;
 
@@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
 	int n, ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (conf == NULL) {
 		rte_errno = EINVAL;
 		return NULL;
@@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
 	int ret;
 	struct rte_eventdev *dev;
 
+	if (adapters == NULL) {
+		adapters = rte_zmalloc("Eventdev",
+				       sizeof(struct rte_event_timer_adapter) *
+					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+				       RTE_CACHE_LINE_SIZE);
+		if (adapters == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+	}
+
 	if (adapters[adapter_id].allocated)
 		return &adapters[adapter_id]; /* Adapter is already loaded */
 
@@ -358,7 +380,7 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
 int
 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
 {
-	int ret;
+	int i, ret;
 
 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
 	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
@@ -382,6 +404,16 @@ rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
 	adapter->data = NULL;
 	adapter->allocated = 0;
 
+	ret = 0;
+	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
+		if (adapters[i].allocated)
+			ret = adapters[i].allocated;
+
+	if (!ret) {
+		rte_free(adapters);
+		adapters = NULL;
+	}
+
 	rte_eventdev_trace_timer_adapter_free(adapter);
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 12/14] eventdev: promote event vector API to stable
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (9 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 13/14] eventdev: make trace APIs internal pbhagavatula
                           ` (2 subsequent siblings)
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Jay Jayatheerthan, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Promote event vector configuration APIs to stable.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
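Note: an illustrative caller-side sketch; with this patch the two calls below
no longer require RTE_ALLOW_EXPERIMENTAL_API. The function name and the pool
parameters are arbitrary, and the final socket_id argument of
rte_event_vector_pool_create() is assumed from the (truncated) prototype in
the hunk below.

#include <rte_errno.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>

static int
setup_vector_pool(uint8_t dev_id, uint16_t eth_port_id)	/* hypothetical */
{
	struct rte_event_eth_rx_adapter_vector_limits limits;
	struct rte_mempool *vpool;
	int ret;

	ret = rte_event_eth_rx_adapter_vector_limits_get(dev_id, eth_port_id,
							 &limits);
	if (ret < 0)
		return ret;

	/* nb_elem (32 here) should be chosen within the returned limits. */
	vpool = rte_event_vector_pool_create("rxa_vpool", 8192, 0, 32,
					     rte_socket_id());
	return vpool == NULL ? -rte_errno : 0;
}
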
 doc/guides/rel_notes/release_21_11.rst  | 2 ++
 lib/eventdev/rte_event_eth_rx_adapter.h | 1 -
 lib/eventdev/rte_eventdev.h             | 1 -
 lib/eventdev/version.map                | 4 ++--
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 9694b32002..57389dc594 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -229,6 +229,8 @@ API Changes
 * eventdev: Move memory used by timer adapters to hugepage. This will prevent
   TLB misses if any and aligns to memory structure of other subsystems.
 
+* eventdev: Event vector configuration APIs have been made stable.
+
 ABI Changes
 -----------
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index c4257e750d..ab625f7273 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -588,7 +588,6 @@ int rte_event_eth_rx_adapter_cb_register(uint8_t id, uint16_t eth_dev_id,
  *  - 0: Success.
  *  - <0: Error code on failure.
  */
-__rte_experimental
 int rte_event_eth_rx_adapter_vector_limits_get(
 	uint8_t dev_id, uint16_t eth_port_id,
 	struct rte_event_eth_rx_adapter_vector_limits *limits);
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index f1fcd6ce3d..14d4d9ec81 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1734,7 +1734,6 @@ int rte_event_dev_selftest(uint8_t dev_id);
  *    - ENOMEM - no appropriate memory area found in which to create memzone
  *    - ENAMETOOLONG - mempool name requested is too long.
  */
-__rte_experimental
 struct rte_mempool *
 rte_event_vector_pool_create(const char *name, unsigned int n,
 			     unsigned int cache_size, uint16_t nb_elem,
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 9f6eb4ba3c..8f2fb0cf14 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -42,6 +42,7 @@ DPDK_22 {
 	rte_event_eth_rx_adapter_start;
 	rte_event_eth_rx_adapter_stats_get;
 	rte_event_eth_rx_adapter_stats_reset;
+	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_rx_adapter_stop;
 	rte_event_eth_tx_adapter_caps_get;
 	rte_event_eth_tx_adapter_create;
@@ -83,6 +84,7 @@ DPDK_22 {
 	rte_event_timer_arm_burst;
 	rte_event_timer_arm_tmo_tick_burst;
 	rte_event_timer_cancel_burst;
+	rte_event_vector_pool_create;
 
 	#added in 21.11
 	rte_event_fp_ops;
@@ -136,8 +138,6 @@ EXPERIMENTAL {
 	rte_event_eth_rx_adapter_create_with_params;
 
 	#added in 21.05
-	rte_event_vector_pool_create;
-	rte_event_eth_rx_adapter_vector_limits_get;
 	__rte_eventdev_trace_crypto_adapter_enqueue;
 	rte_event_eth_rx_adapter_queue_conf_get;
 };
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 13/14] eventdev: make trace APIs internal
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (10 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 12/14] eventdev: promote event vector API to stable pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 14/14] eventdev: mark trace variables as internal pbhagavatula
  2021-10-20  4:01         ` [dpdk-dev] [PATCH v5 01/14] eventdev: make driver interface " Jerin Jacob
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Abhinandan Gujjar, Jay Jayatheerthan, Erik Gabriel Carrillo
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Slow-path trace APIs are only used within the eventdev library, so make
them internal.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
---
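
For context, the renamed header carries the slow-path trace point
definitions that eventdev_trace_points.c registers via
RTE_TRACE_POINT_REGISTER (see the hunk below); the definitions follow the
usual rte_trace shape, roughly as in this sketch (the argument list is
illustrative, not copied from the real header):

#include <rte_trace_point.h>

/* Sketch of a slow-path trace point definition in eventdev_trace.h. */
RTE_TRACE_POINT(
	rte_eventdev_trace_configure,
	RTE_TRACE_POINT_ARGS(uint8_t dev_id, int rc),
	rte_trace_point_emit_u8(dev_id);
	rte_trace_point_emit_int(rc);
)
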
 lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} | 0
 lib/eventdev/eventdev_trace_points.c                    | 2 +-
 lib/eventdev/meson.build                                | 2 +-
 lib/eventdev/rte_event_crypto_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_rx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_eth_tx_adapter.c                 | 2 +-
 lib/eventdev/rte_event_timer_adapter.c                  | 2 +-
 lib/eventdev/rte_eventdev.c                             | 2 +-
 8 files changed, 7 insertions(+), 7 deletions(-)
 rename lib/eventdev/{rte_eventdev_trace.h => eventdev_trace.h} (100%)

diff --git a/lib/eventdev/rte_eventdev_trace.h b/lib/eventdev/eventdev_trace.h
similarity index 100%
rename from lib/eventdev/rte_eventdev_trace.h
rename to lib/eventdev/eventdev_trace.h
diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
index 3867ec8008..237d9383fd 100644
--- a/lib/eventdev/eventdev_trace_points.c
+++ b/lib/eventdev/eventdev_trace_points.c
@@ -4,7 +4,7 @@
 
 #include <rte_trace_point_register.h>
 
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 /* Eventdev trace points */
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_configure,
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 22c3289912..abe88f733a 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -24,7 +24,6 @@ headers = files(
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_eventdev.h',
-        'rte_eventdev_trace.h',
         'rte_eventdev_trace_fp.h',
 )
 indirect_headers += files(
@@ -34,6 +33,7 @@ driver_sdk_headers += files(
         'eventdev_pmd.h',
         'eventdev_pmd_pci.h',
         'eventdev_pmd_vdev.h',
+        'eventdev_trace.h',
         'event_timer_adapter_pmd.h',
 )
 
diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index e9e660a3d2..ae1151fb75 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -16,7 +16,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_crypto_adapter.h"
 
 #define BATCH_SIZE 32
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 7d37456856..106b68c2f4 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -22,7 +22,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_rx_adapter.h"
 
 #define BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c
index 18c0359db7..ee3631bced 100644
--- a/lib/eventdev/rte_event_eth_tx_adapter.c
+++ b/lib/eventdev/rte_event_eth_tx_adapter.c
@@ -6,7 +6,7 @@
 #include <rte_ethdev.h>
 
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_tx_adapter.h"
 
 #define TXA_BATCH_SIZE		32
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index 894f532ef0..86b6c3fc6f 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -24,7 +24,7 @@
 #include "eventdev_pmd.h"
 #include "rte_event_timer_adapter.h"
 #include "rte_eventdev.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 #define DATA_MZ_NAME_MAX_LEN 64
 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index de6346194e..f881b7cc35 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -36,7 +36,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* [dpdk-dev] [PATCH v5 14/14] eventdev: mark trace variables as internal
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (11 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 13/14] eventdev: make trace APIs internal pbhagavatula
@ 2021-10-18 23:36         ` pbhagavatula
  2021-10-20  4:01         ` [dpdk-dev] [PATCH v5 01/14] eventdev: make driver interface " Jerin Jacob
  13 siblings, 0 replies; 119+ messages in thread
From: pbhagavatula @ 2021-10-18 23:36 UTC (permalink / raw)
  To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Mark the rte_trace global variables as internal, i.e. remove them
from the experimental section of the version map.
Some of them are used in inline APIs; keep those exported as global
(stable) symbols.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 lib/eventdev/version.map | 71 ++++++++++++++++++----------------------
 1 file changed, 32 insertions(+), 39 deletions(-)

diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 8f2fb0cf14..cd37164141 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -1,6 +1,13 @@
 DPDK_22 {
 	global:
 
+	__rte_eventdev_trace_crypto_adapter_enqueue;
+	__rte_eventdev_trace_deq_burst;
+	__rte_eventdev_trace_enq_burst;
+	__rte_eventdev_trace_eth_tx_adapter_enqueue;
+	__rte_eventdev_trace_timer_arm_burst;
+	__rte_eventdev_trace_timer_arm_tmo_tick_burst;
+	__rte_eventdev_trace_timer_cancel_burst;
 	rte_event_crypto_adapter_caps_get;
 	rte_event_crypto_adapter_create;
 	rte_event_crypto_adapter_create_ext;
@@ -42,8 +49,8 @@ DPDK_22 {
 	rte_event_eth_rx_adapter_start;
 	rte_event_eth_rx_adapter_stats_get;
 	rte_event_eth_rx_adapter_stats_reset;
-	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_rx_adapter_stop;
+	rte_event_eth_rx_adapter_vector_limits_get;
 	rte_event_eth_tx_adapter_caps_get;
 	rte_event_eth_tx_adapter_create;
 	rte_event_eth_tx_adapter_create_ext;
@@ -56,6 +63,7 @@ DPDK_22 {
 	rte_event_eth_tx_adapter_stats_get;
 	rte_event_eth_tx_adapter_stats_reset;
 	rte_event_eth_tx_adapter_stop;
+	rte_event_fp_ops;
 	rte_event_port_attr_get;
 	rte_event_port_default_conf_get;
 	rte_event_port_link;
@@ -86,25 +94,28 @@ DPDK_22 {
 	rte_event_timer_cancel_burst;
 	rte_event_vector_pool_create;
 
-	#added in 21.11
-	rte_event_fp_ops;
-
 	local: *;
 };
 
 EXPERIMENTAL {
 	global:
 
-	# added in 20.05
-	__rte_eventdev_trace_configure;
-	__rte_eventdev_trace_queue_setup;
-	__rte_eventdev_trace_port_link;
-	__rte_eventdev_trace_port_unlink;
-	__rte_eventdev_trace_start;
-	__rte_eventdev_trace_stop;
+	# added in 21.11
+	rte_event_eth_rx_adapter_create_with_params;
+	rte_event_eth_rx_adapter_queue_conf_get;
+};
+
+INTERNAL {
+	global:
+
 	__rte_eventdev_trace_close;
-	__rte_eventdev_trace_deq_burst;
-	__rte_eventdev_trace_enq_burst;
+	__rte_eventdev_trace_configure;
+	__rte_eventdev_trace_crypto_adapter_create;
+	__rte_eventdev_trace_crypto_adapter_free;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
+	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
+	__rte_eventdev_trace_crypto_adapter_start;
+	__rte_eventdev_trace_crypto_adapter_stop;
 	__rte_eventdev_trace_eth_rx_adapter_create;
 	__rte_eventdev_trace_eth_rx_adapter_free;
 	__rte_eventdev_trace_eth_rx_adapter_queue_add;
@@ -117,38 +128,19 @@ EXPERIMENTAL {
 	__rte_eventdev_trace_eth_tx_adapter_queue_del;
 	__rte_eventdev_trace_eth_tx_adapter_start;
 	__rte_eventdev_trace_eth_tx_adapter_stop;
-	__rte_eventdev_trace_eth_tx_adapter_enqueue;
+	__rte_eventdev_trace_port_link;
+	__rte_eventdev_trace_port_setup;
+	__rte_eventdev_trace_port_unlink;
+	__rte_eventdev_trace_queue_setup;
+	__rte_eventdev_trace_start;
+	__rte_eventdev_trace_stop;
 	__rte_eventdev_trace_timer_adapter_create;
+	__rte_eventdev_trace_timer_adapter_free;
 	__rte_eventdev_trace_timer_adapter_start;
 	__rte_eventdev_trace_timer_adapter_stop;
-	__rte_eventdev_trace_timer_adapter_free;
-	__rte_eventdev_trace_timer_arm_burst;
-	__rte_eventdev_trace_timer_arm_tmo_tick_burst;
-	__rte_eventdev_trace_timer_cancel_burst;
-	__rte_eventdev_trace_crypto_adapter_create;
-	__rte_eventdev_trace_crypto_adapter_free;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_add;
-	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
-	__rte_eventdev_trace_crypto_adapter_start;
-	__rte_eventdev_trace_crypto_adapter_stop;
-
-	# changed in 20.11
-	__rte_eventdev_trace_port_setup;
-	# added in 21.11
-	rte_event_eth_rx_adapter_create_with_params;
-
-	#added in 21.05
-	__rte_eventdev_trace_crypto_adapter_enqueue;
-	rte_event_eth_rx_adapter_queue_conf_get;
-};
-
-INTERNAL {
-	global:
-
 	event_dev_fp_ops_reset;
 	event_dev_fp_ops_set;
 	event_dev_probing_finish;
-	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;
 	rte_event_pmd_is_valid_dev;
@@ -156,6 +148,7 @@ INTERNAL {
 	rte_event_pmd_pci_probe_named;
 	rte_event_pmd_pci_remove;
 	rte_event_pmd_release;
+	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_vdev_init;
 	rte_event_pmd_vdev_uninit;
 	rte_eventdevs;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v4 14/14] eventdev: mark trace variables as internal
  2021-10-18 15:06           ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
@ 2021-10-19  7:01             ` David Marchand
  0 siblings, 0 replies; 119+ messages in thread
From: David Marchand @ 2021-10-19  7:01 UTC (permalink / raw)
  To: Pavan Nikhilesh Bhagavatula, Ray Kinsella
  Cc: Jerin Jacob, Jerin Jacob Kollanukkaran, dpdk-dev

Hello Pavan,

On Mon, Oct 18, 2021 at 5:07 PM Pavan Nikhilesh Bhagavatula
<pbhagavatula@marvell.com> wrote:
> >[for-main]dell[dpdk-next-eventdev] $ ./devtools/checkpatches.sh -n 14
> >
> >### eventdev: move inline APIs into separate structure
> >
> >INFO: symbol event_dev_fp_ops_reset has been added to the INTERNAL section of the version map
> >INFO: symbol event_dev_fp_ops_set has been added to the INTERNAL section of the version map
> >INFO: symbol event_dev_probing_finish has been added to the INTERNAL section of the version map
>
> These can be ignored as they are internal

Those first warnings are informational.

>
> >ERROR: symbol rte_event_fp_ops is added in the DPDK_22 section, but is expected to be added in the EXPERIMENTAL section of the version map
>
> This is a replacement for rte_eventdevs; the ethdev rework also doesn’t
> mark it as experimental. @David Marchand @Ray Kinsella any opinions?

This check is there to ensure that added symbols first go through a
period in experimental status.

Same as for ethdev, the use of inlines in stable API directly exposes
a new symbol to applications.
With this implementation, this check can be waived and the symbol can
go directly to stable status.

Since this symbol is exposed as stable, it will be frozen in the ABI until
the next ABI breakage.
I see you reserved 6 spots for new ops, so it looks ok.
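
To illustrate the point about inlines: after this series the public
enqueue/dequeue wrappers are static inline functions that read
rte_event_fp_ops directly, so the symbol is referenced from application
binaries. Schematically (a sketch, not the actual header; the wrapper name
is hypothetical):

#include <rte_eventdev.h>

static inline uint16_t
enqueue_burst_sketch(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp = &rte_event_fp_ops[dev_id];

	/* The reference to rte_event_fp_ops is compiled into the
	 * application, which is why the symbol goes straight to the
	 * stable section rather than EXPERIMENTAL.
	 */
	return fp->enqueue_burst(fp->data[port_id], ev, nb_events);
}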



-- 
David Marchand


^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v5 01/14] eventdev: make driver interface as internal
  2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
                           ` (12 preceding siblings ...)
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 14/14] eventdev: mark trace variables as internal pbhagavatula
@ 2021-10-20  4:01         ` Jerin Jacob
  13 siblings, 0 replies; 119+ messages in thread
From: Jerin Jacob @ 2021-10-20  4:01 UTC (permalink / raw)
  To: Pavan Nikhilesh
  Cc: Jerin Jacob, Shijith Thotton, Timothy McDaniel, Hemant Agrawal,
	Nipun Gupta, Mattias Rönnblom, Liang Ma, Peter Mccarthy,
	Harry van Haaren, Abhinandan Gujjar, Ray Kinsella, dpdk-dev

On Tue, Oct 19, 2021 at 5:06 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Mark all the driver specific functions as internal, remove
> `rte` prefix from `struct rte_eventdev_ops`.
> Remove experimental tag from internal functions.
> Remove `eventdev_pmd.h` from non-internal header files.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Series applied to dpdk-next-net-eventdev/for-main. Thanks


> ---
>  v5 Changes:
>  - Move doc updates to respective patches. (Jerin)
>
>  v4 Changes:
>  - Update release notes. (Jerin)
>  - Rearrange fp_ops fields. (Jerin)
>  - Free timer array memory when freeing the last adapter. (Erik)
>  - Rebase onto next-event.
>  - Fix spell checks.
>  - Rearrange version.map (David)
>
>  v3 Changes:
>  - Reset fp_ops when the device is torn down.
>  - Add `event_dev_probing_finish()`; this function is used for
>    post-initialization processing. In the current use case we use it to
>    initialize fastpath ops.
>
>  v2 Changes:
>  - Rework inline flat array by adding port data into it.
>  - Rearrange rte_event_timer elements.
>
>
>  drivers/event/cnxk/cn10k_eventdev.c        |  6 ++---
>  drivers/event/cnxk/cn9k_eventdev.c         | 10 ++++-----
>  drivers/event/dlb2/dlb2.c                  |  2 +-
>  drivers/event/dpaa/dpaa_eventdev.c         |  2 +-
>  drivers/event/dpaa2/dpaa2_eventdev.c       |  2 +-
>  drivers/event/dsw/dsw_evdev.c              |  2 +-
>  drivers/event/octeontx/ssovf_evdev.c       |  2 +-
>  drivers/event/octeontx/ssovf_worker.c      |  4 ++--
>  drivers/event/octeontx2/otx2_evdev.c       | 26 +++++++++++-----------
>  drivers/event/opdl/opdl_evdev.c            |  2 +-
>  drivers/event/skeleton/skeleton_eventdev.c |  2 +-
>  drivers/event/sw/sw_evdev.c                |  2 +-
>  lib/eventdev/eventdev_pmd.h                |  6 ++++-
>  lib/eventdev/eventdev_pmd_pci.h            |  4 +++-
>  lib/eventdev/eventdev_pmd_vdev.h           |  2 ++
>  lib/eventdev/meson.build                   |  6 +++++
>  lib/eventdev/rte_event_crypto_adapter.h    |  1 -
>  lib/eventdev/rte_eventdev.h                | 25 ++++++++++++---------
>  lib/eventdev/version.map                   | 17 +++++++-------
>  19 files changed, 70 insertions(+), 53 deletions(-)
>
> diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> index bfd470cffd..612c299b59 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -380,7 +380,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
>         };
>
>         /* Tx modes */
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
>         [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
> @@ -388,7 +388,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
>  #undef T
>                 };
>
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
>         [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
> @@ -788,7 +788,7 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
>         return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
>  }
>
> -static struct rte_eventdev_ops cn10k_sso_dev_ops = {
> +static struct eventdev_ops cn10k_sso_dev_ops = {
>         .dev_infos_get = cn10k_sso_info_get,
>         .dev_configure = cn10k_sso_dev_configure,
>         .queue_def_conf = cnxk_sso_queue_def_conf,
> diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
> index 806dcb0a45..d757da7c37 100644
> --- a/drivers/event/cnxk/cn9k_eventdev.c
> +++ b/drivers/event/cnxk/cn9k_eventdev.c
> @@ -514,7 +514,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
>         };
>
>         /* Tx modes */
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
>         [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
> @@ -522,7 +522,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
>  #undef T
>                 };
>
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
>         [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
> @@ -530,7 +530,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
>  #undef T
>                 };
>
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
>         [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
> @@ -538,7 +538,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
>  #undef T
>                 };
>
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
>         [f6][f5][f4][f3][f2][f1][f0] =                                         \
> @@ -1060,7 +1060,7 @@ cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
>         return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
>  }
>
> -static struct rte_eventdev_ops cn9k_sso_dev_ops = {
> +static struct eventdev_ops cn9k_sso_dev_ops = {
>         .dev_infos_get = cn9k_sso_info_get,
>         .dev_configure = cn9k_sso_dev_configure,
>         .queue_def_conf = cnxk_sso_queue_def_conf,
> diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
> index 252bbd8d5e..c8742ddb2c 100644
> --- a/drivers/event/dlb2/dlb2.c
> +++ b/drivers/event/dlb2/dlb2.c
> @@ -4384,7 +4384,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
>         struct dlb2_eventdev *dlb2;
>
>         /* Expose PMD's eventdev interface */
> -       static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
> +       static struct eventdev_ops dlb2_eventdev_entry_ops = {
>                 .dev_infos_get    = dlb2_eventdev_info_get,
>                 .dev_configure    = dlb2_eventdev_configure,
>                 .dev_start        = dlb2_eventdev_start,
> diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
> index ec74160325..9f14390d28 100644
> --- a/drivers/event/dpaa/dpaa_eventdev.c
> +++ b/drivers/event/dpaa/dpaa_eventdev.c
> @@ -925,7 +925,7 @@ dpaa_eventdev_txa_enqueue(void *port,
>         return nb_events;
>  }
>
> -static struct rte_eventdev_ops dpaa_eventdev_ops = {
> +static struct eventdev_ops dpaa_eventdev_ops = {
>         .dev_infos_get    = dpaa_event_dev_info_get,
>         .dev_configure    = dpaa_event_dev_configure,
>         .dev_start        = dpaa_event_dev_start,
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
> index 5ccf22f77f..d577f64824 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> @@ -1015,7 +1015,7 @@ dpaa2_eventdev_txa_enqueue(void *port,
>         return nb_events;
>  }
>
> -static struct rte_eventdev_ops dpaa2_eventdev_ops = {
> +static struct eventdev_ops dpaa2_eventdev_ops = {
>         .dev_infos_get    = dpaa2_eventdev_info_get,
>         .dev_configure    = dpaa2_eventdev_configure,
>         .dev_start        = dpaa2_eventdev_start,
> diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
> index 2301a4b7a0..01f060fff3 100644
> --- a/drivers/event/dsw/dsw_evdev.c
> +++ b/drivers/event/dsw/dsw_evdev.c
> @@ -398,7 +398,7 @@ dsw_crypto_adapter_caps_get(const struct rte_eventdev *dev  __rte_unused,
>         return 0;
>  }
>
> -static struct rte_eventdev_ops dsw_evdev_ops = {
> +static struct eventdev_ops dsw_evdev_ops = {
>         .port_setup = dsw_port_setup,
>         .port_def_conf = dsw_port_def_conf,
>         .port_release = dsw_port_release,
> diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
> index b93f6ec8c6..4a8c6a13a5 100644
> --- a/drivers/event/octeontx/ssovf_evdev.c
> +++ b/drivers/event/octeontx/ssovf_evdev.c
> @@ -790,7 +790,7 @@ ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev,
>  }
>
>  /* Initialize and register event driver with DPDK Application */
> -static struct rte_eventdev_ops ssovf_ops = {
> +static struct eventdev_ops ssovf_ops = {
>         .dev_infos_get    = ssovf_info_get,
>         .dev_configure    = ssovf_configure,
>         .queue_def_conf   = ssovf_queue_def_conf,
> diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
> index 8b056ddc5a..2df940f0f1 100644
> --- a/drivers/event/octeontx/ssovf_worker.c
> +++ b/drivers/event/octeontx/ssovf_worker.c
> @@ -343,11 +343,11 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
>
>         dev->ca_enqueue = ssow_crypto_adapter_enqueue;
>
> -       const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
> +       const event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
>  #define T(name, f3, f2, f1, f0, sz, flags)                             \
>         [f3][f2][f1][f0] =  sso_event_tx_adapter_enqueue_ ##name,
>
> -SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
> +               SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
>  #undef T
>         };
>
> diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
> index 38a6b651d9..f26bed334f 100644
> --- a/drivers/event/octeontx2/otx2_evdev.c
> +++ b/drivers/event/octeontx2/otx2_evdev.c
> @@ -178,41 +178,41 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
>         };
>
>         /* Tx modes */
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                 \
>                 [f6][f5][f4][f3][f2][f1][f0] =                          \
>                         otx2_ssogws_tx_adptr_enq_ ## name,
> -SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
> +                       SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
>  #undef T
> -       };
> +               };
>
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                 \
>                 [f6][f5][f4][f3][f2][f1][f0] =                          \
>                         otx2_ssogws_tx_adptr_enq_seg_ ## name,
> -SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
> +                       SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
>  #undef T
> -       };
> +               };
>
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                 \
>                 [f6][f5][f4][f3][f2][f1][f0] =                          \
>                         otx2_ssogws_dual_tx_adptr_enq_ ## name,
> -SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
> +                       SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
>  #undef T
> -       };
> +               };
>
> -       const event_tx_adapter_enqueue
> +       const event_tx_adapter_enqueue_t
>                 ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
>  #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                 \
>                 [f6][f5][f4][f3][f2][f1][f0] =                          \
>                         otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
> -SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
> +                       SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
>  #undef T
> -       };
> +               };
>
>         event_dev->enqueue                      = otx2_ssogws_enq;
>         event_dev->enqueue_burst                = otx2_ssogws_enq_burst;
> @@ -1596,7 +1596,7 @@ otx2_sso_close(struct rte_eventdev *event_dev)
>  }
>
>  /* Initialize and register event driver with DPDK Application */
> -static struct rte_eventdev_ops otx2_sso_ops = {
> +static struct eventdev_ops otx2_sso_ops = {
>         .dev_infos_get    = otx2_sso_info_get,
>         .dev_configure    = otx2_sso_configure,
>         .queue_def_conf   = otx2_sso_queue_def_conf,
> diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
> index cfa9733b64..739dc64c82 100644
> --- a/drivers/event/opdl/opdl_evdev.c
> +++ b/drivers/event/opdl/opdl_evdev.c
> @@ -609,7 +609,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
>  static int
>  opdl_probe(struct rte_vdev_device *vdev)
>  {
> -       static struct rte_eventdev_ops evdev_opdl_ops = {
> +       static struct eventdev_ops evdev_opdl_ops = {
>                 .dev_configure = opdl_dev_configure,
>                 .dev_infos_get = opdl_info_get,
>                 .dev_close = opdl_close,
> diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
> index 6fd1102596..c9e17e7cb1 100644
> --- a/drivers/event/skeleton/skeleton_eventdev.c
> +++ b/drivers/event/skeleton/skeleton_eventdev.c
> @@ -320,7 +320,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
>
>
>  /* Initialize and register event driver with DPDK Application */
> -static struct rte_eventdev_ops skeleton_eventdev_ops = {
> +static struct eventdev_ops skeleton_eventdev_ops = {
>         .dev_infos_get    = skeleton_eventdev_info_get,
>         .dev_configure    = skeleton_eventdev_configure,
>         .dev_start        = skeleton_eventdev_start,
> diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
> index a5e6ca22e8..9b72073322 100644
> --- a/drivers/event/sw/sw_evdev.c
> +++ b/drivers/event/sw/sw_evdev.c
> @@ -945,7 +945,7 @@ static int32_t sw_sched_service_func(void *args)
>  static int
>  sw_probe(struct rte_vdev_device *vdev)
>  {
> -       static struct rte_eventdev_ops evdev_sw_ops = {
> +       static struct eventdev_ops evdev_sw_ops = {
>                         .dev_configure = sw_dev_configure,
>                         .dev_infos_get = sw_info_get,
>                         .dev_close = sw_close,
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index 7ac31e9f92..688f30d45e 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -99,6 +99,7 @@ extern struct rte_eventdev *rte_eventdevs;
>   * @return
>   *   - The rte_eventdev structure pointer for the given device ID.
>   */
> +__rte_internal
>  static inline struct rte_eventdev *
>  rte_event_pmd_get_named_dev(const char *name)
>  {
> @@ -127,6 +128,7 @@ rte_event_pmd_get_named_dev(const char *name)
>   * @return
>   *   - If the device index is valid (1) or not (0).
>   */
> +__rte_internal
>  static inline unsigned
>  rte_event_pmd_is_valid_dev(uint8_t dev_id)
>  {
> @@ -1056,7 +1058,7 @@ typedef int (*eventdev_eth_tx_adapter_stats_reset_t)(uint8_t id,
>                                         const struct rte_eventdev *dev);
>
>  /** Event device operations function pointer table */
> -struct rte_eventdev_ops {
> +struct eventdev_ops {
>         eventdev_info_get_t dev_infos_get;      /**< Get device info. */
>         eventdev_configure_t dev_configure;     /**< Configure device. */
>         eventdev_start_t dev_start;             /**< Start device. */
> @@ -1173,6 +1175,7 @@ struct rte_eventdev_ops {
>   * @return
>   *   - Slot in the rte_dev_devices array for a new device;
>   */
> +__rte_internal
>  struct rte_eventdev *
>  rte_event_pmd_allocate(const char *name, int socket_id);
>
> @@ -1184,6 +1187,7 @@ rte_event_pmd_allocate(const char *name, int socket_id);
>   * @return
>   *   - 0 on success, negative on error
>   */
> +__rte_internal
>  int
>  rte_event_pmd_release(struct rte_eventdev *eventdev);
>
> diff --git a/lib/eventdev/eventdev_pmd_pci.h b/lib/eventdev/eventdev_pmd_pci.h
> index 1545b240f2..2f12a5eb24 100644
> --- a/lib/eventdev/eventdev_pmd_pci.h
> +++ b/lib/eventdev/eventdev_pmd_pci.h
> @@ -31,7 +31,7 @@ typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
>   * interface.  Same as rte_event_pmd_pci_probe, except caller can specify
>   * the name.
>   */
> -__rte_experimental
> +__rte_internal
>  static inline int
>  rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
>                               struct rte_pci_device *pci_dev,
> @@ -85,6 +85,7 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
>   * Wrapper for use by pci drivers as a .probe function to attach to a event
>   * interface.
>   */
> +__rte_internal
>  static inline int
>  rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
>                             struct rte_pci_device *pci_dev,
> @@ -108,6 +109,7 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
>   * Wrapper for use by pci drivers as a .remove function to detach a event
>   * interface.
>   */
> +__rte_internal
>  static inline int
>  rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
>                              eventdev_pmd_pci_callback_t devuninit)
> diff --git a/lib/eventdev/eventdev_pmd_vdev.h b/lib/eventdev/eventdev_pmd_vdev.h
> index 2d33924e6c..d9ee7277dd 100644
> --- a/lib/eventdev/eventdev_pmd_vdev.h
> +++ b/lib/eventdev/eventdev_pmd_vdev.h
> @@ -37,6 +37,7 @@
>   *   - Eventdev pointer if device is successfully created.
>   *   - NULL if device cannot be created.
>   */
> +__rte_internal
>  static inline struct rte_eventdev *
>  rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
>                 int socket_id)
> @@ -74,6 +75,7 @@ rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
>   * @return
>   *   - 0 on success, negative on error
>   */
> +__rte_internal
>  static inline int
>  rte_event_pmd_vdev_uninit(const char *name)
>  {
> diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
> index 32abeba794..523ea9ccae 100644
> --- a/lib/eventdev/meson.build
> +++ b/lib/eventdev/meson.build
> @@ -27,5 +27,11 @@ headers = files(
>          'rte_event_crypto_adapter.h',
>          'rte_event_eth_tx_adapter.h',
>  )
> +driver_sdk_headers += files(
> +        'eventdev_pmd.h',
> +        'eventdev_pmd_pci.h',
> +        'eventdev_pmd_vdev.h',
> +)
> +
>  deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
>  deps += ['telemetry']
> diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
> index edbd5c61a3..1a8ff75384 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.h
> +++ b/lib/eventdev/rte_event_crypto_adapter.h
> @@ -171,7 +171,6 @@ extern "C" {
>  #include <stdint.h>
>
>  #include "rte_eventdev.h"
> -#include "eventdev_pmd.h"
>
>  /**
>   * Crypto event adapter mode
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index a9c496fb62..0c701888d5 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1324,7 +1324,7 @@ int
>  rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
>                                 uint32_t *caps);
>
> -struct rte_eventdev_ops;
> +struct eventdev_ops;
>  struct rte_eventdev;
>
>  typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
> @@ -1342,18 +1342,21 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
>                 uint16_t nb_events, uint64_t timeout_ticks);
>  /**< @internal Dequeue burst of events from port of a device */
>
> -typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
> -                               struct rte_event ev[], uint16_t nb_events);
> +typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
> +                                              struct rte_event ev[],
> +                                              uint16_t nb_events);
>  /**< @internal Enqueue burst of events on port of a device */
>
> -typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
> -               struct rte_event ev[], uint16_t nb_events);
> +typedef uint16_t (*event_tx_adapter_enqueue_same_dest_t)(void *port,
> +                                                        struct rte_event ev[],
> +                                                        uint16_t nb_events);
>  /**< @internal Enqueue burst of events on port of a device supporting
>   * burst having same destination Ethernet port & Tx queue.
>   */
>
> -typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
> -                               struct rte_event ev[], uint16_t nb_events);
> +typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
> +                                                  struct rte_event ev[],
> +                                                  uint16_t nb_events);
>  /**< @internal Enqueue burst of events on crypto adapter */
>
>  #define RTE_EVENTDEV_NAME_MAX_LEN      (64)
> @@ -1421,15 +1424,15 @@ struct rte_eventdev {
>         /**< Pointer to PMD dequeue function. */
>         event_dequeue_burst_t dequeue_burst;
>         /**< Pointer to PMD dequeue burst function. */
> -       event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
> +       event_tx_adapter_enqueue_same_dest_t txa_enqueue_same_dest;
>         /**< Pointer to PMD eth Tx adapter burst enqueue function with
>          * events destined to same Eth port & Tx queue.
>          */
> -       event_tx_adapter_enqueue txa_enqueue;
> +       event_tx_adapter_enqueue_t txa_enqueue;
>         /**< Pointer to PMD eth Tx adapter enqueue function. */
>         struct rte_eventdev_data *data;
>         /**< Pointer to device data */
> -       struct rte_eventdev_ops *dev_ops;
> +       struct eventdev_ops *dev_ops;
>         /**< Functions exported by PMD */
>         struct rte_device *dev;
>         /**< Device info. supplied by probing */
> @@ -1438,7 +1441,7 @@ struct rte_eventdev {
>         uint8_t attached : 1;
>         /**< Flag indicating the device is attached */
>
> -       event_crypto_adapter_enqueue ca_enqueue;
> +       event_crypto_adapter_enqueue_t ca_enqueue;
>         /**< Pointer to PMD crypto adapter enqueue function. */
>
>         uint64_t reserved_64s[4]; /**< Reserved for future fields */
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index 7de18497a6..cd72f45d29 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -55,12 +55,6 @@ DPDK_22 {
>         rte_event_eth_tx_adapter_stats_get;
>         rte_event_eth_tx_adapter_stats_reset;
>         rte_event_eth_tx_adapter_stop;
> -       rte_event_pmd_allocate;
> -       rte_event_pmd_pci_probe;
> -       rte_event_pmd_pci_remove;
> -       rte_event_pmd_release;
> -       rte_event_pmd_vdev_init;
> -       rte_event_pmd_vdev_uninit;
>         rte_event_port_attr_get;
>         rte_event_port_default_conf_get;
>         rte_event_port_link;
> @@ -136,8 +130,6 @@ EXPERIMENTAL {
>
>         # changed in 20.11
>         __rte_eventdev_trace_port_setup;
> -       # added in 20.11
> -       rte_event_pmd_pci_probe_named;
>         # added in 21.11
>         rte_event_eth_rx_adapter_create_with_params;
>
> @@ -152,4 +144,13 @@ INTERNAL {
>         global:
>
>         rte_event_pmd_selftest_seqn_dynfield_offset;
> +       rte_event_pmd_allocate;
> +       rte_event_pmd_get_named_dev;
> +       rte_event_pmd_is_valid_dev;
> +       rte_event_pmd_pci_probe;
> +       rte_event_pmd_pci_probe_named;
> +       rte_event_pmd_pci_remove;
> +       rte_event_pmd_release;
> +       rte_event_pmd_vdev_init;
> +       rte_event_pmd_vdev_uninit;
>  };
> --
> 2.17.1
>

^ permalink raw reply	[flat|nested] 119+ messages in thread

* Re: [dpdk-dev] [PATCH v5 11/14] eventdev: move timer adapters memory to hugepage
  2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
@ 2021-10-20 20:24           ` Carrillo, Erik G
  0 siblings, 0 replies; 119+ messages in thread
From: Carrillo, Erik G @ 2021-10-20 20:24 UTC (permalink / raw)
  To: pbhagavatula, jerinj; +Cc: dev

Hi Pavan and Jerin,

> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Monday, October 18, 2021 6:36 PM
> To: jerinj@marvell.com; Carrillo, Erik G <erik.g.carrillo@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [PATCH v5 11/14] eventdev: move timer adapters memory to hugepage
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Move memory used by timer adapters to hugepage.
> Allocate memory on the first adapter create or lookup to address both
> primary and secondary process usecases.
> This will prevent TLB misses if any and aligns to memory structure of other
> subsystems.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  doc/guides/rel_notes/release_21_11.rst |  2 ++
>  lib/eventdev/rte_event_timer_adapter.c | 36 ++++++++++++++++++++++++--
>  2 files changed, 36 insertions(+), 2 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
> index 6442c79977..9694b32002 100644
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -226,6 +226,8 @@ API Changes
>    the crypto/security operation. This field will be used to communicate
>    events such as soft expiry with IPsec in lookaside mode.
> 
> +* eventdev: Move memory used by timer adapters to hugepage. This will prevent
> +  TLB misses if any and aligns to memory structure of other subsystems.
> 
>  ABI Changes
>  -----------
> diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
> index ae55407042..894f532ef0 100644
> --- a/lib/eventdev/rte_event_timer_adapter.c
> +++ b/lib/eventdev/rte_event_timer_adapter.c
> @@ -33,7 +33,7 @@ RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
>  RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
>  RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
> 
> -static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
> +static struct rte_event_timer_adapter *adapters;
> 
>  static const struct event_timer_adapter_ops swtim_ops;
> 
> @@ -138,6 +138,17 @@ rte_event_timer_adapter_create_ext(
>  	int n, ret;
>  	struct rte_eventdev *dev;
> 
> +	if (adapters == NULL) {
> +		adapters = rte_zmalloc("Eventdev",
> +				       sizeof(struct rte_event_timer_adapter) *
> +					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
> +				       RTE_CACHE_LINE_SIZE);
> +		if (adapters == NULL) {
> +			rte_errno = ENOMEM;
> +			return NULL;
> +		}
> +	}
> +
>  	if (conf == NULL) {
>  		rte_errno = EINVAL;
>  		return NULL;
> @@ -312,6 +323,17 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
>  	int ret;
>  	struct rte_eventdev *dev;
> 
> +	if (adapters == NULL) {
> +		adapters = rte_zmalloc("Eventdev",
> +				       sizeof(struct rte_event_timer_adapter) *
> +					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
> +				       RTE_CACHE_LINE_SIZE);
> +		if (adapters == NULL) {
> +			rte_errno = ENOMEM;
> +			return NULL;
> +		}
> +	}
> +
>  	if (adapters[adapter_id].allocated)
>  		return &adapters[adapter_id]; /* Adapter is already loaded */
> 
> @@ -358,7 +380,7 @@ rte_event_timer_adapter_lookup(uint16_t adapter_id)
>  int
>  rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
>  {
> -	int ret;
> +	int i, ret;
> 
>  	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
>  	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
> @@ -382,6 +404,16 @@ rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
>  	adapter->data = NULL;
>  	adapter->allocated = 0;
> 
> +	ret = 0;
> +	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
> +		if (adapters[i].allocated)
> +			ret = adapter[i].allocated;
> +

I found a typo here, but it looks like this series has already been accepted, so I submitted the following patch for the issue:

http://patchwork.dpdk.org/project/dpdk/patch/20211020202021.1205135-1-erik.g.carrillo@intel.com/
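
For clarity, the check is meant to scan the adapters array, i.e. presumably
something along these lines (a sketch of the fix; see the patch linked above
for the actual change):

	ret = 0;
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		if (adapters[i].allocated)
			ret = adapters[i].allocated; /* was: adapter[i] */

	if (!ret) {
		rte_free(adapters);
		adapters = NULL;
	}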

Besides that, this patch and the others I was copied on look good to me.

Thanks,
Erik

> +	if (!ret) {
> +		rte_free(adapters);
> +		adapters = NULL;
> +	}
> +
>  	rte_eventdev_trace_timer_adapter_free(adapter);
>  	return 0;
>  }
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 119+ messages in thread

end of thread, other threads:[~2021-10-20 20:25 UTC | newest]

Thread overview: 119+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 02/15] eventdev: separate internal structures pbhagavatula
2021-10-14  9:11   ` Jerin Jacob
2021-08-23 19:40 ` [dpdk-dev] [RFC 03/15] eventdev: move eventdevs globals to hugepage mem pbhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 04/15] eventdev: move inline APIs into separate structure pbhagavatula
2021-09-08 12:03   ` Kinsella, Ray
2021-08-23 19:40 ` [dpdk-dev] [RFC 05/15] eventdev: add helper functions for new driver API pbhagavatula
2021-09-08 12:04   ` Kinsella, Ray
2021-08-23 19:40 ` [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions pbhagavatula
2021-08-30 14:41   ` Jayatheerthan, Jay
2021-08-30 14:46   ` David Marchand
2021-10-02 20:32     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 07/15] eventdev: make drivers to use new API pbhagavatula
2021-09-08  6:43   ` Hemant Agrawal
2021-08-23 19:40 ` [dpdk-dev] [RFC 08/15] eventdev: hide event device related structures pbhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 09/15] eventdev: hide timer adapter pmd file pbhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs pbhagavatula
2021-08-30 14:42   ` Jayatheerthan, Jay
2021-08-23 19:40 ` [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object pbhagavatula
2021-08-23 20:42   ` Carrillo, Erik G
2021-08-24  5:16     ` Pavan Nikhilesh Bhagavatula
2021-08-24 15:10   ` Stephen Hemminger
2021-09-01  6:48     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-09-07 21:02       ` Carrillo, Erik G
2021-09-07 21:31   ` [dpdk-dev] " Stephen Hemminger
2021-08-23 19:40 ` [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-08-24 13:50   ` Carrillo, Erik G
2021-09-01  6:30     ` Pavan Nikhilesh Bhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable pbhagavatula
2021-08-30 14:43   ` Jayatheerthan, Jay
2021-09-08 12:05   ` Kinsella, Ray
2021-08-23 19:40 ` [dpdk-dev] [RFC 14/15] eventdev: make trace APIs internal pbhagavatula
2021-08-30 14:47   ` Jayatheerthan, Jay
2021-08-23 19:40 ` [dpdk-dev] [RFC 15/15] eventdev: promote trace variables to stable pbhagavatula
2021-09-08 12:06   ` Kinsella, Ray
2021-08-24  7:43 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Mattias Rönnblom
2021-08-24  7:47   ` Pavan Nikhilesh Bhagavatula
2021-08-24  8:05     ` Pavan Nikhilesh Bhagavatula
2021-08-30 10:25   ` Mattias Rönnblom
2021-08-30 16:00     ` [dpdk-dev] [RFC] eventdev: uninline inline API functions Mattias Rönnblom
2021-08-31 12:28       ` Jerin Jacob
2021-08-31 12:34         ` Mattias Rönnblom
2021-09-28  9:56 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Jerin Jacob
2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 02/13] eventdev: separate internal structures pbhagavatula
2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 03/13] eventdev: allocate max space for internal arrays pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 04/13] eventdev: move inline APIs into separate structure pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 05/13] eventdev: use new API for inline functions pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 06/13] eventdev: hide event device related structures pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v 07/13] eventdev: hide timer adapter PMD file pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 " pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 08/13] eventdev: remove rte prefix for internal structs pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 09/13] eventdev: rearrange fields in timer object pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 10/13] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 11/13] eventdev: promote event vector API to stable pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 12/13] eventdev: make trace APIs internal pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 13/13] eventdev: mark trace variables as internal pbhagavatula
2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
2021-10-06  6:49     ` [dpdk-dev] [PATCH v3 02/14] eventdev: separate internal structures pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 03/14] eventdev: allocate max space for internal arrays pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 04/14] eventdev: move inline APIs into separate structure pbhagavatula
2021-10-14  9:20       ` Jerin Jacob
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 05/14] drivers/event: invoke probing finish function pbhagavatula
2021-10-14  9:22       ` Jerin Jacob
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline functions pbhagavatula
2021-10-11  9:51       ` Gujjar, Abhinandan S
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 07/14] eventdev: hide event device related structures pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 08/14] eventdev: hide timer adapter PMD file pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
2021-10-11  9:58       ` Gujjar, Abhinandan S
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 10/14] eventdev: rearrange fields in timer object pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-10-07 20:49       ` Carrillo, Erik G
2021-10-08  5:38         ` Pavan Nikhilesh Bhagavatula
2021-10-08 15:57           ` Carrillo, Erik G
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 12/14] eventdev: promote event vector API to stable pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 13/14] eventdev: make trace APIs internal pbhagavatula
2021-10-11  9:59       ` Gujjar, Abhinandan S
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 14/14] eventdev: mark trace variables as internal pbhagavatula
2021-10-06  7:11       ` David Marchand
2021-10-14  9:28         ` Jerin Jacob
2021-10-14  9:05     ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " Jerin Jacob
2021-10-14  9:08     ` Jerin Jacob
2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 02/14] eventdev: separate internal structures pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 03/14] eventdev: allocate max space for internal arrays pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 04/14] eventdev: move inline APIs into separate structure pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 05/14] drivers/event: invoke probing finish function pbhagavatula
2021-10-17 15:34         ` Hemant Agrawal
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 06/14] eventdev: use new API for inline functions pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 07/14] eventdev: hide event device related structures pbhagavatula
2021-10-18  7:07         ` Harman Kalra
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 08/14] eventdev: hide timer adapter PMD file pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 10/14] eventdev: rearrange fields in timer object pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 12/14] eventdev: promote event vector API to stable pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 13/14] eventdev: make trace APIs internal pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 14/14] eventdev: mark trace variables as internal pbhagavatula
2021-10-17  5:58         ` Jerin Jacob
2021-10-18 15:06           ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-10-19  7:01             ` David Marchand
2021-10-17 15:35       ` [dpdk-dev] [PATCH v4 01/14] eventdev: make driver interface " Hemant Agrawal
2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 02/14] eventdev: separate internal structures pbhagavatula
2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 03/14] eventdev: allocate max space for internal arrays pbhagavatula
2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 04/14] eventdev: move inline APIs into separate structure pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 05/14] drivers/event: invoke probing finish function pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 06/14] eventdev: use new API for inline functions pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 07/14] eventdev: hide event device related structures pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 08/14] eventdev: hide timer adapter PMD file pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 10/14] eventdev: rearrange fields in timer object pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-10-20 20:24           ` Carrillo, Erik G
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 12/14] eventdev: promote event vector API to stable pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 13/14] eventdev: make trace APIs internal pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 14/14] eventdev: mark trace variables as internal pbhagavatula
2021-10-20  4:01         ` [dpdk-dev] [PATCH v5 01/14] eventdev: make driver interface " Jerin Jacob
