DPDK patches and discussions
From: Jerin Jacob <jerinjacobk@gmail.com>
To: Pavan Nikhilesh <pbhagavatula@marvell.com>
Cc: Jerin Jacob <jerinj@marvell.com>,
	 "Ananyev, Konstantin" <konstantin.ananyev@intel.com>,
	dpdk-dev <dev@dpdk.org>
Subject: Re: [dpdk-dev] [RFC 02/15] eventdev: separate internal structures
Date: Thu, 14 Oct 2021 14:41:53 +0530	[thread overview]
Message-ID: <CALBAE1OtNwLOVxA9m6SUWgKOmeqtjj2u4kiqzkHMw0sgg1b0Zg@mail.gmail.com> (raw)
In-Reply-To: <20210823194020.1229-2-pbhagavatula@marvell.com>

On Tue, Aug 24, 2021 at 1:10 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Create rte_eventdev_core.h and move all the internal data structures
> to this file. These structures are mostly used by drivers, but they
> need to be in the public header file as they are accessed by datapath
> inline functions for performance reasons.
> The accessibility of these data structures is not changed.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/eventdev_pmd.h      |   3 -
>  lib/eventdev/meson.build         |   3 +
>  lib/eventdev/rte_eventdev.h      | 715 +++++++++++++------------------
>  lib/eventdev/rte_eventdev_core.h | 144 +++++++
>  4 files changed, 443 insertions(+), 422 deletions(-)
>  create mode 100644 lib/eventdev/rte_eventdev_core.h

Please validate the Doxygen output.
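
Also, for anyone following the series, a minimal sketch of what the split
means on the application side, assuming only what the hunks below show
(rte_eventdev.h now ends by including rte_eventdev_core.h and meson installs
the new file as an indirect header); the helper name here is illustrative:

#include <rte_eventdev.h>  /* still the only header an application needs */

static inline uint16_t
app_enqueue_one(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
{
        /* The public inline wrapper still dereferences rte_eventdevs[];
         * the internal structures it needs are now pulled in indirectly
         * through rte_eventdev_core.h.
         */
        return rte_event_enqueue_burst(dev_id, port_id, ev, 1);
}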


>
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index 5dab9e2f70..a25d3f1fb5 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -91,9 +91,6 @@ struct rte_eventdev_global {
>         uint8_t nb_devs;        /**< Number of devices found */
>  };
>
> -extern struct rte_eventdev *rte_eventdevs;
> -/** The pool of rte_eventdev structures. */
> -
>  /**
>   * Get the rte_eventdev structure device pointer for the named device.
>   *
> diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
> index 523ea9ccae..8b51fde361 100644
> --- a/lib/eventdev/meson.build
> +++ b/lib/eventdev/meson.build
> @@ -27,6 +27,9 @@ headers = files(
>          'rte_event_crypto_adapter.h',
>          'rte_event_eth_tx_adapter.h',
>  )
> +indirect_headers += files(
> +        'rte_eventdev_core.h',
> +)
>  driver_sdk_headers += files(
>          'eventdev_pmd.h',
>          'eventdev_pmd_pci.h',
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 6ba116002f..1b11d4576d 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1324,314 +1324,6 @@ int
>  rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
>                                 uint32_t *caps);
>
> -struct eventdev_ops;
> -struct rte_eventdev;
> -
> -typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
> -/**< @internal Enqueue event on port of a device */
> -
> -typedef uint16_t (*event_enqueue_burst_t)(void *port,
> -                       const struct rte_event ev[], uint16_t nb_events);
> -/**< @internal Enqueue burst of events on port of a device */
> -
> -typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
> -               uint64_t timeout_ticks);
> -/**< @internal Dequeue event from port of a device */
> -
> -typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
> -               uint16_t nb_events, uint64_t timeout_ticks);
> -/**< @internal Dequeue burst of events from port of a device */
> -
> -typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
> -                               struct rte_event ev[], uint16_t nb_events);
> -/**< @internal Enqueue burst of events on port of a device */
> -
> -typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
> -               struct rte_event ev[], uint16_t nb_events);
> -/**< @internal Enqueue burst of events on port of a device supporting
> - * burst having same destination Ethernet port & Tx queue.
> - */
> -
> -typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
> -                               struct rte_event ev[], uint16_t nb_events);
> -/**< @internal Enqueue burst of events on crypto adapter */
> -
> -#define RTE_EVENTDEV_NAME_MAX_LEN      (64)
> -/**< @internal Max length of name of event PMD */
> -
> -/**
> - * @internal
> - * The data part, with no function pointers, associated with each device.
> - *
> - * This structure is safe to place in shared memory to be common among
> - * different processes in a multi-process configuration.
> - */
> -struct rte_eventdev_data {
> -       int socket_id;
> -       /**< Socket ID where memory is allocated */
> -       uint8_t dev_id;
> -       /**< Device ID for this instance */
> -       uint8_t nb_queues;
> -       /**< Number of event queues. */
> -       uint8_t nb_ports;
> -       /**< Number of event ports. */
> -       void **ports;
> -       /**< Array of pointers to ports. */
> -       struct rte_event_port_conf *ports_cfg;
> -       /**< Array of port configuration structures. */
> -       struct rte_event_queue_conf *queues_cfg;
> -       /**< Array of queue configuration structures. */
> -       uint16_t *links_map;
> -       /**< Memory to store queues to port connections. */
> -       void *dev_private;
> -       /**< PMD-specific private data */
> -       uint32_t event_dev_cap;
> -       /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
> -       struct rte_event_dev_config dev_conf;
> -       /**< Configuration applied to device. */
> -       uint8_t service_inited;
> -       /* Service initialization state */
> -       uint32_t service_id;
> -       /* Service ID*/
> -       void *dev_stop_flush_arg;
> -       /**< User-provided argument for event flush function */
> -
> -       RTE_STD_C11
> -       uint8_t dev_started : 1;
> -       /**< Device state: STARTED(1)/STOPPED(0) */
> -
> -       char name[RTE_EVENTDEV_NAME_MAX_LEN];
> -       /**< Unique identifier name */
> -
> -       uint64_t reserved_64s[4]; /**< Reserved for future fields */
> -       void *reserved_ptrs[4];   /**< Reserved for future fields */
> -} __rte_cache_aligned;
> -
> -/** @internal The data structure associated with each event device. */
> -struct rte_eventdev {
> -       event_enqueue_t enqueue;
> -       /**< Pointer to PMD enqueue function. */
> -       event_enqueue_burst_t enqueue_burst;
> -       /**< Pointer to PMD enqueue burst function. */
> -       event_enqueue_burst_t enqueue_new_burst;
> -       /**< Pointer to PMD enqueue burst function(op new variant) */
> -       event_enqueue_burst_t enqueue_forward_burst;
> -       /**< Pointer to PMD enqueue burst function(op forward variant) */
> -       event_dequeue_t dequeue;
> -       /**< Pointer to PMD dequeue function. */
> -       event_dequeue_burst_t dequeue_burst;
> -       /**< Pointer to PMD dequeue burst function. */
> -       event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
> -       /**< Pointer to PMD eth Tx adapter burst enqueue function with
> -        * events destined to same Eth port & Tx queue.
> -        */
> -       event_tx_adapter_enqueue txa_enqueue;
> -       /**< Pointer to PMD eth Tx adapter enqueue function. */
> -       struct rte_eventdev_data *data;
> -       /**< Pointer to device data */
> -       struct eventdev_ops *dev_ops;
> -       /**< Functions exported by PMD */
> -       struct rte_device *dev;
> -       /**< Device info. supplied by probing */
> -
> -       RTE_STD_C11
> -       uint8_t attached : 1;
> -       /**< Flag indicating the device is attached */
> -
> -       event_crypto_adapter_enqueue ca_enqueue;
> -       /**< Pointer to PMD crypto adapter enqueue function. */
> -
> -       uint64_t reserved_64s[4]; /**< Reserved for future fields */
> -       void *reserved_ptrs[3];   /**< Reserved for future fields */
> -} __rte_cache_aligned;
> -
> -extern struct rte_eventdev *rte_eventdevs;
> -/** @internal The pool of rte_eventdev structures. */
> -
> -static __rte_always_inline uint16_t
> -__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events,
> -                       const event_enqueue_burst_t fn)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -
> -       if (port_id >= dev->data->nb_ports) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -#endif
> -       rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
> -       /*
> -        * Allow zero cost non burst mode routine invocation if application
> -        * requests nb_events as const one
> -        */
> -       if (nb_events == 1)
> -               return (*dev->enqueue)(dev->data->ports[port_id], ev);
> -       else
> -               return fn(dev->data->ports[port_id], ev, nb_events);
> -}
> -
> -/**
> - * Enqueue a burst of events objects or an event object supplied in *rte_event*
> - * structure on an  event device designated by its *dev_id* through the event
> - * port specified by *port_id*. Each event object specifies the event queue on
> - * which it will be enqueued.
> - *
> - * The *nb_events* parameter is the number of event objects to enqueue which are
> - * supplied in the *ev* array of *rte_event* structure.
> - *
> - * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
> - * enqueued to the same port that their associated events were dequeued from.
> - *
> - * The rte_event_enqueue_burst() function returns the number of
> - * events objects it actually enqueued. A return value equal to *nb_events*
> - * means that all event objects have been enqueued.
> - *
> - * @param dev_id
> - *   The identifier of the device.
> - * @param port_id
> - *   The identifier of the event port.
> - * @param ev
> - *   Points to an array of *nb_events* objects of type *rte_event* structure
> - *   which contain the event object enqueue operations to be processed.
> - * @param nb_events
> - *   The number of event objects to enqueue, typically number of
> - *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> - *   available for this port.
> - *
> - * @return
> - *   The number of event objects actually enqueued on the event device. The
> - *   return value can be less than the value of the *nb_events* parameter when
> - *   the event devices queue is full or if invalid parameters are specified in a
> - *   *rte_event*. If the return value is less than *nb_events*, the remaining
> - *   events at the end of ev[] are not consumed and the caller has to take care
> - *   of them, and rte_errno is set accordingly. Possible errno values include:
> - *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> - *              ID is invalid, or an event's sched type doesn't match the
> - *              capabilities of the destination queue.
> - *   - ENOSPC   The event port was backpressured and unable to enqueue
> - *              one or more events. This error code is only applicable to
> - *              closed systems.
> - * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> - */
> -static inline uint16_t
> -rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_burst);
> -}
> -
> -/**
> - * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
> - * an event device designated by its *dev_id* through the event port specified
> - * by *port_id*.
> - *
> - * Provides the same functionality as rte_event_enqueue_burst(), expect that
> - * application can use this API when the all objects in the burst contains
> - * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
> - * function can provide the additional hint to the PMD and optimize if possible.
> - *
> - * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
> - * has event object of operation type != RTE_EVENT_OP_NEW.
> - *
> - * @param dev_id
> - *   The identifier of the device.
> - * @param port_id
> - *   The identifier of the event port.
> - * @param ev
> - *   Points to an array of *nb_events* objects of type *rte_event* structure
> - *   which contain the event object enqueue operations to be processed.
> - * @param nb_events
> - *   The number of event objects to enqueue, typically number of
> - *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> - *   available for this port.
> - *
> - * @return
> - *   The number of event objects actually enqueued on the event device. The
> - *   return value can be less than the value of the *nb_events* parameter when
> - *   the event devices queue is full or if invalid parameters are specified in a
> - *   *rte_event*. If the return value is less than *nb_events*, the remaining
> - *   events at the end of ev[] are not consumed and the caller has to take care
> - *   of them, and rte_errno is set accordingly. Possible errno values include:
> - *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> - *              ID is invalid, or an event's sched type doesn't match the
> - *              capabilities of the destination queue.
> - *   - ENOSPC   The event port was backpressured and unable to enqueue
> - *              one or more events. This error code is only applicable to
> - *              closed systems.
> - * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> - * @see rte_event_enqueue_burst()
> - */
> -static inline uint16_t
> -rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_new_burst);
> -}
> -
> -/**
> - * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
> - * on an event device designated by its *dev_id* through the event port
> - * specified by *port_id*.
> - *
> - * Provides the same functionality as rte_event_enqueue_burst(), expect that
> - * application can use this API when the all objects in the burst contains
> - * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
> - * function can provide the additional hint to the PMD and optimize if possible.
> - *
> - * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
> - * has event object of operation type != RTE_EVENT_OP_FORWARD.
> - *
> - * @param dev_id
> - *   The identifier of the device.
> - * @param port_id
> - *   The identifier of the event port.
> - * @param ev
> - *   Points to an array of *nb_events* objects of type *rte_event* structure
> - *   which contain the event object enqueue operations to be processed.
> - * @param nb_events
> - *   The number of event objects to enqueue, typically number of
> - *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> - *   available for this port.
> - *
> - * @return
> - *   The number of event objects actually enqueued on the event device. The
> - *   return value can be less than the value of the *nb_events* parameter when
> - *   the event devices queue is full or if invalid parameters are specified in a
> - *   *rte_event*. If the return value is less than *nb_events*, the remaining
> - *   events at the end of ev[] are not consumed and the caller has to take care
> - *   of them, and rte_errno is set accordingly. Possible errno values include:
> - *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> - *              ID is invalid, or an event's sched type doesn't match the
> - *              capabilities of the destination queue.
> - *   - ENOSPC   The event port was backpressured and unable to enqueue
> - *              one or more events. This error code is only applicable to
> - *              closed systems.
> - * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> - * @see rte_event_enqueue_burst()
> - */
> -static inline uint16_t
> -rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
> -                       const struct rte_event ev[], uint16_t nb_events)
> -{
> -       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> -                       dev->enqueue_forward_burst);
> -}
> -
>  /**
>   * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
>   *
> @@ -1662,124 +1354,27 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
>                                         uint64_t *timeout_ticks);
>
>  /**
> - * Dequeue a burst of events objects or an event object from the event port
> - * designated by its *event_port_id*, on an event device designated
> - * by its *dev_id*.
> - *
> - * rte_event_dequeue_burst() does not dictate the specifics of scheduling
> - * algorithm as each eventdev driver may have different criteria to schedule
> - * an event. However, in general, from an application perspective scheduler may
> - * use the following scheme to dispatch an event to the port.
> - *
> - * 1) Selection of event queue based on
> - *   a) The list of event queues are linked to the event port.
> - *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
> - *   queue selection from list is based on event queue priority relative to
> - *   other event queue supplied as *priority* in rte_event_queue_setup()
> - *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
> - *   queue selection from the list is based on event priority supplied as
> - *   *priority* in rte_event_enqueue_burst()
> - * 2) Selection of event
> - *   a) The number of flows available in selected event queue.
> - *   b) Schedule type method associated with the event
> - *
> - * The *nb_events* parameter is the maximum number of event objects to dequeue
> - * which are returned in the *ev* array of *rte_event* structure.
> + * Link multiple source event queues supplied in *queues* to the destination
> + * event port designated by its *port_id* with associated service priority
> + * supplied in *priorities* on the event device designated by its *dev_id*.
>   *
> - * The rte_event_dequeue_burst() function returns the number of events objects
> - * it actually dequeued. A return value equal to *nb_events* means that all
> - * event objects have been dequeued.
> + * The link establishment shall enable the event port *port_id* from
> + * receiving events from the specified event queue(s) supplied in *queues*
>   *
> - * The number of events dequeued is the number of scheduler contexts held by
> - * this port. These contexts are automatically released in the next
> - * rte_event_dequeue_burst() invocation if the port supports implicit
> - * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
> - * operation can be used to release the contexts early.
> + * An event queue may link to one or more event ports.
> + * The number of links can be established from an event queue to event port is
> + * implementation defined.
>   *
> - * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
> - * enqueued to the same port that their associated events were dequeued from.
> + * Event queue(s) to event port link establishment can be changed at runtime
> + * without re-configuring the device to support scaling and to reduce the
> + * latency of critical work by establishing the link with more event ports
> + * at runtime.
>   *
>   * @param dev_id
>   *   The identifier of the device.
> + *
>   * @param port_id
> - *   The identifier of the event port.
> - * @param[out] ev
> - *   Points to an array of *nb_events* objects of type *rte_event* structure
> - *   for output to be populated with the dequeued event objects.
> - * @param nb_events
> - *   The maximum number of event objects to dequeue, typically number of
> - *   rte_event_port_dequeue_depth() available for this port.
> - *
> - * @param timeout_ticks
> - *   - 0 no-wait, returns immediately if there is no event.
> - *   - >0 wait for the event, if the device is configured with
> - *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
> - *   at least one event is available or *timeout_ticks* time.
> - *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
> - *   then this function will wait until the event available or
> - *   *dequeue_timeout_ns* ns which was previously supplied to
> - *   rte_event_dev_configure()
> - *
> - * @return
> - * The number of event objects actually dequeued from the port. The return
> - * value can be less than the value of the *nb_events* parameter when the
> - * event port's queue is not full.
> - *
> - * @see rte_event_port_dequeue_depth()
> - */
> -static inline uint16_t
> -rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
> -                       uint16_t nb_events, uint64_t timeout_ticks)
> -{
> -       struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> -
> -#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> -       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -
> -       if (port_id >= dev->data->nb_ports) {
> -               rte_errno = EINVAL;
> -               return 0;
> -       }
> -#endif
> -       rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
> -       /*
> -        * Allow zero cost non burst mode routine invocation if application
> -        * requests nb_events as const one
> -        */
> -       if (nb_events == 1)
> -               return (*dev->dequeue)(
> -                       dev->data->ports[port_id], ev, timeout_ticks);
> -       else
> -               return (*dev->dequeue_burst)(
> -                       dev->data->ports[port_id], ev, nb_events,
> -                               timeout_ticks);
> -}
> -
> -/**
> - * Link multiple source event queues supplied in *queues* to the destination
> - * event port designated by its *port_id* with associated service priority
> - * supplied in *priorities* on the event device designated by its *dev_id*.
> - *
> - * The link establishment shall enable the event port *port_id* from
> - * receiving events from the specified event queue(s) supplied in *queues*
> - *
> - * An event queue may link to one or more event ports.
> - * The number of links can be established from an event queue to event port is
> - * implementation defined.
> - *
> - * Event queue(s) to event port link establishment can be changed at runtime
> - * without re-configuring the device to support scaling and to reduce the
> - * latency of critical work by establishing the link with more event ports
> - * at runtime.
> - *
> - * @param dev_id
> - *   The identifier of the device.
> - *
> - * @param port_id
> - *   Event port identifier to select the destination port to link.
> + *   Event port identifier to select the destination port to link.
>   *
>   * @param queues
>   *   Points to an array of *nb_links* event queues to be linked
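
A hedged example of the link API documented in this hunk; the queue IDs,
priorities and helper name are illustrative, not taken from the patch:

#include <rte_common.h>
#include <rte_eventdev.h>

static int
app_link_port(uint8_t dev_id, uint8_t port_id)
{
        const uint8_t queues[] = {0, 1};
        const uint8_t priorities[] = {RTE_EVENT_DEV_PRIORITY_NORMAL,
                                      RTE_EVENT_DEV_PRIORITY_NORMAL};
        int nb_links;

        nb_links = rte_event_port_link(dev_id, port_id, queues, priorities,
                                       RTE_DIM(queues));
        /* A short count means some links were not established;
         * rte_errno holds the reason.
         */
        return nb_links == (int)RTE_DIM(queues) ? 0 : -1;
}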
> @@ -2145,6 +1740,288 @@ rte_event_vector_pool_create(const char *name, unsigned int n,
>                              unsigned int cache_size, uint16_t nb_elem,
>                              int socket_id);
>
> +#include <rte_eventdev_core.h>
> +
> +static __rte_always_inline uint16_t
> +__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> +                         const struct rte_event ev[], uint16_t nb_events,
> +                         const event_enqueue_burst_t fn)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> +       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +
> +       if (port_id >= dev->data->nb_ports) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +#endif
> +       rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
> +       /*
> +        * Allow zero cost non burst mode routine invocation if application
> +        * requests nb_events as const one
> +        */
> +       if (nb_events == 1)
> +               return (*dev->enqueue)(dev->data->ports[port_id], ev);
> +       else
> +               return fn(dev->data->ports[port_id], ev, nb_events);
> +}
> +
> +/**
> + * Enqueue a burst of events objects or an event object supplied in *rte_event*
> + * structure on an  event device designated by its *dev_id* through the event
> + * port specified by *port_id*. Each event object specifies the event queue on
> + * which it will be enqueued.
> + *
> + * The *nb_events* parameter is the number of event objects to enqueue which are
> + * supplied in the *ev* array of *rte_event* structure.
> + *
> + * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
> + * enqueued to the same port that their associated events were dequeued from.
> + *
> + * The rte_event_enqueue_burst() function returns the number of
> + * events objects it actually enqueued. A return value equal to *nb_events*
> + * means that all event objects have been enqueued.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param port_id
> + *   The identifier of the event port.
> + * @param ev
> + *   Points to an array of *nb_events* objects of type *rte_event* structure
> + *   which contain the event object enqueue operations to be processed.
> + * @param nb_events
> + *   The number of event objects to enqueue, typically number of
> + *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> + *   available for this port.
> + *
> + * @return
> + *   The number of event objects actually enqueued on the event device. The
> + *   return value can be less than the value of the *nb_events* parameter when
> + *   the event devices queue is full or if invalid parameters are specified in a
> + *   *rte_event*. If the return value is less than *nb_events*, the remaining
> + *   events at the end of ev[] are not consumed and the caller has to take care
> + *   of them, and rte_errno is set accordingly. Possible errno values include:
> + *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> + *              ID is invalid, or an event's sched type doesn't match the
> + *              capabilities of the destination queue.
> + *   - ENOSPC   The event port was backpressured and unable to enqueue
> + *              one or more events. This error code is only applicable to
> + *              closed systems.
> + * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> + */
> +static inline uint16_t
> +rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
> +                       const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                                        dev->enqueue_burst);
> +}
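
A usage sketch for the wrapper above; the helper name and the event field
values are illustrative:

#include <rte_eventdev.h>

static void
app_produce_burst(uint8_t dev_id, uint8_t port_id, uint8_t queue_id,
                  struct rte_event evs[], uint16_t n)
{
        uint16_t i, sent = 0;

        for (i = 0; i < n; i++) {
                evs[i].op = RTE_EVENT_OP_NEW;
                evs[i].queue_id = queue_id;
                evs[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
                evs[i].event_type = RTE_EVENT_TYPE_CPU;
        }
        /* Retry the unconsumed tail; a real application would also check
         * rte_errno (EINVAL vs ENOSPC) before retrying.
         */
        while (sent < n)
                sent += rte_event_enqueue_burst(dev_id, port_id,
                                                evs + sent, n - sent);
}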
> +
> +/**
> + * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
> + * an event device designated by its *dev_id* through the event port specified
> + * by *port_id*.
> + *
> + * Provides the same functionality as rte_event_enqueue_burst(), expect that
> + * application can use this API when the all objects in the burst contains
> + * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
> + * function can provide the additional hint to the PMD and optimize if possible.
> + *
> + * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
> + * has event object of operation type != RTE_EVENT_OP_NEW.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param port_id
> + *   The identifier of the event port.
> + * @param ev
> + *   Points to an array of *nb_events* objects of type *rte_event* structure
> + *   which contain the event object enqueue operations to be processed.
> + * @param nb_events
> + *   The number of event objects to enqueue, typically number of
> + *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> + *   available for this port.
> + *
> + * @return
> + *   The number of event objects actually enqueued on the event device. The
> + *   return value can be less than the value of the *nb_events* parameter when
> + *   the event devices queue is full or if invalid parameters are specified in a
> + *   *rte_event*. If the return value is less than *nb_events*, the remaining
> + *   events at the end of ev[] are not consumed and the caller has to take care
> + *   of them, and rte_errno is set accordingly. Possible errno values include:
> + *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> + *              ID is invalid, or an event's sched type doesn't match the
> + *              capabilities of the destination queue.
> + *   - ENOSPC   The event port was backpressured and unable to enqueue
> + *              one or more events. This error code is only applicable to
> + *              closed systems.
> + * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> + * @see rte_event_enqueue_burst()
> + */
> +static inline uint16_t
> +rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
> +                           const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                                        dev->enqueue_new_burst);
> +}
> +
> +/**
> + * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
> + * on an event device designated by its *dev_id* through the event port
> + * specified by *port_id*.
> + *
> + * Provides the same functionality as rte_event_enqueue_burst(), expect that
> + * application can use this API when the all objects in the burst contains
> + * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
> + * function can provide the additional hint to the PMD and optimize if possible.
> + *
> + * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
> + * has event object of operation type != RTE_EVENT_OP_FORWARD.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param port_id
> + *   The identifier of the event port.
> + * @param ev
> + *   Points to an array of *nb_events* objects of type *rte_event* structure
> + *   which contain the event object enqueue operations to be processed.
> + * @param nb_events
> + *   The number of event objects to enqueue, typically number of
> + *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
> + *   available for this port.
> + *
> + * @return
> + *   The number of event objects actually enqueued on the event device. The
> + *   return value can be less than the value of the *nb_events* parameter when
> + *   the event devices queue is full or if invalid parameters are specified in a
> + *   *rte_event*. If the return value is less than *nb_events*, the remaining
> + *   events at the end of ev[] are not consumed and the caller has to take care
> + *   of them, and rte_errno is set accordingly. Possible errno values include:
> + *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
> + *              ID is invalid, or an event's sched type doesn't match the
> + *              capabilities of the destination queue.
> + *   - ENOSPC   The event port was backpressured and unable to enqueue
> + *              one or more events. This error code is only applicable to
> + *              closed systems.
> + * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
> + * @see rte_event_enqueue_burst()
> + */
> +static inline uint16_t
> +rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
> +                               const struct rte_event ev[], uint16_t nb_events)
> +{
> +       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +       return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
> +                                        dev->enqueue_forward_burst);
> +}
> +
> +/**
> + * Dequeue a burst of events objects or an event object from the event port
> + * designated by its *event_port_id*, on an event device designated
> + * by its *dev_id*.
> + *
> + * rte_event_dequeue_burst() does not dictate the specifics of scheduling
> + * algorithm as each eventdev driver may have different criteria to schedule
> + * an event. However, in general, from an application perspective scheduler may
> + * use the following scheme to dispatch an event to the port.
> + *
> + * 1) Selection of event queue based on
> + *   a) The list of event queues are linked to the event port.
> + *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
> + *   queue selection from list is based on event queue priority relative to
> + *   other event queue supplied as *priority* in rte_event_queue_setup()
> + *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
> + *   queue selection from the list is based on event priority supplied as
> + *   *priority* in rte_event_enqueue_burst()
> + * 2) Selection of event
> + *   a) The number of flows available in selected event queue.
> + *   b) Schedule type method associated with the event
> + *
> + * The *nb_events* parameter is the maximum number of event objects to dequeue
> + * which are returned in the *ev* array of *rte_event* structure.
> + *
> + * The rte_event_dequeue_burst() function returns the number of events objects
> + * it actually dequeued. A return value equal to *nb_events* means that all
> + * event objects have been dequeued.
> + *
> + * The number of events dequeued is the number of scheduler contexts held by
> + * this port. These contexts are automatically released in the next
> + * rte_event_dequeue_burst() invocation if the port supports implicit
> + * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
> + * operation can be used to release the contexts early.
> + *
> + * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
> + * enqueued to the same port that their associated events were dequeued from.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param port_id
> + *   The identifier of the event port.
> + * @param[out] ev
> + *   Points to an array of *nb_events* objects of type *rte_event* structure
> + *   for output to be populated with the dequeued event objects.
> + * @param nb_events
> + *   The maximum number of event objects to dequeue, typically number of
> + *   rte_event_port_dequeue_depth() available for this port.
> + *
> + * @param timeout_ticks
> + *   - 0 no-wait, returns immediately if there is no event.
> + *   - >0 wait for the event, if the device is configured with
> + *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
> + *   at least one event is available or *timeout_ticks* time.
> + *   if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
> + *   then this function will wait until the event available or
> + *   *dequeue_timeout_ns* ns which was previously supplied to
> + *   rte_event_dev_configure()
> + *
> + * @return
> + * The number of event objects actually dequeued from the port. The return
> + * value can be less than the value of the *nb_events* parameter when the
> + * event port's queue is not full.
> + *
> + * @see rte_event_port_dequeue_depth()
> + */
> +static inline uint16_t
> +rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
> +                       uint16_t nb_events, uint64_t timeout_ticks)
> +{
> +       struct rte_eventdev *dev = &rte_eventdevs[dev_id];
> +
> +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> +       if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +
> +       if (port_id >= dev->data->nb_ports) {
> +               rte_errno = EINVAL;
> +               return 0;
> +       }
> +#endif
> +       rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
> +       /*
> +        * Allow zero cost non burst mode routine invocation if application
> +        * requests nb_events as const one
> +        */
> +       if (nb_events == 1)
> +               return (*dev->dequeue)(dev->data->ports[port_id], ev,
> +                                      timeout_ticks);
> +       else
> +               return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
> +                                            nb_events, timeout_ticks);
> +}
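
And a worker-loop sketch tying the dequeue and enqueue wrappers together;
app_done and process_event() are hypothetical application hooks, not part
of eventdev:

#include <stdbool.h>

#include <rte_common.h>
#include <rte_eventdev.h>

static volatile bool app_done;          /* hypothetical stop flag */

static void
process_event(struct rte_event *ev)     /* hypothetical application work */
{
        RTE_SET_USED(ev);
}

static void
app_worker(uint8_t dev_id, uint8_t port_id)
{
        struct rte_event evs[32];
        uint16_t i, nb;

        while (!app_done) {
                nb = rte_event_dequeue_burst(dev_id, port_id, evs,
                                             RTE_DIM(evs), 0 /* no-wait */);
                for (i = 0; i < nb; i++) {
                        process_event(&evs[i]);
                        /* Forward on the same port the event came from. */
                        evs[i].op = RTE_EVENT_OP_FORWARD;
                }
                /* A real worker must retry or release any events the port
                 * does not accept here.
                 */
                if (nb)
                        rte_event_enqueue_forward_burst(dev_id, port_id,
                                                        evs, nb);
        }
}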
> +
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
> new file mode 100644
> index 0000000000..97dfec1ae1
> --- /dev/null
> +++ b/lib/eventdev/rte_eventdev_core.h
> @@ -0,0 +1,144 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2018 Intel Corporation.
> + * Copyright(C) 2021 Marvell.
> + * Copyright 2016 NXP
> + * All rights reserved.
> + */
> +
> +#ifndef _RTE_EVENTDEV_CORE_H_
> +#define _RTE_EVENTDEV_CORE_H_
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
> +/**< @internal Enqueue event on port of a device */
> +
> +typedef uint16_t (*event_enqueue_burst_t)(void *port,
> +                                         const struct rte_event ev[],
> +                                         uint16_t nb_events);
> +/**< @internal Enqueue burst of events on port of a device */
> +
> +typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
> +                                   uint64_t timeout_ticks);
> +/**< @internal Dequeue event from port of a device */
> +
> +typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
> +                                         uint16_t nb_events,
> +                                         uint64_t timeout_ticks);
> +/**< @internal Dequeue burst of events from port of a device */
> +
> +typedef uint16_t (*event_tx_adapter_enqueue)(void *port, struct rte_event ev[],
> +                                            uint16_t nb_events);
> +/**< @internal Enqueue burst of events on port of a device */
> +
> +typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
> +                                                      struct rte_event ev[],
> +                                                      uint16_t nb_events);
> +/**< @internal Enqueue burst of events on port of a device supporting
> + * burst having same destination Ethernet port & Tx queue.
> + */
> +
> +typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
> +                                                struct rte_event ev[],
> +                                                uint16_t nb_events);
> +/**< @internal Enqueue burst of events on crypto adapter */
> +
> +#define RTE_EVENTDEV_NAME_MAX_LEN (64)
> +/**< @internal Max length of name of event PMD */
> +
> +/**
> + * @internal
> + * The data part, with no function pointers, associated with each device.
> + *
> + * This structure is safe to place in shared memory to be common among
> + * different processes in a multi-process configuration.
> + */
> +struct rte_eventdev_data {
> +       int socket_id;
> +       /**< Socket ID where memory is allocated */
> +       uint8_t dev_id;
> +       /**< Device ID for this instance */
> +       uint8_t nb_queues;
> +       /**< Number of event queues. */
> +       uint8_t nb_ports;
> +       /**< Number of event ports. */
> +       void **ports;
> +       /**< Array of pointers to ports. */
> +       struct rte_event_port_conf *ports_cfg;
> +       /**< Array of port configuration structures. */
> +       struct rte_event_queue_conf *queues_cfg;
> +       /**< Array of queue configuration structures. */
> +       uint16_t *links_map;
> +       /**< Memory to store queues to port connections. */
> +       void *dev_private;
> +       /**< PMD-specific private data */
> +       uint32_t event_dev_cap;
> +       /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
> +       struct rte_event_dev_config dev_conf;
> +       /**< Configuration applied to device. */
> +       uint8_t service_inited;
> +       /* Service initialization state */
> +       uint32_t service_id;
> +       /* Service ID*/
> +       void *dev_stop_flush_arg;
> +       /**< User-provided argument for event flush function */
> +
> +       RTE_STD_C11
> +       uint8_t dev_started : 1;
> +       /**< Device state: STARTED(1)/STOPPED(0) */
> +
> +       char name[RTE_EVENTDEV_NAME_MAX_LEN];
> +       /**< Unique identifier name */
> +
> +       uint64_t reserved_64s[4]; /**< Reserved for future fields */
> +       void *reserved_ptrs[4];   /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +/** @internal The data structure associated with each event device. */
> +struct rte_eventdev {
> +       event_enqueue_t enqueue;
> +       /**< Pointer to PMD enqueue function. */
> +       event_enqueue_burst_t enqueue_burst;
> +       /**< Pointer to PMD enqueue burst function. */
> +       event_enqueue_burst_t enqueue_new_burst;
> +       /**< Pointer to PMD enqueue burst function(op new variant) */
> +       event_enqueue_burst_t enqueue_forward_burst;
> +       /**< Pointer to PMD enqueue burst function(op forward variant) */
> +       event_dequeue_t dequeue;
> +       /**< Pointer to PMD dequeue function. */
> +       event_dequeue_burst_t dequeue_burst;
> +       /**< Pointer to PMD dequeue burst function. */
> +       event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
> +       /**< Pointer to PMD eth Tx adapter burst enqueue function with
> +        * events destined to same Eth port & Tx queue.
> +        */
> +       event_tx_adapter_enqueue txa_enqueue;
> +       /**< Pointer to PMD eth Tx adapter enqueue function. */
> +       struct rte_eventdev_data *data;
> +       /**< Pointer to device data */
> +       struct eventdev_ops *dev_ops;
> +       /**< Functions exported by PMD */
> +       struct rte_device *dev;
> +       /**< Device info. supplied by probing */
> +
> +       RTE_STD_C11
> +       uint8_t attached : 1;
> +       /**< Flag indicating the device is attached */
> +
> +       event_crypto_adapter_enqueue ca_enqueue;
> +       /**< Pointer to PMD crypto adapter enqueue function. */
> +
> +       uint64_t reserved_64s[4]; /**< Reserved for future fields */
> +       void *reserved_ptrs[3];   /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +extern struct rte_eventdev *rte_eventdevs;
> +/** @internal The pool of rte_eventdev structures. */
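
For driver writers, a hedged sketch of how these hooks get populated; the
PMD callbacks below are hypothetical and, as the commit message notes, the
structure is still reachable through rte_eventdev.h at this point in the
series:

#include <rte_common.h>
#include <rte_eventdev.h>

static uint16_t
dummy_enqueue(void *port, const struct rte_event *ev)
{
        RTE_SET_USED(port);
        RTE_SET_USED(ev);
        return 0;                       /* nothing enqueued */
}

static uint16_t
dummy_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
        RTE_SET_USED(port);
        RTE_SET_USED(ev);
        RTE_SET_USED(timeout_ticks);
        return 0;                       /* nothing available */
}

static void
dummy_fill_fastpath(struct rte_eventdev *dev)
{
        dev->enqueue = dummy_enqueue;
        dev->dequeue = dummy_dequeue;
        /* A real PMD also sets enqueue_burst, enqueue_new_burst,
         * enqueue_forward_burst, dequeue_burst and the Tx/crypto adapter
         * hooks declared above.
         */
}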
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /*_RTE_EVENTDEV_CORE_H_*/
> --
> 2.17.1
>

Thread overview: 119+ messages
2021-08-23 19:40 [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal pbhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 02/15] eventdev: separate internal structures pbhagavatula
2021-10-14  9:11   ` Jerin Jacob [this message]
2021-08-23 19:40 ` [dpdk-dev] [RFC 03/15] eventdev: move eventdevs globals to hugepage mem pbhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 04/15] eventdev: move inline APIs into separate structure pbhagavatula
2021-09-08 12:03   ` Kinsella, Ray
2021-08-23 19:40 ` [dpdk-dev] [RFC 05/15] eventdev: add helper functions for new driver API pbhagavatula
2021-09-08 12:04   ` Kinsella, Ray
2021-08-23 19:40 ` [dpdk-dev] [RFC 06/15] eventdev: use new API for inline functions pbhagavatula
2021-08-30 14:41   ` Jayatheerthan, Jay
2021-08-30 14:46   ` David Marchand
2021-10-02 20:32     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 07/15] eventdev: make drivers to use new API pbhagavatula
2021-09-08  6:43   ` Hemant Agrawal
2021-08-23 19:40 ` [dpdk-dev] [RFC 08/15] eventdev: hide event device related structures pbhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 09/15] eventdev: hide timer adapter pmd file pbhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs pbhagavatula
2021-08-30 14:42   ` Jayatheerthan, Jay
2021-08-23 19:40 ` [dpdk-dev] [RFC 11/15] eventdev: reserve fields in timer object pbhagavatula
2021-08-23 20:42   ` Carrillo, Erik G
2021-08-24  5:16     ` Pavan Nikhilesh Bhagavatula
2021-08-24 15:10   ` Stephen Hemminger
2021-09-01  6:48     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-09-07 21:02       ` Carrillo, Erik G
2021-09-07 21:31   ` [dpdk-dev] " Stephen Hemminger
2021-08-23 19:40 ` [dpdk-dev] [RFC 12/15] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-08-24 13:50   ` Carrillo, Erik G
2021-09-01  6:30     ` Pavan Nikhilesh Bhagavatula
2021-08-23 19:40 ` [dpdk-dev] [RFC 13/15] eventdev: promote event vector API to stable pbhagavatula
2021-08-30 14:43   ` Jayatheerthan, Jay
2021-09-08 12:05   ` Kinsella, Ray
2021-08-23 19:40 ` [dpdk-dev] [RFC 14/15] eventdev: make trace APIs internal pbhagavatula
2021-08-30 14:47   ` Jayatheerthan, Jay
2021-08-23 19:40 ` [dpdk-dev] [RFC 15/15] eventdev: promote trace variables to stable pbhagavatula
2021-09-08 12:06   ` Kinsella, Ray
2021-08-24  7:43 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Mattias Rönnblom
2021-08-24  7:47   ` Pavan Nikhilesh Bhagavatula
2021-08-24  8:05     ` Pavan Nikhilesh Bhagavatula
2021-08-30 10:25   ` Mattias Rönnblom
2021-08-30 16:00     ` [dpdk-dev] [RFC] eventdev: uninline inline API functions Mattias Rönnblom
2021-08-31 12:28       ` Jerin Jacob
2021-08-31 12:34         ` Mattias Rönnblom
2021-09-28  9:56 ` [dpdk-dev] [RFC 01/15] eventdev: make driver interface as internal Jerin Jacob
2021-10-03  8:26 ` [dpdk-dev] [PATCH v2 01/13] " pbhagavatula
2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 02/13] eventdev: separate internal structures pbhagavatula
2021-10-03  8:26   ` [dpdk-dev] [PATCH v2 03/13] eventdev: allocate max space for internal arrays pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 04/13] eventdev: move inline APIs into separate structure pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 05/13] eventdev: use new API for inline functions pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 06/13] eventdev: hide event device related structures pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v 07/13] eventdev: hide timer adapter PMD file pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 " pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 08/13] eventdev: remove rte prefix for internal structs pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 09/13] eventdev: rearrange fields in timer object pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 10/13] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 11/13] eventdev: promote event vector API to stable pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 12/13] eventdev: make trace APIs internal pbhagavatula
2021-10-03  8:27   ` [dpdk-dev] [PATCH v2 13/13] eventdev: mark trace variables as internal pbhagavatula
2021-10-06  6:49   ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " pbhagavatula
2021-10-06  6:49     ` [dpdk-dev] [PATCH v3 02/14] eventdev: separate internal structures pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 03/14] eventdev: allocate max space for internal arrays pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 04/14] eventdev: move inline APIs into separate structure pbhagavatula
2021-10-14  9:20       ` Jerin Jacob
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 05/14] drivers/event: invoke probing finish function pbhagavatula
2021-10-14  9:22       ` Jerin Jacob
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 06/14] eventdev: use new API for inline functions pbhagavatula
2021-10-11  9:51       ` Gujjar, Abhinandan S
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 07/14] eventdev: hide event device related structures pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 08/14] eventdev: hide timer adapter PMD file pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
2021-10-11  9:58       ` Gujjar, Abhinandan S
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 10/14] eventdev: rearrange fields in timer object pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-10-07 20:49       ` Carrillo, Erik G
2021-10-08  5:38         ` Pavan Nikhilesh Bhagavatula
2021-10-08 15:57           ` Carrillo, Erik G
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 12/14] eventdev: promote event vector API to stable pbhagavatula
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 13/14] eventdev: make trace APIs internal pbhagavatula
2021-10-11  9:59       ` Gujjar, Abhinandan S
2021-10-06  6:50     ` [dpdk-dev] [PATCH v3 14/14] eventdev: mark trace variables as internal pbhagavatula
2021-10-06  7:11       ` David Marchand
2021-10-14  9:28         ` Jerin Jacob
2021-10-14  9:05     ` [dpdk-dev] [PATCH v3 01/14] eventdev: make driver interface " Jerin Jacob
2021-10-14  9:08     ` Jerin Jacob
2021-10-15 19:02     ` [dpdk-dev] [PATCH v4 " pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 02/14] eventdev: separate internal structures pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 03/14] eventdev: allocate max space for internal arrays pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 04/14] eventdev: move inline APIs into separate structure pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 05/14] drivers/event: invoke probing finish function pbhagavatula
2021-10-17 15:34         ` Hemant Agrawal
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 06/14] eventdev: use new API for inline functions pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 07/14] eventdev: hide event device related structures pbhagavatula
2021-10-18  7:07         ` Harman Kalra
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 08/14] eventdev: hide timer adapter PMD file pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 10/14] eventdev: rearrange fields in timer object pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 12/14] eventdev: promote event vector API to stable pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 13/14] eventdev: make trace APIs internal pbhagavatula
2021-10-15 19:02       ` [dpdk-dev] [PATCH v4 14/14] eventdev: mark trace variables as internal pbhagavatula
2021-10-17  5:58         ` Jerin Jacob
2021-10-18 15:06           ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-10-19  7:01             ` David Marchand
2021-10-17 15:35       ` [dpdk-dev] [PATCH v4 01/14] eventdev: make driver interface " Hemant Agrawal
2021-10-18 23:35       ` [dpdk-dev] [PATCH v5 " pbhagavatula
2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 02/14] eventdev: separate internal structures pbhagavatula
2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 03/14] eventdev: allocate max space for internal arrays pbhagavatula
2021-10-18 23:35         ` [dpdk-dev] [PATCH v5 04/14] eventdev: move inline APIs into separate structure pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 05/14] drivers/event: invoke probing finish function pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 06/14] eventdev: use new API for inline functions pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 07/14] eventdev: hide event device related structures pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 08/14] eventdev: hide timer adapter PMD file pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 09/14] eventdev: remove rte prefix for internal structs pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 10/14] eventdev: rearrange fields in timer object pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 11/14] eventdev: move timer adapters memory to hugepage pbhagavatula
2021-10-20 20:24           ` Carrillo, Erik G
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 12/14] eventdev: promote event vector API to stable pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 13/14] eventdev: make trace APIs internal pbhagavatula
2021-10-18 23:36         ` [dpdk-dev] [PATCH v5 14/14] eventdev: mark trace variables as internal pbhagavatula
2021-10-20  4:01         ` [dpdk-dev] [PATCH v5 01/14] eventdev: make driver interface " Jerin Jacob
