* [RFC 1/4] eventdev: add support for credit preallocation
2025-06-29 16:52 [RFC 0/4] Add support for event credit preallocation Mattias Rönnblom
@ 2025-06-29 16:52 ` Mattias Rönnblom
2025-06-29 16:52 ` [RFC 2/4] event/dsw: " Mattias Rönnblom
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Mattias Rönnblom @ 2025-06-29 16:52 UTC (permalink / raw)
To: dev, Jerin Jacob
Cc: Mattias Rönnblom, Maria Lingemark, Luka Jankovic,
Sriram Yagnaraman, Mattias Rönnblom
Optionally split the enqueue operation for new events into two steps;
allocating a "slot" for the event in the event device, and the actual
enqueue operation.
Pre-allocating credits reduces the risk of enqueue failures (i.e.,
backpressure) for new events. This is useful for applications
performing expensive or effectively irreversible processing before the
enqueue operation. In such a scenario, efficiency may be improved and
code complexity reduced, in case the application can know ahead of
time, with some certainty, that the enqueue operation will succeed.
A new function rte_event_credit_alloc() is used to allocate credits.
A new function rte_event_credit_free() may be used, in case the
application decides to not use allocated credits.
A new operation type RTE_EVENT_NEW_PREALLOCED is added, which is the
equivalent to RTE_EVENT_NEW, only the event consumes one of the
pre-allocated credits when the event is successfully enqueued.
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
lib/eventdev/eventdev_pmd.h | 4 +
lib/eventdev/eventdev_private.c | 23 +++++
lib/eventdev/eventdev_trace_points.c | 8 ++
lib/eventdev/rte_eventdev.h | 135 +++++++++++++++++++++++++++
lib/eventdev/rte_eventdev_core.h | 10 ++
lib/eventdev/rte_eventdev_trace_fp.h | 19 ++++
6 files changed, 199 insertions(+)
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index dda8ad82c9..84ec3ea555 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -172,6 +172,10 @@ struct __rte_cache_aligned rte_eventdev {
/**< Pointer to PMD dequeue burst function. */
event_maintain_t maintain;
/**< Pointer to PMD port maintenance function. */
+ event_credit_alloc_t credit_alloc;
+ /**< Pointer to PMD credit allocation function. */
+ event_credit_free_t credit_free;
+ /**< Pointer to PMD credit release function. */
event_tx_adapter_enqueue_t txa_enqueue_same_dest;
/**< Pointer to PMD eth Tx adapter burst enqueue function with
* events destined to same Eth port & Tx queue.
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index dffd2c71d0..ec16125d83 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -34,6 +34,25 @@ dummy_event_maintain(__rte_unused void *port, __rte_unused int op)
"maintenance requested for unconfigured event device");
}
+static int
+dummy_event_credit_alloc(__rte_unused void *port,
+ __rte_unused unsigned int new_event_threshold,
+ __rte_unused unsigned int num_credits)
+{
+ RTE_EDEV_LOG_ERR(
+ "credit allocation request for unconfigured event device");
+ return 0;
+}
+
+static int
+dummy_event_credit_free(__rte_unused void *port,
+ __rte_unused unsigned int num_credits)
+{
+ RTE_EDEV_LOG_ERR(
+ "credit return request for unconfigured event device");
+ return 0;
+}
+
static uint16_t
dummy_event_tx_adapter_enqueue(__rte_unused void *port,
__rte_unused struct rte_event ev[],
@@ -118,6 +137,8 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
.enqueue_forward_burst = dummy_event_enqueue_burst,
.dequeue_burst = dummy_event_dequeue_burst,
.maintain = dummy_event_maintain,
+ .credit_alloc = dummy_event_credit_alloc,
+ .credit_free = dummy_event_credit_free,
.txa_enqueue = dummy_event_tx_adapter_enqueue,
.txa_enqueue_same_dest = dummy_event_tx_adapter_enqueue_same_dest,
.ca_enqueue = dummy_event_crypto_adapter_enqueue,
@@ -141,6 +162,8 @@ event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
fp_op->dequeue_burst = dev->dequeue_burst;
fp_op->maintain = dev->maintain;
+ fp_op->credit_alloc = dev->credit_alloc;
+ fp_op->credit_free = dev->credit_free;
fp_op->txa_enqueue = dev->txa_enqueue;
fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
fp_op->ca_enqueue = dev->ca_enqueue;
diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
index ade6723b7b..c563f5cab1 100644
--- a/lib/eventdev/eventdev_trace_points.c
+++ b/lib/eventdev/eventdev_trace_points.c
@@ -50,6 +50,14 @@ RTE_EXPORT_SYMBOL(__rte_eventdev_trace_maintain)
RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_maintain,
lib.eventdev.maintain)
+RTE_EXPORT_SYMBOL(__rte_eventdev_trace_credit_alloc)
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_credit_alloc,
+ lib.eventdev.credit_alloc)
+
+RTE_EXPORT_SYMBOL(__rte_eventdev_trace_credit_free)
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_credit_free,
+ lib.eventdev.credit_free)
+
RTE_EXPORT_EXPERIMENTAL_SYMBOL(__rte_eventdev_trace_port_profile_switch, 23.11)
RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_switch,
lib.eventdev.port.profile.switch)
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 3c7fcbf0be..812ed2705c 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -504,6 +504,16 @@ struct rte_event;
* @see rte_event_port_preschedule()
*/
+#define RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION (1ULL << 21)
+/**< Event device supports credit preallocation for new events.
+ *
+ * The event device supports preallocation credits, which in turn allows
+ * the use of @ref RTE_EVENT_OP_NEW_PREALLOCED.
+ *
+ * @see rte_event_credit_alloc()
+ * @see rte_event_credit_free()
+ */
+
/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
/**< Highest priority level for events and queues.
@@ -1621,6 +1631,10 @@ struct __rte_aligned(16) rte_event_vector {
* i.e. one not previously dequeued, into the event device, to be scheduled
* for processing.
*/
+#define RTE_EVENT_OP_NEW_PREALLOCED 3
+/**< The @ref rte_event.op field must be set to this operation type to inject a new event
+ * for which a credit has already been allocated with rte_event_credit_alloc().
+ */
#define RTE_EVENT_OP_FORWARD 1
/**< The application must set the @ref rte_event.op field to this operation type to return a
* previously dequeued event to the event device to be scheduled for further processing.
@@ -2933,6 +2947,127 @@ rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
return 0;
}
+/**
+ * Preallocate credits for new events.
+ *
+ * Preallocate credits for use with @ref RTE_EVENT_OP_NEW_PREALLOCED
+ * events. One credit gives the right to enqueue one such event. Upon
+ * successfully enqueuing an @ref RTE_EVENT_OP_NEW_PREALLOCED type
+ * event, one credit is considered spent.
+ *
+ * The credits are tied to the event port from which they were allocated.
+ * Thus, credit allocation and the enqueue operation must happen on
+ * the same port.
+ *
+ * The use of preallocated credits reduces the risk of enqueue
+ * failures, but does not guarantee that such will not occur.
+ *
+ * Besides using up credits by enqueuing @ref RTE_EVENT_OP_NEW_PREALLOCAD
+ * events, the application may also return credits using
+ * rte_event_credit_free().
+ *
+ * rte_event_credit_alloc() may also be used to pick a different @c
+ * new_event_threshold than is configured on the event port.
+ *
+ * This function will only succeed for event devices which have the
+ * @ref RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION flag set.
+ *
+ * The application may not attempt to enqueue @ref RTE_EVENT_OP_NEW_PREALLOCED
+ * events if it does not possess any credits (for that event port).
+ *
+ * Since credits are allocated against a certain @c new_event_threshold,
+ * and the number of in-flight events may change quickly (i.e., with
+ * an incoming burst of packets), storing credits for long durations
+ * of time may impact desired backpressure behavior. Also, since a
+ * credit represent an in-flight event, allocated but unused credits
+ * reduces the number of actual in-flight events allowed.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param new_event_threshold
+ * The @c new_event_threshold to use for this allocation. If set to 0, the
+ * event port's @c new_event_threshold will be used.
+ * @param num_credits
+ * The number of credits the application wish to acquire.
+ * @return
+ * - The number of credits allocated (<= @c num_credits).
+ * - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
+ * - -ENOTSUP if event device does not support credit preallocation.
+ *
+ * @see RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION
+ */
+static inline int
+rte_event_credit_alloc(uint8_t dev_id, uint8_t port_id, unsigned int new_event_threshold,
+ unsigned int num_credits)
+{
+ const struct rte_event_fp_ops *fp_ops;
+ void *port;
+
+ fp_ops = &rte_event_fp_ops[dev_id];
+ port = fp_ops->data[port_id];
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS ||
+ port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
+ return -EINVAL;
+
+ if (port == NULL)
+ return -EINVAL;
+#endif
+ rte_eventdev_trace_credit_alloc(dev_id, port_id, new_event_threshold, num_credits);
+
+ if (unlikely(fp_ops->credit_alloc == NULL))
+ return -ENOTSUP;
+
+ return fp_ops->credit_alloc(port, new_event_threshold, num_credits);
+}
+
+/**
+ * Return preallocated credits for new events.
+ *
+ * Return unused credits allocated with rte_event_credit_alloc().
+ *
+ * This function will only succeed for event devices which have the
+ * @ref RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION flag set.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param num_credits
+ * The number of credits the application wish to return.
+ * @return
+ * - 0 on success.
+ * - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
+ * - -ENOTSUP if event device does not support credit preallocation.
+ *
+ * @see RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION
+ */
+static inline int
+rte_event_credit_free(uint8_t dev_id, uint8_t port_id, unsigned int num_credits)
+{
+ const struct rte_event_fp_ops *fp_ops;
+ void *port;
+
+ fp_ops = &rte_event_fp_ops[dev_id];
+ port = fp_ops->data[port_id];
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS ||
+ port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
+ return -EINVAL;
+
+ if (port == NULL)
+ return -EINVAL;
+#endif
+ rte_eventdev_trace_credit_free(dev_id, port_id, num_credits);
+
+ if (unlikely(fp_ops->credit_free == NULL))
+ return -ENOTSUP;
+
+ return fp_ops->credit_free(port, num_credits);
+}
+
/**
* Change the active profile on an event port.
*
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 1818483044..57a3ff4b67 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -25,6 +25,12 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
typedef void (*event_maintain_t)(void *port, int op);
/**< @internal Maintains a port */
+typedef int (*event_credit_alloc_t)(void *port, unsigned int new_event_threshold, unsigned int num_credits);
+/**< @internal Allocates credits for new events */
+
+typedef int (*event_credit_free_t)(void *port, unsigned int num_credits);
+/**< @internal Returns credits for new events */
+
typedef uint16_t (*event_tx_adapter_enqueue_t)(void *port,
struct rte_event ev[],
uint16_t nb_events);
@@ -63,6 +69,10 @@ struct __rte_cache_aligned rte_event_fp_ops {
/**< PMD dequeue burst function. */
event_maintain_t maintain;
/**< PMD port maintenance function. */
+ event_credit_alloc_t credit_alloc;
+ /**< PMD credit pre-allocation function. */
+ event_credit_free_t credit_free;
+ /**< PMD credit return function. */
event_tx_adapter_enqueue_t txa_enqueue;
/**< PMD Tx adapter enqueue function. */
event_tx_adapter_enqueue_t txa_enqueue_same_dest;
diff --git a/lib/eventdev/rte_eventdev_trace_fp.h b/lib/eventdev/rte_eventdev_trace_fp.h
index 8b794d1362..9a4b4f3f77 100644
--- a/lib/eventdev/rte_eventdev_trace_fp.h
+++ b/lib/eventdev/rte_eventdev_trace_fp.h
@@ -46,6 +46,25 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_int(op);
)
+RTE_TRACE_POINT_FP(
+ rte_eventdev_trace_credit_alloc,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id,
+ unsigned int new_event_threshold, unsigned int num_credits),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u8(port_id);
+ rte_trace_point_emit_int(new_event_threshold);
+ rte_trace_point_emit_int(num_credits);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_eventdev_trace_credit_free,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id,
+ unsigned int num_credits),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u8(port_id);
+ rte_trace_point_emit_int(num_credits);
+)
+
RTE_TRACE_POINT_FP(
rte_eventdev_trace_port_profile_switch,
RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, uint8_t profile),
--
2.43.0
^ permalink raw reply [flat|nested] 5+ messages in thread
* [RFC 2/4] event/dsw: add support for credit preallocation
2025-06-29 16:52 [RFC 0/4] Add support for event credit preallocation Mattias Rönnblom
2025-06-29 16:52 ` [RFC 1/4] eventdev: add support for " Mattias Rönnblom
@ 2025-06-29 16:52 ` Mattias Rönnblom
2025-06-29 16:52 ` [RFC 3/4] eventdev: add enqueue optimized for prealloced events Mattias Rönnblom
2025-06-29 16:52 ` [RFC 4/4] event/dsw: implement " Mattias Rönnblom
3 siblings, 0 replies; 5+ messages in thread
From: Mattias Rönnblom @ 2025-06-29 16:52 UTC (permalink / raw)
To: dev, Jerin Jacob
Cc: Mattias Rönnblom, Maria Lingemark, Luka Jankovic,
Sriram Yagnaraman, Mattias Rönnblom
Implement RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION.
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
drivers/event/dsw/dsw_evdev.c | 5 ++-
drivers/event/dsw/dsw_evdev.h | 6 +++
drivers/event/dsw/dsw_event.c | 70 ++++++++++++++++++++++++++++------
drivers/event/dsw/dsw_xstats.c | 3 ++
4 files changed, 71 insertions(+), 13 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index e819412639..ecc1d947dd 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -228,7 +228,8 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
RTE_EVENT_DEV_CAP_NONSEQ_MODE|
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
- RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ
+ RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ |
+ RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION
};
}
@@ -458,6 +459,8 @@ dsw_probe(struct rte_vdev_device *vdev)
dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst;
dev->dequeue_burst = dsw_event_dequeue_burst;
dev->maintain = dsw_event_maintain;
+ dev->credit_alloc = dsw_event_credit_alloc;
+ dev->credit_free = dsw_event_credit_free;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index d78c5f4f26..c026b0a135 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -208,6 +208,7 @@ struct __rte_cache_aligned dsw_port {
uint64_t enqueue_calls;
uint64_t new_enqueued;
+ uint64_t new_prealloced_enqueued;
uint64_t forward_enqueued;
uint64_t release_enqueued;
uint64_t queue_enqueued[DSW_MAX_QUEUES];
@@ -284,6 +285,11 @@ uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events,
uint16_t num, uint64_t wait);
void dsw_event_maintain(void *port, int op);
+int dsw_event_credit_alloc(void *port, unsigned int new_event_threshold,
+ unsigned int num_credits);
+
+int dsw_event_credit_free(void *port, unsigned int num_credits);
+
int dsw_xstats_get_names(const struct rte_eventdev *dev,
enum rte_event_dev_xstats_mode mode,
uint8_t queue_port_id,
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 399d9f050e..09f353b324 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -93,9 +93,11 @@ dsw_port_return_credits(struct dsw_evdev *dsw, struct dsw_port *port,
static void
dsw_port_enqueue_stats(struct dsw_port *port, uint16_t num_new,
- uint16_t num_forward, uint16_t num_release)
+ uint16_t num_new_prealloced, uint16_t num_forward,
+ uint16_t num_release)
{
port->new_enqueued += num_new;
+ port->new_prealloced_enqueued += num_new_prealloced;
port->forward_enqueued += num_forward;
port->release_enqueued += num_release;
}
@@ -1322,12 +1324,26 @@ dsw_port_flush_out_buffers(struct dsw_evdev *dsw, struct dsw_port *source_port)
dsw_port_transmit_buffered(dsw, source_port, dest_port_id);
}
+static inline bool
+dsw_should_backpressure(struct dsw_evdev *dsw, int32_t new_event_threshold)
+{
+ int32_t credits_on_loan;
+ bool over_threshold;
+
+ credits_on_loan = rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed);
+
+ over_threshold = credits_on_loan > new_event_threshold;
+
+ return over_threshold;
+}
+
static __rte_always_inline uint16_t
dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
const struct rte_event events[],
uint16_t events_len, bool op_types_known,
- uint16_t num_new, uint16_t num_forward,
- uint16_t num_release)
+ uint16_t num_new, uint16_t num_new_prealloced,
+ uint16_t num_forward, uint16_t num_release)
{
struct dsw_evdev *dsw = source_port->dsw;
bool enough_credits;
@@ -1364,6 +1380,9 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
case RTE_EVENT_OP_NEW:
num_new++;
break;
+ case RTE_EVENT_OP_NEW_PREALLOCED:
+ num_new_prealloced++;
+ break;
case RTE_EVENT_OP_FORWARD:
num_forward++;
break;
@@ -1379,9 +1398,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- rte_atomic_load_explicit(&dsw->credits_on_loan,
- rte_memory_order_relaxed) >
- source_port->new_event_threshold))
+ dsw_should_backpressure(dsw, source_port->new_event_threshold)))
return 0;
enough_credits = dsw_port_acquire_credits(dsw, source_port, num_new);
@@ -1397,7 +1414,8 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
RTE_VERIFY(num_forward + num_release <= source_port->pending_releases);
source_port->pending_releases -= (num_forward + num_release);
- dsw_port_enqueue_stats(source_port, num_new, num_forward, num_release);
+ dsw_port_enqueue_stats(source_port, num_new, num_new_prealloced,
+ num_forward, num_release);
for (i = 0; i < events_len; i++) {
const struct rte_event *event = &events[i];
@@ -1409,9 +1427,9 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
}
DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "%d non-release events "
- "accepted.", num_new + num_forward);
+ "accepted.", num_new + num_new_prealloced + num_forward);
- return (num_new + num_forward + num_release);
+ return (num_new + num_new_prealloced + num_forward + num_release);
}
uint16_t
@@ -1424,7 +1442,7 @@ dsw_event_enqueue_burst(void *port, const struct rte_event events[],
events_len = source_port->enqueue_depth;
return dsw_event_enqueue_burst_generic(source_port, events,
- events_len, false, 0, 0, 0);
+ events_len, false, 0, 0, 0, 0);
}
uint16_t
@@ -1438,7 +1456,7 @@ dsw_event_enqueue_new_burst(void *port, const struct rte_event events[],
return dsw_event_enqueue_burst_generic(source_port, events,
events_len, true, events_len,
- 0, 0);
+ 0, 0, 0);
}
uint16_t
@@ -1451,7 +1469,7 @@ dsw_event_enqueue_forward_burst(void *port, const struct rte_event events[],
events_len = source_port->enqueue_depth;
return dsw_event_enqueue_burst_generic(source_port, events,
- events_len, true, 0,
+ events_len, true, 0, 0,
events_len, 0);
}
@@ -1604,3 +1622,31 @@ void dsw_event_maintain(void *port, int op)
if (op & RTE_EVENT_DEV_MAINT_OP_FLUSH)
dsw_port_flush_out_buffers(dsw, source_port);
}
+
+int dsw_event_credit_alloc(void *port, unsigned int new_event_threshold,
+ unsigned int num_credits)
+{
+ struct dsw_port *source_port = port;
+ struct dsw_evdev *dsw = source_port->dsw;
+ bool enough_credits;
+
+ if (dsw_should_backpressure(dsw, new_event_threshold))
+ return 0;
+
+ enough_credits = dsw_port_acquire_credits(dsw, source_port, num_credits);
+
+ if (!enough_credits)
+ return 0;
+
+ return num_credits;
+}
+
+int dsw_event_credit_free(void *port, unsigned int num_credits)
+{
+ struct dsw_port *source_port = port;
+ struct dsw_evdev *dsw = source_port->dsw;
+
+ dsw_port_return_credits(dsw, source_port, num_credits);
+
+ return 0;
+}
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index f61dfd80a8..2b58c26cb8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -65,6 +65,7 @@ static struct dsw_xstat_dev dsw_dev_xstats[] = {
}
DSW_GEN_PORT_ACCESS_FN(new_enqueued)
+DSW_GEN_PORT_ACCESS_FN(new_prealloced_enqueued)
DSW_GEN_PORT_ACCESS_FN(forward_enqueued)
DSW_GEN_PORT_ACCESS_FN(release_enqueued)
@@ -136,6 +137,8 @@ DSW_GEN_PORT_ACCESS_FN(last_bg)
static struct dsw_xstats_port dsw_port_xstats[] = {
{ "port_%u_new_enqueued", dsw_xstats_port_get_new_enqueued,
false },
+ { "port_%u_new_prealloced_enqueued",
+ dsw_xstats_port_get_new_prealloced_enqueued, false },
{ "port_%u_forward_enqueued", dsw_xstats_port_get_forward_enqueued,
false },
{ "port_%u_release_enqueued", dsw_xstats_port_get_release_enqueued,
--
2.43.0
^ permalink raw reply [flat|nested] 5+ messages in thread
* [RFC 3/4] eventdev: add enqueue optimized for prealloced events
2025-06-29 16:52 [RFC 0/4] Add support for event credit preallocation Mattias Rönnblom
2025-06-29 16:52 ` [RFC 1/4] eventdev: add support for " Mattias Rönnblom
2025-06-29 16:52 ` [RFC 2/4] event/dsw: " Mattias Rönnblom
@ 2025-06-29 16:52 ` Mattias Rönnblom
2025-06-29 16:52 ` [RFC 4/4] event/dsw: implement " Mattias Rönnblom
3 siblings, 0 replies; 5+ messages in thread
From: Mattias Rönnblom @ 2025-06-29 16:52 UTC (permalink / raw)
To: dev, Jerin Jacob
Cc: Mattias Rönnblom, Maria Lingemark, Luka Jankovic,
Sriram Yagnaraman, Mattias Rönnblom
Extend Eventdev API with an enqueue function for events of the
RTE_EVENT_OP_NEW_PREALLOCED operation type.
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
lib/eventdev/eventdev_pmd.h | 2 +
lib/eventdev/eventdev_private.c | 1 +
lib/eventdev/rte_eventdev.h | 72 ++++++++++++++++++++++++++++----
lib/eventdev/rte_eventdev_core.h | 2 +
4 files changed, 70 insertions(+), 7 deletions(-)
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 84ec3ea555..d636e9e7ac 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -166,6 +166,8 @@ struct __rte_cache_aligned rte_eventdev {
/**< Pointer to PMD enqueue burst function. */
event_enqueue_burst_t enqueue_new_burst;
/**< Pointer to PMD enqueue burst function(op new variant) */
+ event_enqueue_burst_t enqueue_new_prealloced_burst;
+ /**< Pointer to PMD enqueue burst function(op new prealloced variant) */
event_enqueue_burst_t enqueue_forward_burst;
/**< Pointer to PMD enqueue burst function(op forward variant) */
event_dequeue_burst_t dequeue_burst;
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index ec16125d83..d830ba8f3b 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -159,6 +159,7 @@ event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
{
fp_op->enqueue_burst = dev->enqueue_burst;
fp_op->enqueue_new_burst = dev->enqueue_new_burst;
+ fp_op->enqueue_new_prealloced_burst = dev->enqueue_new_prealloced_burst;
fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
fp_op->dequeue_burst = dev->dequeue_burst;
fp_op->maintain = dev->maintain;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 812ed2705c..fc71c54b3e 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -507,8 +507,8 @@ struct rte_event;
#define RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION (1ULL << 21)
/**< Event device supports credit preallocation for new events.
*
- * The event device supports preallocation credits, which in turn allows
- * the use of @ref RTE_EVENT_OP_NEW_PREALLOCED.
+ * The event device supports preallocating credits, which in turn allows
+ * enqueueing events with operation type @ref RTE_EVENT_OP_NEW_PREALLOCED.
*
* @see rte_event_credit_alloc()
* @see rte_event_credit_free()
@@ -2734,6 +2734,64 @@ rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
fp_ops->enqueue_new_burst);
}
+/**
+ * Enqueue a burst of events objects of operation type
+ * @ref RTE_EVENT_OP_NEW_PREALLOCED on an event device designated by its
+ * *dev_id* through the event port specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(),
+ * expect that application can use this API when the all objects in
+ * the burst contains the enqueue operation of the type
+ * @ref RTE_EVENT_OP_NEW_PREALLOCED. This specialized function can
+ * provide the additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_prealloced_burst() result is undefined if
+ * the enqueue burst has event object of operation type !=
+ * @ref RTE_EVENT_OP_NEW_PREALLOCED.
+ *
+ * This function may only be called on event devices with the
+ * @ref RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION capability.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ * available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event devices queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_prealloced_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ const struct rte_event_fp_ops *fp_ops;
+
+ fp_ops = &rte_event_fp_ops[dev_id];
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ fp_ops->enqueue_new_prealloced_burst);
+}
+
/**
* Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
* on an event device designated by its *dev_id* through the event port
@@ -2962,14 +3020,14 @@ rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
* The use of preallocated credits reduces the risk of enqueue
* failures, but does not guarantee that such will not occur.
*
- * Besides using up credits by enqueuing @ref RTE_EVENT_OP_NEW_PREALLOCAD
+ * Besides using up credits by enqueuing @ref RTE_EVENT_OP_NEW_PREALLOCED
* events, the application may also return credits using
* rte_event_credit_free().
*
* rte_event_credit_alloc() may also be used to pick a different @c
* new_event_threshold than is configured on the event port.
*
- * This function will only succeed for event devices which have the
+ * This function is only available on event devices which have the
* @ref RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION flag set.
*
* The application may not attempt to enqueue @ref RTE_EVENT_OP_NEW_PREALLOCED
@@ -2993,7 +3051,7 @@ rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
* The number of credits the application wish to acquire.
* @return
* - The number of credits allocated (<= @c num_credits).
- * - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
+ * - -EINVAL if *dev_id* or *port_id* is invalid.
* - -ENOTSUP if event device does not support credit preallocation.
*
* @see RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION
@@ -3028,7 +3086,7 @@ rte_event_credit_alloc(uint8_t dev_id, uint8_t port_id, unsigned int new_event_t
*
* Return unused credits allocated with rte_event_credit_alloc().
*
- * This function will only succeed for event devices which have the
+ * This function is only available on event devices which have the
* @ref RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION flag set.
*
* @param dev_id
@@ -3039,7 +3097,7 @@ rte_event_credit_alloc(uint8_t dev_id, uint8_t port_id, unsigned int new_event_t
* The number of credits the application wish to return.
* @return
* - 0 on success.
- * - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
+ * - -EINVAL if *dev_id* or *port_id* is invalid.
* - -ENOTSUP if event device does not support credit preallocation.
*
* @see RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 57a3ff4b67..89444e919e 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -63,6 +63,8 @@ struct __rte_cache_aligned rte_event_fp_ops {
/**< PMD enqueue burst function. */
event_enqueue_burst_t enqueue_new_burst;
/**< PMD enqueue burst new function. */
+ event_enqueue_burst_t enqueue_new_prealloced_burst;
+/**< PMD enqueue burst new prealloced function. */
event_enqueue_burst_t enqueue_forward_burst;
/**< PMD enqueue burst fwd function. */
event_dequeue_burst_t dequeue_burst;
--
2.43.0
^ permalink raw reply [flat|nested] 5+ messages in thread
* [RFC 4/4] event/dsw: implement enqueue optimized for prealloced events
2025-06-29 16:52 [RFC 0/4] Add support for event credit preallocation Mattias Rönnblom
` (2 preceding siblings ...)
2025-06-29 16:52 ` [RFC 3/4] eventdev: add enqueue optimized for prealloced events Mattias Rönnblom
@ 2025-06-29 16:52 ` Mattias Rönnblom
3 siblings, 0 replies; 5+ messages in thread
From: Mattias Rönnblom @ 2025-06-29 16:52 UTC (permalink / raw)
To: dev, Jerin Jacob
Cc: Mattias Rönnblom, Maria Lingemark, Luka Jankovic,
Sriram Yagnaraman, Mattias Rönnblom
Implement rte_event_enqueue_new_prealloced_burst() in DSW.
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
drivers/event/dsw/dsw_evdev.c | 1 +
drivers/event/dsw/dsw_evdev.h | 3 +++
drivers/event/dsw/dsw_event.c | 18 ++++++++++++++++++
3 files changed, 22 insertions(+)
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index ecc1d947dd..139f57b5f4 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -456,6 +456,7 @@ dsw_probe(struct rte_vdev_device *vdev)
dev->dev_ops = &dsw_evdev_ops;
dev->enqueue_burst = dsw_event_enqueue_burst;
dev->enqueue_new_burst = dsw_event_enqueue_new_burst;
+ dev->enqueue_new_prealloced_burst = dsw_event_enqueue_new_prealloced_burst;
dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst;
dev->dequeue_burst = dsw_event_dequeue_burst;
dev->maintain = dsw_event_maintain;
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index c026b0a135..5c5699c64f 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -277,6 +277,9 @@ uint16_t dsw_event_enqueue_burst(void *port,
uint16_t dsw_event_enqueue_new_burst(void *port,
const struct rte_event events[],
uint16_t events_len);
+uint16_t dsw_event_enqueue_new_prealloced_burst(void *port,
+ const struct rte_event events[],
+ uint16_t events_len);
uint16_t dsw_event_enqueue_forward_burst(void *port,
const struct rte_event events[],
uint16_t events_len);
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 09f353b324..b9529bd5d5 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -1459,6 +1459,21 @@ dsw_event_enqueue_new_burst(void *port, const struct rte_event events[],
0, 0, 0);
}
+uint16_t
+dsw_event_enqueue_new_prealloced_burst(void *port,
+ const struct rte_event events[],
+ uint16_t events_len)
+{
+ struct dsw_port *source_port = port;
+
+ if (unlikely(events_len > source_port->enqueue_depth))
+ events_len = source_port->enqueue_depth;
+
+ return dsw_event_enqueue_burst_generic(source_port, events,
+ events_len, true, 0, events_len,
+ 0, 0);
+}
+
uint16_t
dsw_event_enqueue_forward_burst(void *port, const struct rte_event events[],
uint16_t events_len)
@@ -1630,6 +1645,9 @@ int dsw_event_credit_alloc(void *port, unsigned int new_event_threshold,
struct dsw_evdev *dsw = source_port->dsw;
bool enough_credits;
+ if (new_event_threshold == 0)
+ new_event_threshold = source_port->new_event_threshold;
+
if (dsw_should_backpressure(dsw, new_event_threshold))
return 0;
--
2.43.0
^ permalink raw reply [flat|nested] 5+ messages in thread