From: "Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
To: <dev@dpdk.org>, Jerin Jacob <jerinj@marvell.com>
Cc: "Mattias Rönnblom" <hofors@lysator.liu.se>,
"Maria Lingemark" <maria.lingemark@ericsson.com>,
"Luka Jankovic" <luka.jankovic@ericsson.com>,
"Sriram Yagnaraman" <sriram.yagnaraman@ericsson.com>,
"Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
Subject: [RFC 2/4] event/dsw: add support for credit preallocation
Date: Sun, 29 Jun 2025 18:52:12 +0200
Message-ID: <20250629165214.3468-3-mattias.ronnblom@ericsson.com>
In-Reply-To: <20250629165214.3468-1-mattias.ronnblom@ericsson.com>
Implement the RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION capability in the
DSW event device.

Applications may allocate enqueue credits ahead of time and later
enqueue the corresponding events using the RTE_EVENT_OP_NEW_PREALLOCED
operation type. Such events bypass both the new event threshold check
and the credit acquisition in the enqueue fast path, since their
credits were accounted for at allocation time. A new per-port xstat,
new_prealloced_enqueued, counts prealloced events enqueued.
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
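Note to reviewers: below is a minimal, hypothetical application-side
sketch of the intended usage. The rte_event_credit_alloc() and
rte_event_credit_free() names and signatures are assumed from patch
1/4 of this series and may not match the final public API; the
threshold value (1024) is arbitrary.

/*
 * Hypothetical usage sketch; rte_event_credit_alloc() and
 * rte_event_credit_free() are assumed from RFC 1/4 and may differ.
 */
static void
enqueue_with_prealloc(uint8_t dev_id, uint8_t port_id, uint8_t queue_id,
		      struct rte_mbuf *pkts[], uint16_t num)
{
	struct rte_event evs[num];
	uint16_t sent;
	int credits;
	uint16_t i;

	/* Acquire credits up front; fails (returns 0) if the number
	 * of credits on loan exceeds the supplied threshold. */
	credits = rte_event_credit_alloc(dev_id, port_id, 1024, num);
	if (credits <= 0)
		return; /* Back-pressured; drop or retry later. */

	for (i = 0; i < (uint16_t)credits; i++)
		evs[i] = (struct rte_event) {
			/* Prealloced new events skip the threshold and
			 * credit checks in the enqueue fast path. */
			.op = RTE_EVENT_OP_NEW_PREALLOCED,
			.queue_id = queue_id,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
			.mbuf = pkts[i],
		};

	sent = rte_event_enqueue_burst(dev_id, port_id, evs,
				       (uint16_t)credits);

	/* Return credits for events the device did not accept. */
	if (sent < (uint16_t)credits)
		rte_event_credit_free(dev_id, port_id,
				      (uint16_t)credits - sent);
}

In the implementation below, dsw_event_credit_alloc() applies the same
backpressure check as regular RTE_EVENT_OP_NEW enqueues (via the new
dsw_should_backpressure() helper), so moving credit acquisition out of
the fast path does not change the threshold semantics.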
drivers/event/dsw/dsw_evdev.c | 5 ++-
drivers/event/dsw/dsw_evdev.h | 6 +++
drivers/event/dsw/dsw_event.c | 70 ++++++++++++++++++++++++++++------
drivers/event/dsw/dsw_xstats.c | 3 ++
4 files changed, 71 insertions(+), 13 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index e819412639..ecc1d947dd 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -228,7 +228,8 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
RTE_EVENT_DEV_CAP_NONSEQ_MODE|
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
- RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ
+ RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ |
+ RTE_EVENT_DEV_CAP_CREDIT_PREALLOCATION
};
}
@@ -458,6 +459,8 @@ dsw_probe(struct rte_vdev_device *vdev)
dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst;
dev->dequeue_burst = dsw_event_dequeue_burst;
dev->maintain = dsw_event_maintain;
+ dev->credit_alloc = dsw_event_credit_alloc;
+ dev->credit_free = dsw_event_credit_free;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index d78c5f4f26..c026b0a135 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -208,6 +208,7 @@ struct __rte_cache_aligned dsw_port {
uint64_t enqueue_calls;
uint64_t new_enqueued;
+ uint64_t new_prealloced_enqueued;
uint64_t forward_enqueued;
uint64_t release_enqueued;
uint64_t queue_enqueued[DSW_MAX_QUEUES];
@@ -284,6 +285,11 @@ uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events,
uint16_t num, uint64_t wait);
void dsw_event_maintain(void *port, int op);
+int dsw_event_credit_alloc(void *port, unsigned int new_event_threshold,
+ unsigned int num_credits);
+
+int dsw_event_credit_free(void *port, unsigned int num_credits);
+
int dsw_xstats_get_names(const struct rte_eventdev *dev,
enum rte_event_dev_xstats_mode mode,
uint8_t queue_port_id,
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 399d9f050e..09f353b324 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -93,9 +93,11 @@ dsw_port_return_credits(struct dsw_evdev *dsw, struct dsw_port *port,
static void
dsw_port_enqueue_stats(struct dsw_port *port, uint16_t num_new,
- uint16_t num_forward, uint16_t num_release)
+ uint16_t num_new_prealloced, uint16_t num_forward,
+ uint16_t num_release)
{
port->new_enqueued += num_new;
+ port->new_prealloced_enqueued += num_new_prealloced;
port->forward_enqueued += num_forward;
port->release_enqueued += num_release;
}
@@ -1322,12 +1324,26 @@ dsw_port_flush_out_buffers(struct dsw_evdev *dsw, struct dsw_port *source_port)
dsw_port_transmit_buffered(dsw, source_port, dest_port_id);
}
+static inline bool
+dsw_should_backpressure(struct dsw_evdev *dsw, int32_t new_event_threshold)
+{
+ int32_t credits_on_loan;
+ bool over_threshold;
+
+ credits_on_loan = rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed);
+
+ over_threshold = credits_on_loan > new_event_threshold;
+
+ return over_threshold;
+}
+
static __rte_always_inline uint16_t
dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
const struct rte_event events[],
uint16_t events_len, bool op_types_known,
- uint16_t num_new, uint16_t num_forward,
- uint16_t num_release)
+ uint16_t num_new, uint16_t num_new_prealloced,
+ uint16_t num_forward, uint16_t num_release)
{
struct dsw_evdev *dsw = source_port->dsw;
bool enough_credits;
@@ -1364,6 +1380,9 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
case RTE_EVENT_OP_NEW:
num_new++;
break;
+ case RTE_EVENT_OP_NEW_PREALLOCED:
+ num_new_prealloced++;
+ break;
case RTE_EVENT_OP_FORWARD:
num_forward++;
break;
@@ -1379,9 +1398,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- rte_atomic_load_explicit(&dsw->credits_on_loan,
- rte_memory_order_relaxed) >
- source_port->new_event_threshold))
+ dsw_should_backpressure(dsw, source_port->new_event_threshold)))
return 0;
enough_credits = dsw_port_acquire_credits(dsw, source_port, num_new);
@@ -1397,7 +1414,8 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
RTE_VERIFY(num_forward + num_release <= source_port->pending_releases);
source_port->pending_releases -= (num_forward + num_release);
- dsw_port_enqueue_stats(source_port, num_new, num_forward, num_release);
+ dsw_port_enqueue_stats(source_port, num_new, num_new_prealloced,
+ num_forward, num_release);
for (i = 0; i < events_len; i++) {
const struct rte_event *event = &events[i];
@@ -1409,9 +1427,9 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
}
DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "%d non-release events "
- "accepted.", num_new + num_forward);
+ "accepted.", num_new + num_new_prealloced + num_forward);
- return (num_new + num_forward + num_release);
+ return (num_new + num_new_prealloced + num_forward + num_release);
}
uint16_t
@@ -1424,7 +1442,7 @@ dsw_event_enqueue_burst(void *port, const struct rte_event events[],
events_len = source_port->enqueue_depth;
return dsw_event_enqueue_burst_generic(source_port, events,
- events_len, false, 0, 0, 0);
+ events_len, false, 0, 0, 0, 0);
}
uint16_t
@@ -1438,7 +1456,7 @@ dsw_event_enqueue_new_burst(void *port, const struct rte_event events[],
return dsw_event_enqueue_burst_generic(source_port, events,
events_len, true, events_len,
- 0, 0);
+ 0, 0, 0);
}
uint16_t
@@ -1451,7 +1469,7 @@ dsw_event_enqueue_forward_burst(void *port, const struct rte_event events[],
events_len = source_port->enqueue_depth;
return dsw_event_enqueue_burst_generic(source_port, events,
- events_len, true, 0,
+ events_len, true, 0, 0,
events_len, 0);
}
@@ -1604,3 +1622,31 @@ void dsw_event_maintain(void *port, int op)
if (op & RTE_EVENT_DEV_MAINT_OP_FLUSH)
dsw_port_flush_out_buffers(dsw, source_port);
}
+
+int dsw_event_credit_alloc(void *port, unsigned int new_event_threshold,
+ unsigned int num_credits)
+{
+ struct dsw_port *source_port = port;
+ struct dsw_evdev *dsw = source_port->dsw;
+ bool enough_credits;
+
+ if (dsw_should_backpressure(dsw, new_event_threshold))
+ return 0;
+
+ enough_credits = dsw_port_acquire_credits(dsw, source_port, num_credits);
+
+ if (!enough_credits)
+ return 0;
+
+ return num_credits;
+}
+
+int dsw_event_credit_free(void *port, unsigned int num_credits)
+{
+ struct dsw_port *source_port = port;
+ struct dsw_evdev *dsw = source_port->dsw;
+
+ dsw_port_return_credits(dsw, source_port, num_credits);
+
+ return 0;
+}
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index f61dfd80a8..2b58c26cb8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -65,6 +65,7 @@ static struct dsw_xstat_dev dsw_dev_xstats[] = {
}
DSW_GEN_PORT_ACCESS_FN(new_enqueued)
+DSW_GEN_PORT_ACCESS_FN(new_prealloced_enqueued)
DSW_GEN_PORT_ACCESS_FN(forward_enqueued)
DSW_GEN_PORT_ACCESS_FN(release_enqueued)
@@ -136,6 +137,8 @@ DSW_GEN_PORT_ACCESS_FN(last_bg)
static struct dsw_xstats_port dsw_port_xstats[] = {
{ "port_%u_new_enqueued", dsw_xstats_port_get_new_enqueued,
false },
+ { "port_%u_new_prealloced_enqueued",
+ dsw_xstats_port_get_new_prealloced_enqueued, false },
{ "port_%u_forward_enqueued", dsw_xstats_port_get_forward_enqueued,
false },
{ "port_%u_release_enqueued", dsw_xstats_port_get_release_enqueued,
--
2.43.0
Thread overview: 5+ messages
2025-06-29 16:52 [RFC 0/4] Add support for event " Mattias Rönnblom
2025-06-29 16:52 ` [RFC 1/4] eventdev: add support for " Mattias Rönnblom
2025-06-29 16:52 ` Mattias Rönnblom [this message]
2025-06-29 16:52 ` [RFC 3/4] eventdev: add enqueue optimized for prealloced events Mattias Rönnblom
2025-06-29 16:52 ` [RFC 4/4] event/dsw: implement " Mattias Rönnblom