From: "Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
To: Jerin Jacob <jerinj@marvell.com>
Cc: dev@dpdk.org, hofors@lysator.liu.se,
"Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
Subject: [PATCH] event/dsw: optimize serving port logic
Date: Sun, 14 Jan 2024 22:50:51 +0100 [thread overview]
Message-ID: <20240114215051.366016-1-mattias.ronnblom@ericsson.com> (raw)
To reduce flow migration overhead, replace the array-based
representation of which set of ports is bound to a particular queue
with a bitmask-based one.
The maximum number of DSW event ports remains 64, but after this
change it can no longer easily be increased by modifying DSW_MAX_PORTS
and recompiling.
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
drivers/event/dsw/dsw_evdev.c | 39 +++++++++++++++++++++--------------
drivers/event/dsw/dsw_evdev.h | 5 ++++-
drivers/event/dsw/dsw_event.c | 10 +++------
3 files changed, 31 insertions(+), 23 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 1209e73a9d..629c929cb2 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -144,24 +144,23 @@ dsw_queue_release(struct rte_eventdev *dev __rte_unused,
static void
queue_add_port(struct dsw_queue *queue, uint16_t port_id)
{
- queue->serving_ports[queue->num_serving_ports] = port_id;
+ uint64_t port_mask = UINT64_C(1) << port_id;
+
+ queue->serving_ports |= port_mask;
queue->num_serving_ports++;
}
static bool
queue_remove_port(struct dsw_queue *queue, uint16_t port_id)
{
- uint16_t i;
+ uint64_t port_mask = UINT64_C(1) << port_id;
+
+ if (queue->serving_ports & port_mask) {
+ queue->num_serving_ports--;
+ queue->serving_ports ^= port_mask;
+ return true;
+ }
- for (i = 0; i < queue->num_serving_ports; i++)
- if (queue->serving_ports[i] == port_id) {
- uint16_t last_idx = queue->num_serving_ports - 1;
- if (i != last_idx)
- queue->serving_ports[i] =
- queue->serving_ports[last_idx];
- queue->num_serving_ports--;
- return true;
- }
return false;
}
@@ -256,10 +255,20 @@ initial_flow_to_port_assignment(struct dsw_evdev *dsw)
struct dsw_queue *queue = &dsw->queues[queue_id];
uint16_t flow_hash;
for (flow_hash = 0; flow_hash < DSW_MAX_FLOWS; flow_hash++) {
- uint8_t port_idx =
- rte_rand() % queue->num_serving_ports;
- uint8_t port_id =
- queue->serving_ports[port_idx];
+ uint8_t skip =
+ rte_rand_max(queue->num_serving_ports);
+ uint8_t port_id;
+
+ for (port_id = 0;; port_id++) {
+ uint64_t port_mask = UINT64_C(1) << port_id;
+
+ if (queue->serving_ports & port_mask) {
+ if (skip == 0)
+ break;
+ skip--;
+ }
+ }
+
dsw->queues[queue_id].flow_to_port_map[flow_hash] =
port_id;
}
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 6416a8a898..8166340e1e 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -234,12 +234,15 @@ struct dsw_port {
struct dsw_queue {
uint8_t schedule_type;
- uint8_t serving_ports[DSW_MAX_PORTS];
+ uint64_t serving_ports;
uint16_t num_serving_ports;
uint8_t flow_to_port_map[DSW_MAX_FLOWS] __rte_cache_aligned;
};
+/* Limited by the size of the 'serving_ports' bitmask */
+static_assert(DSW_MAX_PORTS <= 64);
+
struct dsw_evdev {
struct rte_eventdev_data *data;
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 93bbeead2e..23488d9030 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -447,13 +447,9 @@ static bool
dsw_is_serving_port(struct dsw_evdev *dsw, uint8_t port_id, uint8_t queue_id)
{
struct dsw_queue *queue = &dsw->queues[queue_id];
- uint16_t i;
-
- for (i = 0; i < queue->num_serving_ports; i++)
- if (queue->serving_ports[i] == port_id)
- return true;
+ uint64_t port_mask = UINT64_C(1) << port_id;
- return false;
+ return queue->serving_ports & port_mask;
}
static bool
@@ -575,7 +571,7 @@ dsw_schedule(struct dsw_evdev *dsw, uint8_t queue_id, uint16_t flow_hash)
/* A single-link queue, or atomic/ordered/parallel but
* with just a single serving port.
*/
- port_id = queue->serving_ports[0];
+ port_id = rte_bsf64(queue->serving_ports);
DSW_LOG_DP(DEBUG, "Event with queue_id %d flow_hash %d is scheduled "
"to port %d.\n", queue_id, flow_hash, port_id);
--
2.34.1
next reply other threads:[~2024-01-14 21:59 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-14 21:50 Mattias Rönnblom [this message]
2024-01-16 12:12 ` [PATCH v2] " Mattias Rönnblom
2024-01-17 7:41 ` [PATCH v3] " Mattias Rönnblom
2024-01-18 7:13 ` Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240114215051.366016-1-mattias.ronnblom@ericsson.com \
--to=mattias.ronnblom@ericsson.com \
--cc=dev@dpdk.org \
--cc=hofors@lysator.liu.se \
--cc=jerinj@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).