[PATCH] event/dlb2: add datapath support in secondary processes
From: Tirthendu Sarkar @ 2025-06-17 4:56 UTC
To: dev; +Cc: bruce.richardson, pravin.pathak, Tirthendu Sarkar
The producer port and consumer queue addresses are configured during
port setup. These are needed during enqueue and dequeue operations.
For an eventdev configured in the primary process, these need to be
stored so that secondary processes can retrieve them for enqueue and
dequeue.
Store the producer port and consumer queue addresses during port setup
and retrieve them into process-local data during secondary process
initialization.
Additionally, add checks in the primary process datapath to retrieve
the stored addresses for ports that were configured in a secondary
process.
Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com>
---
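Note for reviewers: the snippet below is a minimal, self-contained C
sketch of the lazy per-process mapping pattern this patch applies to
the datapath. It is illustrative only; port_table, shared_table,
init_local_mappings() and the other names are made-up stand-ins, not
the DLB2 structures or APIs touched by this patch.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS 4

/* Per-process view of one port, analogous to process_local_port_data. */
struct local_port {
	void *pp_addr;	/* producer port MMIO address */
	void *cq_base;	/* consumer queue base address */
	bool mmaped;	/* true once mapped in this process */
};

/* What the primary records at port create for secondaries to read
 * (e.g. from memory shared between the processes).
 */
struct shared_port {
	void *pp_addr;
	void *cq_base;
};

static struct local_port port_table[MAX_PORTS];		/* process-local */
static struct shared_port shared_table[MAX_PORTS];	/* shared */

/* Secondary-side init: copy the addresses the primary stored. */
static void init_local_mappings(void)
{
	for (int i = 0; i < MAX_PORTS; i++) {
		port_table[i].pp_addr = shared_table[i].pp_addr;
		port_table[i].cq_base = shared_table[i].cq_base;
		port_table[i].mmaped = true;
	}
}

/* Datapath entry: map lazily on first use, mirroring the
 * "if (!port_data->mmaped)" checks added in the enqueue/dequeue paths.
 */
static int enqueue(int port_id)
{
	struct local_port *p = &port_table[port_id];

	if (!p->mmaped)
		init_local_mappings();

	printf("port %d: pp_addr=%p cq_base=%p\n",
	       port_id, p->pp_addr, p->cq_base);
	return 0;
}

int main(void)
{
	/* A primary-side port create would fill shared_table[]. */
	static int dummy_pp, dummy_cq;

	shared_table[0].pp_addr = &dummy_pp;
	shared_table[0].cq_base = &dummy_cq;
	return enqueue(0);
}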
drivers/event/dlb2/dlb2.c | 23 ++++++++-
drivers/event/dlb2/dlb2_iface.c | 2 +-
drivers/event/dlb2/dlb2_iface.h | 2 +-
drivers/event/dlb2/pf/base/dlb2_hw_types.h | 2 +
drivers/event/dlb2/pf/dlb2_pf.c | 57 +++++++++++++++++-----
5 files changed, 70 insertions(+), 16 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 286241ea41..08291b10b8 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -3257,6 +3257,9 @@ __dlb2_event_enqueue_burst_reorder(void *event_port,
port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
+ if (!port_data->mmaped)
+ dlb2_iface_low_level_io_init(&ev_port->dlb2->qm_instance);
+
num_tx = RTE_MIN(num, ev_port->conf.enqueue_depth);
#if DLB2_BYPASS_FENCE_ON_PP == 1
if (!qm_port->is_producer) /* Call memory fense once at the start */
@@ -3390,6 +3393,10 @@ __dlb2_event_enqueue_burst(void *event_port,
i = 0;
port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
+
+ if (!port_data->mmaped)
+ dlb2_iface_low_level_io_init(&ev_port->dlb2->qm_instance);
+
num_tx = RTE_MIN(num, ev_port->conf.enqueue_depth);
while (i < num_tx) {
uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
@@ -4448,11 +4455,17 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
struct dlb2_port *qm_port = &ev_port->qm_port;
struct dlb2_eventdev *dlb2 = ev_port->dlb2;
struct dlb2_reorder *order = qm_port->order;
+ struct process_local_port_data *port_data;
uint16_t cnt;
RTE_ASSERT(ev_port->setup_done);
RTE_ASSERT(ev != NULL);
+ port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
+
+ if (!port_data->mmaped)
+ dlb2_iface_low_level_io_init(&dlb2->qm_instance);
+
if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
uint16_t out_rels = ev_port->outstanding_releases;
if (qm_port->reorder_en) {
@@ -4495,11 +4508,17 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
struct dlb2_port *qm_port = &ev_port->qm_port;
struct dlb2_eventdev *dlb2 = ev_port->dlb2;
struct dlb2_reorder *order = qm_port->order;
+ struct process_local_port_data *port_data;
uint16_t cnt;
RTE_ASSERT(ev_port->setup_done);
RTE_ASSERT(ev != NULL);
+ port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
+
+ if (!port_data->mmaped)
+ dlb2_iface_low_level_io_init(&dlb2->qm_instance);
+
if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
uint16_t out_rels = ev_port->outstanding_releases;
if (qm_port->reorder_en) {
@@ -4983,7 +5002,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
rte_spinlock_init(&dlb2->qm_instance.resource_lock);
- dlb2_iface_low_level_io_init();
+ dlb2_iface_low_level_io_init(NULL);
dlb2_entry_points_init(dev);
@@ -5015,7 +5034,7 @@ dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
return err;
}
- dlb2_iface_low_level_io_init();
+ dlb2_iface_low_level_io_init(&dlb2->qm_instance);
dlb2_entry_points_init(dev);
diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c
index 100db434d0..72d5b82bc5 100644
--- a/drivers/event/dlb2/dlb2_iface.c
+++ b/drivers/event/dlb2/dlb2_iface.c
@@ -12,7 +12,7 @@
* If PCI (PF PMD), these will be implemented locally in user mode.
*/
-void (*dlb2_iface_low_level_io_init)(void);
+void (*dlb2_iface_low_level_io_init)(struct dlb2_hw_dev *handle);
int (*dlb2_iface_open)(struct dlb2_hw_dev *handle, const char *name);
diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h
index dc0c446ce8..5106b860b9 100644
--- a/drivers/event/dlb2/dlb2_iface.h
+++ b/drivers/event/dlb2/dlb2_iface.h
@@ -11,7 +11,7 @@
* If PCI (PF PMD), these will be implemented locally in user mode.
*/
-extern void (*dlb2_iface_low_level_io_init)(void);
+extern void (*dlb2_iface_low_level_io_init)(struct dlb2_hw_dev *handle);
extern int (*dlb2_iface_open)(struct dlb2_hw_dev *handle, const char *name);
diff --git a/drivers/event/dlb2/pf/base/dlb2_hw_types.h b/drivers/event/dlb2/pf/base/dlb2_hw_types.h
index be09363893..6a935f0bcd 100644
--- a/drivers/event/dlb2/pf/base/dlb2_hw_types.h
+++ b/drivers/event/dlb2/pf/base/dlb2_hw_types.h
@@ -139,6 +139,7 @@ struct dlb2_dir_pq_pair {
struct dlb2_list_entry func_list;
struct dlb2_resource_id id;
struct dlb2_resource_id domain_id;
+ struct process_local_port_data port_data;
u32 ref_cnt;
u8 init_tkn_cnt;
u8 queue_configured;
@@ -178,6 +179,7 @@ struct dlb2_ldb_port {
struct dlb2_resource_id domain_id;
/* The qid_map represents the hardware QID mapping state. */
struct dlb2_ldb_port_qid_map qid_map[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
+ struct process_local_port_data port_data;
u32 hist_list_entry_base;
u32 hist_list_entry_limit;
u32 ref_cnt;
diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c
index ed4e6e424c..ac432b81ad 100644
--- a/drivers/event/dlb2/pf/dlb2_pf.c
+++ b/drivers/event/dlb2/pf/dlb2_pf.c
@@ -45,20 +45,39 @@ static unsigned int dlb2_qe_sa_pct = 1;
static unsigned int dlb2_qid_sa_pct;
static void
-dlb2_pf_low_level_io_init(void)
+dlb2_pf_low_level_io_init(struct dlb2_hw_dev *handle)
{
int i;
- /* Addresses will be initialized at port create */
- for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
- /* First directed ports */
- dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
- dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
- dlb2_port[i][DLB2_DIR_PORT].mmaped = true;
-
- /* Now load balanced ports */
- dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
- dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
- dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
+
+ if (handle == NULL) {
+ /* Addresses will be initialized at port create in primary process */
+ for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
+ /* First directed ports */
+ dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
+ dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
+ dlb2_port[i][DLB2_DIR_PORT].mmaped = false;
+
+ /* Now load balanced ports */
+ dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
+ dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
+ dlb2_port[i][DLB2_LDB_PORT].mmaped = false;
+ }
+ } else {
+ /* Retrieve stored addresses in secondary processes */
+ struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+ struct dlb2_ldb_port *ldb_ports = dlb2_dev->hw.rsrcs.ldb_ports;
+ struct dlb2_dir_pq_pair *dir_ports = dlb2_dev->hw.rsrcs.dir_pq_pairs;
+
+ for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
+ dlb2_port[i][DLB2_LDB_PORT].cq_base = ldb_ports[i].port_data.cq_base;
+ dlb2_port[i][DLB2_LDB_PORT].pp_addr = ldb_ports[i].port_data.pp_addr;
+ dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
+ }
+ for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS_V2_5; i++) {
+ dlb2_port[i][DLB2_DIR_PORT].cq_base = dir_ports[i].port_data.cq_base;
+ dlb2_port[i][DLB2_DIR_PORT].pp_addr = dir_ports[i].port_data.pp_addr;
+ dlb2_port[i][DLB2_DIR_PORT].mmaped = true;
+ }
}
}
@@ -304,6 +323,7 @@ dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
enum dlb2_cq_poll_modes poll_mode)
{
struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+ struct process_local_port_data *port_data;
struct dlb2_cmd_response response = {0};
struct dlb2_port_memory port_memory;
int ret, cq_alloc_depth;
@@ -355,6 +375,7 @@ dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
(void *)(pp_base + (rte_mem_page_size() * response.id));
dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
+ dlb2_port[response.id][DLB2_LDB_PORT].mmaped = true;
memset(&port_memory, 0, sizeof(port_memory));
dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;
@@ -363,6 +384,11 @@ dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
cfg->response = response;
+ /* Store cq_base and pp_addr for secondary processes */
+ port_data = &dlb2_dev->hw.rsrcs.ldb_ports[response.id].port_data;
+ port_data->pp_addr = dlb2_port[response.id][DLB2_LDB_PORT].pp_addr;
+ port_data->cq_base = (struct dlb2_dequeue_qe *)cq_base;
+
return 0;
create_port_err:
@@ -380,6 +406,7 @@ dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
enum dlb2_cq_poll_modes poll_mode)
{
struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+ struct process_local_port_data *port_data;
struct dlb2_cmd_response response = {0};
struct dlb2_port_memory port_memory;
int ret;
@@ -431,6 +458,7 @@ dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
(void *)(port_base);
+ dlb2_port[response.id][DLB2_DIR_PORT].mmaped = true;
memset(&port_memory, 0, sizeof(port_memory));
dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;
@@ -439,6 +467,11 @@ dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
cfg->response = response;
+ /* Store cq_base and pp_addr for secondary processes */
+ port_data = &dlb2_dev->hw.rsrcs.dir_pq_pairs[response.id].port_data;
+ port_data->pp_addr = dlb2_port[response.id][DLB2_DIR_PORT].pp_addr;
+ port_data->cq_base = (struct dlb2_dequeue_qe *)cq_base;
+
return 0;
create_port_err:
--
2.43.5