* [PATCH 2/3] eventdev: update examples to use port quiesce
2022-04-27 11:32 [PATCH 1/3] eventdev: add function to quiesce an event port Pavan Nikhilesh
@ 2022-04-27 11:32 ` Pavan Nikhilesh
2022-04-27 11:32 ` [PATCH 3/3] event/cnxk: implement event port quiesce function Pavan Nikhilesh
` (3 subsequent siblings)
4 siblings, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2022-04-27 11:32 UTC (permalink / raw)
To: jerinj, Harry van Haaren, Radu Nicolau, Akhil Goyal,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
Quiesce event ports used by the worker cores on exit to free up
any outstanding resources.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Change-Id: Iea1f933d4f4926630d82a9883fbe3f1e75876097
---
Depends-on: Series-22677
app/test-eventdev/test_perf_common.c | 8 ++++++++
app/test-eventdev/test_pipeline_common.c | 12 ++++++++++++
examples/eventdev_pipeline/pipeline_common.h | 9 +++++++++
examples/ipsec-secgw/ipsec_worker.c | 13 +++++++++++++
examples/l2fwd-event/l2fwd_common.c | 13 +++++++++++++
examples/l3fwd/l3fwd_event.c | 13 +++++++++++++
6 files changed, 68 insertions(+)
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index f673a9fddd..2016583979 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -985,6 +985,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}
+static void
+perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args)
+{
+ rte_mempool_put(args, ev.event_ptr);
+}
+
void
perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
@@ -1000,6 +1007,7 @@ perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
}
+ rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
}
void
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index a8dd070000..82e5745071 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -518,6 +518,16 @@ pipeline_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+pipeline_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ pipeline_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
uint16_t enq, uint16_t deq)
@@ -542,6 +552,8 @@ pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
rte_event_enqueue_burst(dev, port, ev, deq);
}
+
+ rte_event_port_quiesce(dev, port, pipeline_event_port_flush, NULL);
}
void
diff --git a/examples/eventdev_pipeline/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h
index 9899b257b0..28b6ab85ff 100644
--- a/examples/eventdev_pipeline/pipeline_common.h
+++ b/examples/eventdev_pipeline/pipeline_common.h
@@ -140,6 +140,13 @@ schedule_devices(unsigned int lcore_id)
}
}
+static void
+event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ rte_mempool_put(args, ev.event_ptr);
+}
+
static inline void
worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
uint16_t nb_enq, uint16_t nb_deq)
@@ -160,6 +167,8 @@ worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
}
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 3df5acf384..7f259e4cf3 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -737,6 +737,13 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
* selected.
*/
+static void
+ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ rte_pktmbuf_free(ev.mbuf);
+}
+
/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS 2
@@ -861,6 +868,9 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
rte_event_enqueue_burst(links[0].eventdev_id,
links[0].event_port_id, &ev, 1);
}
+
+ rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+ ipsec_event_port_flush, NULL);
}
/*
@@ -974,6 +984,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
rte_event_enqueue_burst(links[0].eventdev_id,
links[0].event_port_id, &ev, 1);
}
+
+ rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+ ipsec_event_port_flush, NULL);
}
static uint8_t
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index 15bfe790a0..41a0d3f22f 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -128,6 +128,16 @@ l2fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+l2fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ l2fwd_event_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
struct rte_event events[], uint16_t nb_enq,
@@ -147,4 +157,7 @@ l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(event_d_id, port_id, l2fwd_event_port_flush,
+ NULL);
}
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index a14a21b414..0b58475c85 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -301,6 +301,16 @@ l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+l3fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ l3fwd_event_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
struct rte_event events[], uint16_t nb_enq,
@@ -320,4 +330,7 @@ l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush,
+ NULL);
}
--
2.35.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 3/3] event/cnxk: implement event port quiesce function
2022-04-27 11:32 [PATCH 1/3] eventdev: add function to quiesce an event port Pavan Nikhilesh
2022-04-27 11:32 ` [PATCH 2/3] eventdev: update examples to use port quiesce Pavan Nikhilesh
@ 2022-04-27 11:32 ` Pavan Nikhilesh
2022-04-27 11:37 ` [PATCH 1/3 v2] eventdev: add function to quiesce an event port Pavan Nikhilesh
` (2 subsequent siblings)
4 siblings, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2022-04-27 11:32 UTC (permalink / raw)
To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev
Implement event port quiesce function to clean up any lcore
resources used.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Change-Id: I7dda3d54dc698645d25ebbfbabd81760940fe649
---
drivers/event/cnxk/cn10k_eventdev.c | 78 ++++++++++++++++++++++++++---
drivers/event/cnxk/cn9k_eventdev.c | 60 +++++++++++++++++++++-
2 files changed, 130 insertions(+), 8 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 94829e789c..d84c5d2d1e 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -167,15 +167,23 @@ cn10k_sso_hws_reset(void *arg, void *hws)
uint64_t u64[2];
} gw;
uint8_t pend_tt;
+ bool is_pend;
plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
/* Wait till getwork/swtp/waitw/desched completes. */
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+ pend_state = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ ws->swtag_req)
+ is_pend = true;
+
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
BIT_ULL(56) | BIT_ULL(54)));
pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
- if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
cnxk_sso_hws_swtag_untag(base +
SSOW_LF_GWS_OP_SWTAG_UNTAG);
@@ -189,15 +197,10 @@ cn10k_sso_hws_reset(void *arg, void *hws)
switch (dev->gw_mode) {
case CN10K_GW_MODE_PREF:
+ case CN10K_GW_MODE_PREF_WFE:
while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
;
break;
- case CN10K_GW_MODE_PREF_WFE:
- while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
- SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
- continue;
- plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
- break;
case CN10K_GW_MODE_NONE:
default:
break;
@@ -533,6 +536,66 @@ cn10k_sso_port_release(void *port)
rte_free(gws_cookie);
}
+static void
+cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+ eventdev_port_flush_t flush_cb, void *args)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cn10k_sso_hws *ws = port;
+ struct rte_event ev;
+ uint64_t ptag;
+ bool is_pend;
+
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+ ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ if (ptag & (BIT_ULL(62) | BIT_ULL(54)) || ws->swtag_req)
+ is_pend = true;
+ do {
+ ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ } while (ptag &
+ (BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | BIT_ULL(54)));
+
+ cn10k_sso_hws_get_work_empty(ws, &ev,
+ (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+ NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+ if (is_pend && ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+
+ /* Check if we have work in PRF_WQE0, if so extract it. */
+ switch (dev->gw_mode) {
+ case CN10K_GW_MODE_PREF:
+ case CN10K_GW_MODE_PREF_WFE:
+ while (plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0) &
+ BIT_ULL(63))
+ ;
+ break;
+ case CN10K_GW_MODE_NONE:
+ default:
+ break;
+ }
+
+ if (CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0)) !=
+ SSO_TT_EMPTY) {
+ plt_write64(BIT_ULL(16) | 1,
+ ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+ cn10k_sso_hws_get_work_empty(
+ ws, &ev,
+ (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+ NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+ if (ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+ }
+ ws->swtag_req = 0;
+ plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+}
+
static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
@@ -852,6 +915,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn10k_sso_port_setup,
.port_release = cn10k_sso_port_release,
+ .port_quiesce = cn10k_sso_port_quiesce,
.port_link = cn10k_sso_port_link,
.port_unlink = cn10k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 987888d3db..46885c5f92 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -186,6 +186,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
uint64_t pend_state;
uint8_t pend_tt;
uintptr_t base;
+ bool is_pend;
uint64_t tag;
uint8_t i;
@@ -193,6 +194,13 @@ cn9k_sso_hws_reset(void *arg, void *hws)
ws = hws;
for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
base = dev->dual_ws ? dws->base[i] : ws->base;
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+ pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+ ws->swtag_req))
+ is_pend = true;
/* Wait till getwork/swtp/waitw/desched completes. */
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
@@ -201,7 +209,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
tag = plt_read64(base + SSOW_LF_GWS_TAG);
pend_tt = (tag >> 32) & 0x3;
- if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_TT_ATOMIC ||
pend_tt == SSO_TT_ORDERED)
cnxk_sso_hws_swtag_untag(
@@ -213,7 +221,14 @@ cn9k_sso_hws_reset(void *arg, void *hws)
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
} while (pend_state & BIT_ULL(58));
+
+ plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
}
+
+ if (dev->dual_ws)
+ dws->swtag_req = 0;
+ else
+ ws->swtag_req = 0;
}
void
@@ -789,6 +804,48 @@ cn9k_sso_port_release(void *port)
rte_free(gws_cookie);
}
+static void
+cn9k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+ eventdev_port_flush_t flush_cb, void *args)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cn9k_sso_hws_dual *dws;
+ struct cn9k_sso_hws *ws;
+ struct rte_event ev;
+ uintptr_t base;
+ uint64_t ptag;
+ bool is_pend;
+ uint8_t i;
+
+ dws = port;
+ ws = port;
+ for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
+ base = dev->dual_ws ? dws->base[i] : ws->base;
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+ ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ if (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+ ws->swtag_req))
+ is_pend = true;
+ /* Wait till getwork/swtp/waitw/desched completes. */
+ do {
+ ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ } while (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
+ BIT_ULL(56)));
+
+ cn9k_sso_hws_get_work_empty(
+ base, &ev, dev->rx_offloads,
+ dev->dual_ws ? dws->lookup_mem : ws->lookup_mem,
+ dev->dual_ws ? dws->tstamp : ws->tstamp);
+ if (is_pend && ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+ }
+}
+
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
@@ -1090,6 +1147,7 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn9k_sso_port_setup,
.port_release = cn9k_sso_port_release,
+ .port_quiesce = cn9k_sso_port_quiesce,
.port_link = cn9k_sso_port_link,
.port_unlink = cn9k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
--
2.35.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 1/3 v2] eventdev: add function to quiesce an event port
2022-04-27 11:32 [PATCH 1/3] eventdev: add function to quiesce an event port Pavan Nikhilesh
2022-04-27 11:32 ` [PATCH 2/3] eventdev: update examples to use port quiesce Pavan Nikhilesh
2022-04-27 11:32 ` [PATCH 3/3] event/cnxk: implement event port quiesce function Pavan Nikhilesh
@ 2022-04-27 11:37 ` Pavan Nikhilesh
2022-04-27 11:37 ` [PATCH 2/3 v2] eventdev: update examples to use port quiesce Pavan Nikhilesh
` (2 more replies)
2022-05-04 9:02 ` [PATCH " Ray Kinsella
2022-05-09 17:29 ` Jerin Jacob
4 siblings, 3 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2022-04-27 11:37 UTC (permalink / raw)
To: jerinj, Ray Kinsella; +Cc: dev, Pavan Nikhilesh
Add function to quiesce any core specific resources consumed by
the event port.
When the application decides to migrate the event port to another lcore
or tear down the current lcore it may call `rte_event_port_quiesce`
to make sure that all the data associated with the event port are released
from the lcore, this might also include any prefetched events.
While releasing the event port from the lcore, this function calls the
user-provided flush callback once per event.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
v2 Changes:
- Remove internal Change-Id tag from commit messages.
lib/eventdev/eventdev_pmd.h | 19 +++++++++++++++++++
lib/eventdev/rte_eventdev.c | 19 +++++++++++++++++++
lib/eventdev/rte_eventdev.h | 33 +++++++++++++++++++++++++++++++++
lib/eventdev/version.map | 3 +++
4 files changed, 74 insertions(+)
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index ce469d47a6..cf9f2146a1 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -381,6 +381,23 @@ typedef int (*eventdev_port_setup_t)(struct rte_eventdev *dev,
*/
typedef void (*eventdev_port_release_t)(void *port);
+/**
+ * Quiesce any core specific resources consumed by the event port
+ *
+ * @param dev
+ * Event device pointer.
+ * @param port
+ * Event port pointer.
+ * @param flush_cb
+ * User-provided event flush function.
+ * @param args
+ * Arguments to be passed to the user-provided event flush function.
+ *
+ */
+typedef void (*eventdev_port_quiesce_t)(struct rte_eventdev *dev, void *port,
+ eventdev_port_flush_t flush_cb,
+ void *args);
+
/**
* Link multiple source event queues to destination event port.
*
@@ -1218,6 +1235,8 @@ struct eventdev_ops {
/**< Set up an event port. */
eventdev_port_release_t port_release;
/**< Release an event port. */
+ eventdev_port_quiesce_t port_quiesce;
+ /**< Quiesce an event port. */
eventdev_port_link_t port_link;
/**< Link event queues to an event port. */
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 532a253553..541fa5dc61 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -730,6 +730,25 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
return 0;
}
+void
+rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
+ eventdev_port_flush_t release_cb, void *args)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
+ dev = &rte_eventdevs[dev_id];
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return;
+ }
+
+ if (dev->dev_ops->port_quiesce)
+ (*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
+ release_cb, args);
+}
+
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
uint32_t *attr_value)
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 42a5660169..c86d8a5576 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -830,6 +830,39 @@ int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
const struct rte_event_port_conf *port_conf);
+typedef void (*eventdev_port_flush_t)(uint8_t dev_id, struct rte_event event,
+ void *arg);
+/**< Callback function prototype that can be passed during
+ * rte_event_port_release(), invoked once per released event.
+ */
+
+/**
+ * Quiesce any core specific resources consumed by the event port.
+ *
+ * Event ports are generally coupled with lcores, and a given Hardware
+ * implementation might require the PMD to store port specific data in the
+ * lcore.
+ * When the application decides to migrate the event port to another lcore
+ * or tear down the current lcore it may call `rte_event_port_quiesce`
+ * to make sure that all the data associated with the event port are released
+ * from the lcore, this might also include any prefetched events.
+ * While releasing the event port from the lcore, this function calls the
+ * user-provided flush callback once per event.
+ *
+ * The event port specific config is not reset.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The index of the event port to quiesce. The value must be in the range
+ * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
+ * @param release_cb
+ * Callback function invoked once per flushed event.
+ */
+__rte_experimental
+void rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
+ eventdev_port_flush_t release_cb, void *args);
+
/**
* The queue depth of the port on the enqueue side
*/
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd5dada07f..1907093539 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -108,6 +108,9 @@ EXPERIMENTAL {
# added in 22.03
rte_event_eth_rx_adapter_event_port_get;
+
+ # added in 22.07
+ rte_event_port_quiesce;
};
INTERNAL {
--
2.35.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 2/3 v2] eventdev: update examples to use port quiesce
2022-04-27 11:37 ` [PATCH 1/3 v2] eventdev: add function to quiesce an event port Pavan Nikhilesh
@ 2022-04-27 11:37 ` Pavan Nikhilesh
2022-04-27 11:37 ` [PATCH 3/3 v2] event/cnxk: implement event port quiesce function Pavan Nikhilesh
2022-05-13 17:58 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port pbhagavatula
2 siblings, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2022-04-27 11:37 UTC (permalink / raw)
To: jerinj, Harry van Haaren, Radu Nicolau, Akhil Goyal,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
Quiesce event ports used by the worker cores on exit to free up
any outstanding resources.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
Depends-on: Series-22677
app/test-eventdev/test_perf_common.c | 8 ++++++++
app/test-eventdev/test_pipeline_common.c | 12 ++++++++++++
examples/eventdev_pipeline/pipeline_common.h | 9 +++++++++
examples/ipsec-secgw/ipsec_worker.c | 13 +++++++++++++
examples/l2fwd-event/l2fwd_common.c | 13 +++++++++++++
examples/l3fwd/l3fwd_event.c | 13 +++++++++++++
6 files changed, 68 insertions(+)
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index f673a9fddd..2016583979 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -985,6 +985,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}
+static void
+perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args)
+{
+ rte_mempool_put(args, ev.event_ptr);
+}
+
void
perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
@@ -1000,6 +1007,7 @@ perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
}
+ rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
}
void
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index a8dd070000..82e5745071 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -518,6 +518,16 @@ pipeline_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+pipeline_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ pipeline_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
uint16_t enq, uint16_t deq)
@@ -542,6 +552,8 @@ pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
rte_event_enqueue_burst(dev, port, ev, deq);
}
+
+ rte_event_port_quiesce(dev, port, pipeline_event_port_flush, NULL);
}
void
diff --git a/examples/eventdev_pipeline/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h
index 9899b257b0..28b6ab85ff 100644
--- a/examples/eventdev_pipeline/pipeline_common.h
+++ b/examples/eventdev_pipeline/pipeline_common.h
@@ -140,6 +140,13 @@ schedule_devices(unsigned int lcore_id)
}
}
+static void
+event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ rte_mempool_put(args, ev.event_ptr);
+}
+
static inline void
worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
uint16_t nb_enq, uint16_t nb_deq)
@@ -160,6 +167,8 @@ worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
}
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 3df5acf384..7f259e4cf3 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -737,6 +737,13 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
* selected.
*/
+static void
+ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ rte_pktmbuf_free(ev.mbuf);
+}
+
/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS 2
@@ -861,6 +868,9 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
rte_event_enqueue_burst(links[0].eventdev_id,
links[0].event_port_id, &ev, 1);
}
+
+ rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+ ipsec_event_port_flush, NULL);
}
/*
@@ -974,6 +984,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
rte_event_enqueue_burst(links[0].eventdev_id,
links[0].event_port_id, &ev, 1);
}
+
+ rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+ ipsec_event_port_flush, NULL);
}
static uint8_t
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index 15bfe790a0..41a0d3f22f 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -128,6 +128,16 @@ l2fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+l2fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ l2fwd_event_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
struct rte_event events[], uint16_t nb_enq,
@@ -147,4 +157,7 @@ l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(event_d_id, port_id, l2fwd_event_port_flush,
+ NULL);
}
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index a14a21b414..0b58475c85 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -301,6 +301,16 @@ l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+l3fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ l3fwd_event_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
struct rte_event events[], uint16_t nb_enq,
@@ -320,4 +330,7 @@ l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush,
+ NULL);
}
--
2.35.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 3/3 v2] event/cnxk: implement event port quiesce function
2022-04-27 11:37 ` [PATCH 1/3 v2] eventdev: add function to quiesce an event port Pavan Nikhilesh
2022-04-27 11:37 ` [PATCH 2/3 v2] eventdev: update examples to use port quiesce Pavan Nikhilesh
@ 2022-04-27 11:37 ` Pavan Nikhilesh
2022-05-13 17:58 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port pbhagavatula
2 siblings, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2022-04-27 11:37 UTC (permalink / raw)
To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev
Implement event port quiesce function to clean up any lcore
resources used.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
drivers/event/cnxk/cn10k_eventdev.c | 78 ++++++++++++++++++++++++++---
drivers/event/cnxk/cn9k_eventdev.c | 60 +++++++++++++++++++++-
2 files changed, 130 insertions(+), 8 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 94829e789c..d84c5d2d1e 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -167,15 +167,23 @@ cn10k_sso_hws_reset(void *arg, void *hws)
uint64_t u64[2];
} gw;
uint8_t pend_tt;
+ bool is_pend;
plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
/* Wait till getwork/swtp/waitw/desched completes. */
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+ pend_state = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ ws->swtag_req)
+ is_pend = true;
+
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
BIT_ULL(56) | BIT_ULL(54)));
pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
- if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
cnxk_sso_hws_swtag_untag(base +
SSOW_LF_GWS_OP_SWTAG_UNTAG);
@@ -189,15 +197,10 @@ cn10k_sso_hws_reset(void *arg, void *hws)
switch (dev->gw_mode) {
case CN10K_GW_MODE_PREF:
+ case CN10K_GW_MODE_PREF_WFE:
while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
;
break;
- case CN10K_GW_MODE_PREF_WFE:
- while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
- SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
- continue;
- plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
- break;
case CN10K_GW_MODE_NONE:
default:
break;
@@ -533,6 +536,66 @@ cn10k_sso_port_release(void *port)
rte_free(gws_cookie);
}
+static void
+cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+ eventdev_port_flush_t flush_cb, void *args)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cn10k_sso_hws *ws = port;
+ struct rte_event ev;
+ uint64_t ptag;
+ bool is_pend;
+
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+ ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ if (ptag & (BIT_ULL(62) | BIT_ULL(54)) || ws->swtag_req)
+ is_pend = true;
+ do {
+ ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ } while (ptag &
+ (BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | BIT_ULL(54)));
+
+ cn10k_sso_hws_get_work_empty(ws, &ev,
+ (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+ NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+ if (is_pend && ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+
+ /* Check if we have work in PRF_WQE0, if so extract it. */
+ switch (dev->gw_mode) {
+ case CN10K_GW_MODE_PREF:
+ case CN10K_GW_MODE_PREF_WFE:
+ while (plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0) &
+ BIT_ULL(63))
+ ;
+ break;
+ case CN10K_GW_MODE_NONE:
+ default:
+ break;
+ }
+
+ if (CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0)) !=
+ SSO_TT_EMPTY) {
+ plt_write64(BIT_ULL(16) | 1,
+ ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+ cn10k_sso_hws_get_work_empty(
+ ws, &ev,
+ (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+ NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+ if (ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+ }
+ ws->swtag_req = 0;
+ plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+}
+
static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
@@ -852,6 +915,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn10k_sso_port_setup,
.port_release = cn10k_sso_port_release,
+ .port_quiesce = cn10k_sso_port_quiesce,
.port_link = cn10k_sso_port_link,
.port_unlink = cn10k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 987888d3db..46885c5f92 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -186,6 +186,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
uint64_t pend_state;
uint8_t pend_tt;
uintptr_t base;
+ bool is_pend;
uint64_t tag;
uint8_t i;
@@ -193,6 +194,13 @@ cn9k_sso_hws_reset(void *arg, void *hws)
ws = hws;
for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
base = dev->dual_ws ? dws->base[i] : ws->base;
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless its a SWTAG. */
+ pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+ ws->swtag_req))
+ is_pend = true;
/* Wait till getwork/swtp/waitw/desched completes. */
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
@@ -201,7 +209,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
tag = plt_read64(base + SSOW_LF_GWS_TAG);
pend_tt = (tag >> 32) & 0x3;
- if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_TT_ATOMIC ||
pend_tt == SSO_TT_ORDERED)
cnxk_sso_hws_swtag_untag(
@@ -213,7 +221,14 @@ cn9k_sso_hws_reset(void *arg, void *hws)
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
} while (pend_state & BIT_ULL(58));
+
+ plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
}
+
+ if (dev->dual_ws)
+ dws->swtag_req = 0;
+ else
+ ws->swtag_req = 0;
}
void
@@ -789,6 +804,48 @@ cn9k_sso_port_release(void *port)
rte_free(gws_cookie);
}
+static void
+cn9k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+ eventdev_port_flush_t flush_cb, void *args)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cn9k_sso_hws_dual *dws;
+ struct cn9k_sso_hws *ws;
+ struct rte_event ev;
+ uintptr_t base;
+ uint64_t ptag;
+ bool is_pend;
+ uint8_t i;
+
+ dws = port;
+ ws = port;
+ for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
+ base = dev->dual_ws ? dws->base[i] : ws->base;
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless its a SWTAG. */
+ ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ if (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+ ws->swtag_req))
+ is_pend = true;
+ /* Wait till getwork/swtp/waitw/desched completes. */
+ do {
+ ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ } while (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
+ BIT_ULL(56)));
+
+ cn9k_sso_hws_get_work_empty(
+ base, &ev, dev->rx_offloads,
+ dev->dual_ws ? dws->lookup_mem : ws->lookup_mem,
+ dev->dual_ws ? dws->tstamp : ws->tstamp);
+ if (is_pend && ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+ }
+}
+
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
@@ -1090,6 +1147,7 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn9k_sso_port_setup,
.port_release = cn9k_sso_port_release,
+ .port_quiesce = cn9k_sso_port_quiesce,
.port_link = cn9k_sso_port_link,
.port_unlink = cn9k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
--
2.35.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v3 1/3] eventdev: add function to quiesce an event port
2022-04-27 11:37 ` [PATCH 1/3 v2] eventdev: add function to quiesce an event port Pavan Nikhilesh
2022-04-27 11:37 ` [PATCH 2/3 v2] eventdev: update examples to use port quiesce Pavan Nikhilesh
2022-04-27 11:37 ` [PATCH 3/3 v2] event/cnxk: implement event port quiesce function Pavan Nikhilesh
@ 2022-05-13 17:58 ` pbhagavatula
2022-05-13 17:58 ` [PATCH v3 2/3] eventdev: update examples to use port quiesce pbhagavatula
` (2 more replies)
2 siblings, 3 replies; 12+ messages in thread
From: pbhagavatula @ 2022-05-13 17:58 UTC (permalink / raw)
To: jerinj, Ray Kinsella
Cc: dev, jay.jayatheerthan, erik.g.carrillo, abhinandan.gujjar,
timothy.mcdaniel, sthotton, hemant.agrawal, nipun.gupta,
harry.van.haaren, mattias.ronnblom, liangma, peter.mccarthy,
Pavan Nikhilesh
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add function to quiesce any core specific resources consumed by
the event port.
When the application decides to migrate the event port to another lcore
or tear down the current lcore, it may need to call `rte_event_port_quiesce`
to make sure that all the data associated with the event port are released
from the lcore, this might also include any prefetched events.
While releasing the event port from the lcore, this function calls the
user-provided flush callback once per event.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
v3 Changes:
- Add `rte_` prefix to callback function.
- Fix API documentation issues.
- Update eventdev documentation.
v2 Changes:
- Remove internal Change-Id tag from commit messages.
doc/guides/prog_guide/eventdev.rst | 35 ++++++++++++++++++++++++++++
lib/eventdev/eventdev_pmd.h | 19 +++++++++++++++
lib/eventdev/rte_eventdev.c | 19 +++++++++++++++
lib/eventdev/rte_eventdev.h | 37 ++++++++++++++++++++++++++++++
lib/eventdev/version.map | 3 +++
5 files changed, 113 insertions(+)
diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst
index a49e486a30..afee674ee1 100644
--- a/doc/guides/prog_guide/eventdev.rst
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -412,6 +412,41 @@ An event driven worker thread has following typical workflow on fastpath:
rte_event_enqueue_burst(...);
}
+Quiescing Event Ports
+~~~~~~~~~~~~~~~~~~~~~
+
+To migrate the event port to another lcore or while tearing down a worker core
+using an event port ``rte_event_port_quiesce()`` can be invoked to make sure
+that all the data associated with the event port are released from the worker
+core, this might also include any prefetched events.
+
+A flush callback can be passed to the function to handle any outstanding events.
+
+.. code-block:: c
+
+ rte_event_port_quiesce(dev_id, port_id, release_cb, NULL);
+
+.. Note::
+
+ The event port specific config shall not be reset when this API is
+ invoked.
+
+Stopping the EventDev
+~~~~~~~~~~~~~~~~~~~~~
+
+A single function call tells the eventdev instance to stop processing
+events. A flush callback can be registered to free any inflight events
+using ``rte_event_dev_stop_flush_callback_register()`` function.
+
+.. code-block:: c
+
+ int err = rte_event_dev_stop(dev_id);
+
+.. Note::
+
+ The event producers such as event_eth_rx_adapter, event_timer_adapter
+ and event_crypto_adapter need to be stopped before stopping the event
+ device.
Summary
-------
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index ce469d47a6..6173f22b9b 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -381,6 +381,23 @@ typedef int (*eventdev_port_setup_t)(struct rte_eventdev *dev,
*/
typedef void (*eventdev_port_release_t)(void *port);
+/**
+ * Quiesce any core specific resources consumed by the event port
+ *
+ * @param dev
+ * Event device pointer.
+ * @param port
+ * Event port pointer.
+ * @param flush_cb
+ * User-provided event flush function.
+ * @param args
+ * Arguments to be passed to the user-provided event flush function.
+ *
+ */
+typedef void (*eventdev_port_quiesce_t)(struct rte_eventdev *dev, void *port,
+ rte_eventdev_port_flush_t flush_cb,
+ void *args);
+
/**
* Link multiple source event queues to destination event port.
*
@@ -1218,6 +1235,8 @@ struct eventdev_ops {
/**< Set up an event port. */
eventdev_port_release_t port_release;
/**< Release an event port. */
+ eventdev_port_quiesce_t port_quiesce;
+ /**< Quiesce an event port. */
eventdev_port_link_t port_link;
/**< Link event queues to an event port. */
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 532a253553..0250e57f24 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -730,6 +730,25 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
return 0;
}
+void
+rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
+ rte_eventdev_port_flush_t release_cb, void *args)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
+ dev = &rte_eventdevs[dev_id];
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return;
+ }
+
+ if (dev->dev_ops->port_quiesce)
+ (*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
+ release_cb, args);
+}
+
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
uint32_t *attr_value)
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 42a5660169..1a46d289a9 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -830,6 +830,43 @@ int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
const struct rte_event_port_conf *port_conf);
+typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
+ struct rte_event event, void *arg);
+/**< Callback function prototype that can be passed during
+ * rte_event_port_release(), invoked once per a released event.
+ */
+
+/**
+ * Quiesce any core specific resources consumed by the event port.
+ *
+ * Event ports are generally coupled with lcores, and a given Hardware
+ * implementation might require the PMD to store port specific data in the
+ * lcore.
+ * When the application decides to migrate the event port to another lcore
+ * or teardown the current lcore it may to call `rte_event_port_quiesce`
+ * to make sure that all the data associated with the event port are released
+ * from the lcore, this might also include any prefetched events.
+ * While releasing the event port from the lcore, this function calls the
+ * user-provided flush callback once per event.
+ *
+ * @note The event port specific config shall not be reset when this API is
+ * called.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The index of the event port to setup. The value must be in the range
+ * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
+ * @param release_cb
+ * Callback function invoked once per flushed event.
+ * @param args
+ * Argument supplied to callback.
+ */
+__rte_experimental
+void
+rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
+ rte_eventdev_port_flush_t release_cb, void *args);
+
/**
* The queue depth of the port on the enqueue side
*/
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd5dada07f..1907093539 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -108,6 +108,9 @@ EXPERIMENTAL {
# added in 22.03
rte_event_eth_rx_adapter_event_port_get;
+
+ # added in 22.07
+ rte_event_port_quiesce;
};
INTERNAL {
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v3 2/3] eventdev: update examples to use port quiesce
2022-05-13 17:58 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port pbhagavatula
@ 2022-05-13 17:58 ` pbhagavatula
2022-05-13 17:58 ` [PATCH v3 3/3] event/cnxk: implement event port quiesce function pbhagavatula
2022-05-17 10:04 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port Jerin Jacob
2 siblings, 0 replies; 12+ messages in thread
From: pbhagavatula @ 2022-05-13 17:58 UTC (permalink / raw)
To: jerinj, Harry van Haaren, Radu Nicolau, Akhil Goyal,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev, jay.jayatheerthan, erik.g.carrillo, abhinandan.gujjar,
timothy.mcdaniel, sthotton, hemant.agrawal, nipun.gupta,
mattias.ronnblom, liangma, peter.mccarthy
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Quiesce event ports used by the worker cores on exit to free up
any outstanding resources.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
app/test-eventdev/test_perf_common.c | 8 ++++++++
app/test-eventdev/test_pipeline_common.c | 12 ++++++++++++
examples/eventdev_pipeline/pipeline_common.h | 9 +++++++++
examples/ipsec-secgw/ipsec_worker.c | 13 +++++++++++++
examples/l2fwd-event/l2fwd_common.c | 13 +++++++++++++
examples/l3fwd/l3fwd_event.c | 13 +++++++++++++
6 files changed, 68 insertions(+)
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index b51a100425..8e3836280d 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -985,6 +985,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}
+static void
+perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args)
+{
+ rte_mempool_put(args, ev.event_ptr);
+}
+
void
perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
@@ -1000,6 +1007,7 @@ perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
}
+ rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
}
void
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index d8e80903b2..c66656cd39 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -518,6 +518,16 @@ pipeline_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+pipeline_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ pipeline_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
uint16_t enq, uint16_t deq)
@@ -542,6 +552,8 @@ pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
rte_event_enqueue_burst(dev, port, ev, deq);
}
+
+ rte_event_port_quiesce(dev, port, pipeline_event_port_flush, NULL);
}
void
diff --git a/examples/eventdev_pipeline/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h
index 9899b257b0..28b6ab85ff 100644
--- a/examples/eventdev_pipeline/pipeline_common.h
+++ b/examples/eventdev_pipeline/pipeline_common.h
@@ -140,6 +140,13 @@ schedule_devices(unsigned int lcore_id)
}
}
+static void
+event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ rte_mempool_put(args, ev.event_ptr);
+}
+
static inline void
worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
uint16_t nb_enq, uint16_t nb_deq)
@@ -160,6 +167,8 @@ worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
}
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 3df5acf384..7f259e4cf3 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -737,6 +737,13 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
* selected.
*/
+static void
+ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ rte_pktmbuf_free(ev.mbuf);
+}
+
/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS 2
@@ -861,6 +868,9 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
rte_event_enqueue_burst(links[0].eventdev_id,
links[0].event_port_id, &ev, 1);
}
+
+ rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+ ipsec_event_port_flush, NULL);
}
/*
@@ -974,6 +984,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
rte_event_enqueue_burst(links[0].eventdev_id,
links[0].event_port_id, &ev, 1);
}
+
+ rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+ ipsec_event_port_flush, NULL);
}
static uint8_t
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index 15bfe790a0..41a0d3f22f 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -128,6 +128,16 @@ l2fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+l2fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ l2fwd_event_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
struct rte_event events[], uint16_t nb_enq,
@@ -147,4 +157,7 @@ l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(event_d_id, port_id, l2fwd_event_port_flush,
+ NULL);
}
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index a14a21b414..0b58475c85 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -301,6 +301,16 @@ l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
}
}
+static void
+l3fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+ void *args __rte_unused)
+{
+ if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+ l3fwd_event_vector_array_free(&ev, 1);
+ else
+ rte_pktmbuf_free(ev.mbuf);
+}
+
void
l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
struct rte_event events[], uint16_t nb_enq,
@@ -320,4 +330,7 @@ l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
}
+
+ rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush,
+ NULL);
}
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v3 3/3] event/cnxk: implement event port quiesce function
2022-05-13 17:58 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port pbhagavatula
2022-05-13 17:58 ` [PATCH v3 2/3] eventdev: update examples to use port quiesce pbhagavatula
@ 2022-05-13 17:58 ` pbhagavatula
2022-05-17 10:04 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port Jerin Jacob
2 siblings, 0 replies; 12+ messages in thread
From: pbhagavatula @ 2022-05-13 17:58 UTC (permalink / raw)
To: jerinj, Pavan Nikhilesh, Shijith Thotton
Cc: dev, jay.jayatheerthan, erik.g.carrillo, abhinandan.gujjar,
timothy.mcdaniel, hemant.agrawal, nipun.gupta, harry.van.haaren,
mattias.ronnblom, liangma, peter.mccarthy
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Implement event port quiesce function to clean up any lcore
resources used.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
drivers/event/cnxk/cn10k_eventdev.c | 78 ++++++++++++++++++++++++++---
drivers/event/cnxk/cn9k_eventdev.c | 60 +++++++++++++++++++++-
2 files changed, 130 insertions(+), 8 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 9b4d2895ec..409eb892a7 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -166,15 +166,23 @@ cn10k_sso_hws_reset(void *arg, void *hws)
uint64_t u64[2];
} gw;
uint8_t pend_tt;
+ bool is_pend;
plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
/* Wait till getwork/swtp/waitw/desched completes. */
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless its a SWTAG. */
+ pend_state = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ ws->swtag_req)
+ is_pend = true;
+
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
BIT_ULL(56) | BIT_ULL(54)));
pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
- if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
cnxk_sso_hws_swtag_untag(base +
SSOW_LF_GWS_OP_SWTAG_UNTAG);
@@ -188,15 +196,10 @@ cn10k_sso_hws_reset(void *arg, void *hws)
switch (dev->gw_mode) {
case CN10K_GW_MODE_PREF:
+ case CN10K_GW_MODE_PREF_WFE:
while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
;
break;
- case CN10K_GW_MODE_PREF_WFE:
- while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
- SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
- continue;
- plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
- break;
case CN10K_GW_MODE_NONE:
default:
break;
@@ -532,6 +535,66 @@ cn10k_sso_port_release(void *port)
rte_free(gws_cookie);
}
+static void
+cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+ rte_eventdev_port_flush_t flush_cb, void *args)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cn10k_sso_hws *ws = port;
+ struct rte_event ev;
+ uint64_t ptag;
+ bool is_pend;
+
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless its a SWTAG. */
+ ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ if (ptag & (BIT_ULL(62) | BIT_ULL(54)) || ws->swtag_req)
+ is_pend = true;
+ do {
+ ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ } while (ptag &
+ (BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | BIT_ULL(54)));
+
+ cn10k_sso_hws_get_work_empty(ws, &ev,
+ (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+ NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+ if (is_pend && ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+
+ /* Check if we have work in PRF_WQE0, if so extract it. */
+ switch (dev->gw_mode) {
+ case CN10K_GW_MODE_PREF:
+ case CN10K_GW_MODE_PREF_WFE:
+ while (plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0) &
+ BIT_ULL(63))
+ ;
+ break;
+ case CN10K_GW_MODE_NONE:
+ default:
+ break;
+ }
+
+ if (CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0)) !=
+ SSO_TT_EMPTY) {
+ plt_write64(BIT_ULL(16) | 1,
+ ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+ cn10k_sso_hws_get_work_empty(
+ ws, &ev,
+ (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+ NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+ if (ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+ }
+ ws->swtag_req = 0;
+ plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+}
+
static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
@@ -851,6 +914,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn10k_sso_port_setup,
.port_release = cn10k_sso_port_release,
+ .port_quiesce = cn10k_sso_port_quiesce,
.port_link = cn10k_sso_port_link,
.port_unlink = cn10k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 4bba477dd1..dde8497895 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -181,6 +181,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
uint64_t pend_state;
uint8_t pend_tt;
uintptr_t base;
+ bool is_pend;
uint64_t tag;
uint8_t i;
@@ -188,6 +189,13 @@ cn9k_sso_hws_reset(void *arg, void *hws)
ws = hws;
for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
base = dev->dual_ws ? dws->base[i] : ws->base;
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless its a SWTAG. */
+ pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+ ws->swtag_req))
+ is_pend = true;
/* Wait till getwork/swtp/waitw/desched completes. */
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
@@ -196,7 +204,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
tag = plt_read64(base + SSOW_LF_GWS_TAG);
pend_tt = (tag >> 32) & 0x3;
- if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_TT_ATOMIC ||
pend_tt == SSO_TT_ORDERED)
cnxk_sso_hws_swtag_untag(
@@ -208,7 +216,14 @@ cn9k_sso_hws_reset(void *arg, void *hws)
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
} while (pend_state & BIT_ULL(58));
+
+ plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
}
+
+ if (dev->dual_ws)
+ dws->swtag_req = 0;
+ else
+ ws->swtag_req = 0;
}
void
@@ -784,6 +799,48 @@ cn9k_sso_port_release(void *port)
rte_free(gws_cookie);
}
+static void
+cn9k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+ rte_eventdev_port_flush_t flush_cb, void *args)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cn9k_sso_hws_dual *dws;
+ struct cn9k_sso_hws *ws;
+ struct rte_event ev;
+ uintptr_t base;
+ uint64_t ptag;
+ bool is_pend;
+ uint8_t i;
+
+ dws = port;
+ ws = port;
+ for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
+ base = dev->dual_ws ? dws->base[i] : ws->base;
+ is_pend = false;
+ /* Work in WQE0 is always consumed, unless its a SWTAG. */
+ ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ if (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+ ws->swtag_req))
+ is_pend = true;
+ /* Wait till getwork/swtp/waitw/desched completes. */
+ do {
+ ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ } while (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
+ BIT_ULL(56)));
+
+ cn9k_sso_hws_get_work_empty(
+ base, &ev, dev->rx_offloads,
+ dev->dual_ws ? dws->lookup_mem : ws->lookup_mem,
+ dev->dual_ws ? dws->tstamp : ws->tstamp);
+ if (is_pend && ev.u64) {
+ if (flush_cb)
+ flush_cb(event_dev->data->dev_id, ev, args);
+ cnxk_sso_hws_swtag_flush(ws->base);
+ }
+ }
+}
+
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
@@ -1085,6 +1142,7 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn9k_sso_port_setup,
.port_release = cn9k_sso_port_release,
+ .port_quiesce = cn9k_sso_port_quiesce,
.port_link = cn9k_sso_port_link,
.port_unlink = cn9k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 1/3] eventdev: add function to quiesce an event port
2022-05-13 17:58 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port pbhagavatula
2022-05-13 17:58 ` [PATCH v3 2/3] eventdev: update examples to use port quiesce pbhagavatula
2022-05-13 17:58 ` [PATCH v3 3/3] event/cnxk: implement event port quiesce function pbhagavatula
@ 2022-05-17 10:04 ` Jerin Jacob
2 siblings, 0 replies; 12+ messages in thread
From: Jerin Jacob @ 2022-05-17 10:04 UTC (permalink / raw)
To: Pavan Nikhilesh
Cc: Jerin Jacob, Ray Kinsella, dpdk-dev, Jayatheerthan, Jay,
Erik Gabriel Carrillo, Gujjar, Abhinandan S, McDaniel, Timothy,
Shijith Thotton, Hemant Agrawal, Nipun Gupta, Van Haaren, Harry,
Mattias Rönnblom, Liang Ma, Peter Mccarthy
On Fri, May 13, 2022 at 11:31 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Add function to quiesce any core specific resources consumed by
> the event port.
>
> When the application decides to migrate the event port to another lcore
> or teardown the current lcore it may to call `rte_event_port_quiesce`
> to make sure that all the data associated with the event port are released
> from the lcore, this might also include any prefetched events.
>
> While releasing the event port from the lcore, this function calls the
> user-provided flush callback once per event.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
> v3 Changes:
> - Add `rte_` prefix to callback function.
> - Fix API documentation issues.
> - Update eventdev documentation.
>
> v2 Changes:
> - Remove internal Change-Id tag from commit messages.
Changed git commit message heading as "eventdev: support to quiesce an
event port"
Also updated release notes and improved the documentation[1]
Series-Acked-by: Jerin Jacob <jerinj@marvell.com>
Series applied to dpdk-next-net-eventdev/for-main. Thanks
[1]
[for-main]dell[dpdk-next-eventdev] $ git diff
diff --git a/doc/guides/prog_guide/eventdev.rst
b/doc/guides/prog_guide/eventdev.rst
index 973c9838ae..7a053de132 100644
--- a/doc/guides/prog_guide/eventdev.rst
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -428,8 +428,7 @@ A flush callback can be passed to the function to
handle any outstanding events.
.. Note::
- The event port specific config shall not be reset when this API is
- invoked.
+ Invocation of this API does not affect the existing port configuration.
Summary
-------
diff --git a/doc/guides/rel_notes/release_22_07.rst
b/doc/guides/rel_notes/release_22_07.rst
index 88d6e96cc1..7eae0b04f9 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -55,6 +55,12 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Added API to quiesce an event port.**
+
+ * Added ``rte_event_port_quiesce()`` to quiesce any lcore specific
+ resources consumed by the event port, when the lcore no more
+ associated with event port.
+
* **Updated Intel iavf driver.**
* Added Tx QoS queue rate limitation support.
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 1a46d289a9..80bfbf4293 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -849,8 +849,7 @@ typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
* While releasing the event port from the lcore, this function calls the
* user-provided flush callback once per event.
*
- * @note The event port specific config shall not be reset when this API is
- * called.
+ * @note Invocation of this API does not affect the existing port
configuration.
*
* @param dev_id
* The identifier of the device.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] eventdev: add function to quiesce an event port
2022-04-27 11:32 [PATCH 1/3] eventdev: add function to quiesce an event port Pavan Nikhilesh
` (2 preceding siblings ...)
2022-04-27 11:37 ` [PATCH 1/3 v2] eventdev: add function to quiesce an event port Pavan Nikhilesh
@ 2022-05-04 9:02 ` Ray Kinsella
2022-05-09 17:29 ` Jerin Jacob
4 siblings, 0 replies; 12+ messages in thread
From: Ray Kinsella @ 2022-05-04 9:02 UTC (permalink / raw)
To: Pavan Nikhilesh; +Cc: jerinj, dev
Pavan Nikhilesh <pbhagavatula@marvell.com> writes:
> Add function to quiesce any core specific resources consumed by
> the event port.
>
> When the application decides to migrate the event port to another lcore
> or teardown the current lcore it may to call `rte_event_port_quiesce`
> to make sure that all the data associated with the event port are released
> from the lcore, this might also include any prefetched events.
>
> While releasing the event port from the lcore, this function calls the
> user-provided flush callback once per event.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
> lib/eventdev/eventdev_pmd.h | 19 +++++++++++++++++++
> lib/eventdev/rte_eventdev.c | 19 +++++++++++++++++++
> lib/eventdev/rte_eventdev.h | 33 +++++++++++++++++++++++++++++++++
> lib/eventdev/version.map | 3 +++
> 4 files changed, 74 insertions(+)
>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
--
Regards, Ray K
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] eventdev: add function to quiesce an event port
2022-04-27 11:32 [PATCH 1/3] eventdev: add function to quiesce an event port Pavan Nikhilesh
` (3 preceding siblings ...)
2022-05-04 9:02 ` [PATCH " Ray Kinsella
@ 2022-05-09 17:29 ` Jerin Jacob
4 siblings, 0 replies; 12+ messages in thread
From: Jerin Jacob @ 2022-05-09 17:29 UTC (permalink / raw)
To: Pavan Nikhilesh, Jayatheerthan, Jay, Erik Gabriel Carrillo,
Gujjar, Abhinandan S, McDaniel, Timothy, Hemant Agrawal,
Nipun Gupta, Van Haaren, Harry, Mattias Rönnblom, Liang Ma,
Peter Mccarthy
Cc: Jerin Jacob, Ray Kinsella, dpdk-dev, Shijith Thotton
On Wed, Apr 27, 2022 at 5:02 PM Pavan Nikhilesh
<pbhagavatula@marvell.com> wrote:
>
> Add function to quiesce any core specific resources consumed by
> the event port.
>
> When the application decides to migrate the event port to another lcore
> or teardown the current lcore it may call `rte_event_port_quiesce`
> to make sure that all the data associated with the event port are released
> from the lcore, this might also include any prefetched events.
>
> While releasing the event port from the lcore, this function calls the
> user-provided flush callback once per event.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
+ eventdev stake holder
@jay.jayatheerthan@intel.com @erik.g.carrillo@intel.com
@abhinandan.gujjar@intel.com timothy.mcdaniel@intel.com
sthotton@marvell.com hemant.agrawal@nxp.com nipun.gupta@nxp.com
harry.van.haaren@intel.com mattias.ronnblom@ericsson.com
liangma@liangbit.com peter.mccarthy@intel.com
Since it is in a slow path and allows port teardown on migration for
implementations where the core has some state for the port, the new
API addition looks good to me.
Any objection or alternative thought from eventdev stake holders?
Some comments below.
> ---
> lib/eventdev/eventdev_pmd.h | 19 +++++++++++++++++++
> lib/eventdev/rte_eventdev.c | 19 +++++++++++++++++++
> lib/eventdev/rte_eventdev.h | 33 +++++++++++++++++++++++++++++++++
> lib/eventdev/version.map | 3 +++
> 4 files changed, 74 insertions(+)
>
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index ce469d47a6..cf9f2146a1 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -381,6 +381,23 @@ typedef int (*eventdev_port_setup_t)(struct rte_eventdev *dev,
> */
> typedef void (*eventdev_port_release_t)(void *port);
>
> +/**
> + * Quiesce any core specific resources consumed by the event port
> + *
> + * @param dev
> + * Event device pointer.
> + * @param port
> + * Event port pointer.
> + * @param flush_cb
> + * User-provided event flush function.
> + * @param args
> + * Arguments to be passed to the user-provided event flush function.
> + *
> + */
> +typedef void (*eventdev_port_quiesce_t)(struct rte_eventdev *dev, void *port,
Please prefix rte_ for public symbols. i.e rte_event_port_quiesce_t.
I know we missed for existing eventdev_stop_flush_t, which we can fix
in the next ABI. I will send a patch for same.
> + eventdev_port_flush_t flush_cb,
> + void *args);
> +
> /**
> * Link multiple source event queues to destination event port.
> *
> @@ -1218,6 +1235,8 @@ struct eventdev_ops {
> /**< Set up an event port. */
> eventdev_port_release_t port_release;
> /**< Release an event port. */
> + eventdev_port_quiesce_t port_quiesce;
> + /**< Quiesce an event port. */
>
> eventdev_port_link_t port_link;
> /**< Link event queues to an event port. */
> diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
> index 532a253553..541fa5dc61 100644
> --- a/lib/eventdev/rte_eventdev.c
> +++ b/lib/eventdev/rte_eventdev.c
> @@ -730,6 +730,25 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
> return 0;
> }
>
> +void
> +rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
> + eventdev_port_flush_t release_cb, void *args)
> +{
> + struct rte_eventdev *dev;
> +
> + RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
> + dev = &rte_eventdevs[dev_id];
> +
> + if (!is_valid_port(dev, port_id)) {
> + RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
> + return;
> + }
> +
> + if (dev->dev_ops->port_quiesce)
> + (*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
> + release_cb, args);
> +}
> +
> int
> rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
> uint32_t *attr_value)
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 42a5660169..c86d8a5576 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -830,6 +830,39 @@ int
> rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
> const struct rte_event_port_conf *port_conf);
>
> +typedef void (*eventdev_port_flush_t)(uint8_t dev_id, struct rte_event event,
> + void *arg);
> +/**< Callback function prototype that can be passed during
> + * rte_event_port_release(), invoked once per released event.
> + */
> +
> +/**
> + * Quiesce any core specific resources consumed by the event port.
> + *
> + * Event ports are generally coupled with lcores, and a given Hardware
> + * implementation might require the PMD to store port specific data in the
> + * lcore.
> + * When the application decides to migrate the event port to an other lcore
an other -> another
> + * or teardown the current lcore it may to call `rte_event_port_quiesce`
> + * to make sure that all the data associated with the event port are released
> + * from the lcore, this might also include any prefetched events.
> + * While releasing the event port from the lcore, this function calls the
> + * user-provided flush callback once per event.
> + *
> + * The event port specific config is not reset.
Make this as @note The event port-specific config shall not reset on
this API call or similar.
> + *
> + * @param dev_id
> + * The identifier of the device.
> + * @param port_id
> + * The index of the event port to setup. The value must be in the range
> + * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
> + * @param release_cb
> + * Callback function invoked once per flushed event.
> + */
> +__rte_experimental
> +void rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
> + eventdev_port_flush_t release_cb, void *args);
>
Please update doc/guides/prog_guide/eventdev.rst and add section for teardown
and mention existing rte_event_dev_stop_flush_callback_register() and
this new API.
^ permalink raw reply [flat|nested] 12+ messages in thread