From: Pavan Nikhilesh <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
"Shijith Thotton" <sthotton@marvell.com>
Cc: <dev@dpdk.org>
Subject: [PATCH 3/3] event/cnxk: implement event port quiesce function
Date: Wed, 27 Apr 2022 17:02:23 +0530 [thread overview]
Message-ID: <20220427113223.13948-3-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20220427113223.13948-1-pbhagavatula@marvell.com>
Implement the event port quiesce function to clean up any
lcore-specific resources that are in use.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Change-Id: I7dda3d54dc698645d25ebbfbabd81760940fe649
---
drivers/event/cnxk/cn10k_eventdev.c | 78 ++++++++++++++++++++++++++---
drivers/event/cnxk/cn9k_eventdev.c | 60 +++++++++++++++++++++-
2 files changed, 130 insertions(+), 8 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 94829e789c..d84c5d2d1e 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -167,15 +167,23 @@ cn10k_sso_hws_reset(void *arg, void *hws)
uint64_t u64[2];
} gw;
uint8_t pend_tt;
+ bool is_pend;
plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
/* Wait till getwork/swtp/waitw/desched completes. */
+ is_pend = false;
+	/* Work in WQE0 is always consumed, unless it's a SWTAG. */
+ pend_state = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ ws->swtag_req)
+ is_pend = true;
+
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
BIT_ULL(56) | BIT_ULL(54)));
pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
- if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
cnxk_sso_hws_swtag_untag(base +
SSOW_LF_GWS_OP_SWTAG_UNTAG);
@@ -189,15 +197,10 @@ cn10k_sso_hws_reset(void *arg, void *hws)
switch (dev->gw_mode) {
case CN10K_GW_MODE_PREF:
+ case CN10K_GW_MODE_PREF_WFE:
while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
;
break;
- case CN10K_GW_MODE_PREF_WFE:
- while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
- SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
- continue;
- plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
- break;
case CN10K_GW_MODE_NONE:
default:
break;
@@ -533,6 +536,66 @@ cn10k_sso_port_release(void *port)
rte_free(gws_cookie);
}
+static void
+cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+		       eventdev_port_flush_t flush_cb, void *args)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	struct cn10k_sso_hws *ws = port;
+	struct rte_event ev;
+	uint64_t ptag;
+	bool is_pend;
+
+	is_pend = false;
+	/* Work in WQE0 is always consumed, unless it's a SWTAG. */
+	ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+	if (ptag & (BIT_ULL(62) | BIT_ULL(54)) || ws->swtag_req)
+		is_pend = true;
+	do { /* Poll PENDSTATE until in-flight SWTAG/desched ops drain. */
+		ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+	} while (ptag &
+		 (BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | BIT_ULL(54)));
+
+	cn10k_sso_hws_get_work_empty(ws, &ev,
+				     (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+					     NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+	if (is_pend && ev.u64) { /* Hand pending work to the app, then flush. */
+		if (flush_cb)
+			flush_cb(event_dev->data->dev_id, ev, args);
+		cnxk_sso_hws_swtag_flush(ws->base);
+	}
+
+	/* Check if we have work in PRF_WQE0, if so extract it. */
+	switch (dev->gw_mode) {
+	case CN10K_GW_MODE_PREF:
+	case CN10K_GW_MODE_PREF_WFE: /* Wait for the prefetch to land. */
+		while (plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0) &
+		       BIT_ULL(63))
+			;
+		break;
+	case CN10K_GW_MODE_NONE:
+	default:
+		break;
+	}
+
+	if (CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0)) !=
+	    SSO_TT_EMPTY) { /* Prefetched event present; pull and flush it. */
+		plt_write64(BIT_ULL(16) | 1,
+			    ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+		cn10k_sso_hws_get_work_empty(
+			ws, &ev,
+			(NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+				NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+		if (ev.u64) {
+			if (flush_cb)
+				flush_cb(event_dev->data->dev_id, ev, args);
+			cnxk_sso_hws_swtag_flush(ws->base);
+		}
+	}
+	ws->swtag_req = 0;
+	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); /* Drop GW cache. */
+}
+
static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
@@ -852,6 +915,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn10k_sso_port_setup,
.port_release = cn10k_sso_port_release,
+ .port_quiesce = cn10k_sso_port_quiesce,
.port_link = cn10k_sso_port_link,
.port_unlink = cn10k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 987888d3db..46885c5f92 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -186,6 +186,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
uint64_t pend_state;
uint8_t pend_tt;
uintptr_t base;
+ bool is_pend;
uint64_t tag;
uint8_t i;
@@ -193,6 +194,13 @@ cn9k_sso_hws_reset(void *arg, void *hws)
ws = hws;
for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
base = dev->dual_ws ? dws->base[i] : ws->base;
+ is_pend = false;
+		/* Work in WQE0 is always consumed, unless it's a SWTAG. */
+ pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+ (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+ ws->swtag_req))
+ is_pend = true;
/* Wait till getwork/swtp/waitw/desched completes. */
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
@@ -201,7 +209,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
tag = plt_read64(base + SSOW_LF_GWS_TAG);
pend_tt = (tag >> 32) & 0x3;
- if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_TT_ATOMIC ||
pend_tt == SSO_TT_ORDERED)
cnxk_sso_hws_swtag_untag(
@@ -213,7 +221,14 @@ cn9k_sso_hws_reset(void *arg, void *hws)
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
} while (pend_state & BIT_ULL(58));
+
+ plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
}
+
+ if (dev->dual_ws)
+ dws->swtag_req = 0;
+ else
+ ws->swtag_req = 0;
}
void
@@ -789,6 +804,48 @@ cn9k_sso_port_release(void *port)
rte_free(gws_cookie);
}
+static void
+cn9k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+		      eventdev_port_flush_t flush_cb, void *args)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	struct cn9k_sso_hws_dual *dws;
+	struct cn9k_sso_hws *ws;
+	struct rte_event ev;
+	uintptr_t base;
+	uint64_t ptag;
+	bool is_pend;
+	uint8_t i;
+
+	dws = port; /* Dual-workslot view of the port. */
+	ws = port; /* Single-workslot view of the same port. */
+	for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
+		base = dev->dual_ws ? dws->base[i] : ws->base;
+		is_pend = false;
+		/* Work in WQE0 is always consumed, unless it's a SWTAG. */
+		ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+		if (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+		    (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+				    ws->swtag_req))
+			is_pend = true;
+		/* Wait till getwork/swtp/waitw/desched completes. */
+		do {
+			ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+		} while (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
+				 BIT_ULL(56)));
+
+		cn9k_sso_hws_get_work_empty(
+			base, &ev, dev->rx_offloads,
+			dev->dual_ws ? dws->lookup_mem : ws->lookup_mem,
+			dev->dual_ws ? dws->tstamp : ws->tstamp);
+		if (is_pend && ev.u64) {
+			if (flush_cb)
+				flush_cb(event_dev->data->dev_id, ev, args);
+			cnxk_sso_hws_swtag_flush(base); /* use per-WS base */
+		}
+	}
+}
+
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
@@ -1090,6 +1147,7 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn9k_sso_port_setup,
.port_release = cn9k_sso_port_release,
+ .port_quiesce = cn9k_sso_port_quiesce,
.port_link = cn9k_sso_port_link,
.port_unlink = cn9k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
--
2.35.1
next prev parent reply other threads:[~2022-04-27 11:32 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-27 11:32 [PATCH 1/3] eventdev: add function to quiesce an event port Pavan Nikhilesh
2022-04-27 11:32 ` [PATCH 2/3] eventdev: update examples to use port quiesce Pavan Nikhilesh
2022-04-27 11:32 ` Pavan Nikhilesh [this message]
2022-04-27 11:37 ` [PATCH 1/3 v2] eventdev: add function to quiesce an event port Pavan Nikhilesh
2022-04-27 11:37 ` [PATCH 2/3 v2] eventdev: update examples to use port quiesce Pavan Nikhilesh
2022-04-27 11:37 ` [PATCH 3/3 v2] event/cnxk: implement event port quiesce function Pavan Nikhilesh
2022-05-13 17:58 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port pbhagavatula
2022-05-13 17:58 ` [PATCH v3 2/3] eventdev: update examples to use port quiesce pbhagavatula
2022-05-13 17:58 ` [PATCH v3 3/3] event/cnxk: implement event port quiesce function pbhagavatula
2022-05-17 10:04 ` [PATCH v3 1/3] eventdev: add function to quiesce an event port Jerin Jacob
2022-05-04 9:02 ` [PATCH " Ray Kinsella
2022-05-09 17:29 ` Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220427113223.13948-3-pbhagavatula@marvell.com \
--to=pbhagavatula@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=sthotton@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).