DPDK patches and discussions
* [PATCH 1/2] event/cnxk: flush flow context on cleanup
@ 2023-09-09 16:57 pbhagavatula
  2023-09-09 16:57 ` [PATCH 2/2] common/cnxk: split XAQ counts pbhagavatula
  0 siblings, 1 reply; 3+ messages in thread
From: pbhagavatula @ 2023-09-09 16:57 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Flush currently held flow context on event port cleanup.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c | 18 ++++++++++++++----
 drivers/event/cnxk/cn9k_eventdev.c  | 25 +++++++++++++++++++------
 2 files changed, 33 insertions(+), 10 deletions(-)
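
For reference, a condensed sketch of the flush-and-drain pattern this patch
applies in both the port quiesce and HWS reset paths (illustrative only, not
part of the patch; the register offsets, bit positions and plt_*/BIT_ULL
helpers are the ones used in the diff below, the helper name is made up):

	/* Flush any currently held SWTAG, then poll PENDSTATE until the
	 * desched (bit 58) and swtag-flush (bit 56) operations complete.
	 */
	static void
	hws_flush_flow_ctx(uintptr_t base)
	{
		uint64_t pend_state, tt;

		tt = (plt_read64(base + SSOW_LF_GWS_TAG) >> 32) & SSO_TT_EMPTY;
		if (tt != SSO_TT_EMPTY)
			plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & (BIT_ULL(58) | BIT_ULL(56)));
	}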

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 499a3aace7..211c51fd12 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -200,12 +200,14 @@ cn10k_sso_hws_reset(void *arg, void *hws)
 			cnxk_sso_hws_swtag_untag(base +
 						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
 		plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+	} else if (pend_tt != SSO_TT_EMPTY) {
+		plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
 	}
 
 	/* Wait for desched to complete. */
 	do {
 		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
-	} while (pend_state & BIT_ULL(58));
+	} while (pend_state & (BIT_ULL(58) | BIT_ULL(56)));
 
 	switch (dev->gw_mode) {
 	case CN10K_GW_MODE_PREF:
@@ -582,11 +584,16 @@ cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
 
 	cn10k_sso_hws_get_work_empty(ws, &ev,
 				     (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F | NIX_RX_MULTI_SEG_F);
-	if (is_pend && ev.u64) {
+	if (is_pend && ev.u64)
 		if (flush_cb)
 			flush_cb(event_dev->data->dev_id, ev, args);
+	ptag = (plt_read64(ws->base + SSOW_LF_GWS_TAG) >> 32) & SSO_TT_EMPTY;
+	if (ptag != SSO_TT_EMPTY)
 		cnxk_sso_hws_swtag_flush(ws->base);
-	}
+
+	do {
+		ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+	} while (ptag & BIT_ULL(56));
 
 	/* Check if we have work in PRF_WQE0, if so extract it. */
 	switch (dev->gw_mode) {
@@ -610,8 +617,11 @@ cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
 		if (ev.u64) {
 			if (flush_cb)
 				flush_cb(event_dev->data->dev_id, ev, args);
-			cnxk_sso_hws_swtag_flush(ws->base);
 		}
+		cnxk_sso_hws_swtag_flush(ws->base);
+		do {
+			ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+		} while (ptag & BIT_ULL(56));
 	}
 	ws->swtag_req = 0;
 	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 6cce5477f0..a03e3c138b 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -222,16 +222,16 @@ cn9k_sso_hws_reset(void *arg, void *hws)
 				cnxk_sso_hws_swtag_untag(
 					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
 			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+		} else if (pend_tt != SSO_TT_EMPTY) {
+			plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
 		}
 
 		/* Wait for desched to complete. */
 		do {
 			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
-		} while (pend_state & BIT_ULL(58));
-
+		} while (pend_state & (BIT_ULL(58) | BIT_ULL(56)));
 		plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
 	}
-
 	if (dev->dual_ws)
 		dws->swtag_req = 0;
 	else
@@ -686,12 +686,25 @@ cn9k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
 			base, &ev, dev->rx_offloads,
 			dev->dual_ws ? dws->lookup_mem : ws->lookup_mem,
 			dev->dual_ws ? dws->tstamp : ws->tstamp);
-		if (is_pend && ev.u64) {
+		if (is_pend && ev.u64)
 			if (flush_cb)
 				flush_cb(event_dev->data->dev_id, ev, args);
-			cnxk_sso_hws_swtag_flush(ws->base);
-		}
+
+		ptag = (plt_read64(base + SSOW_LF_GWS_TAG) >> 32) & SSO_TT_EMPTY;
+		if (ptag != SSO_TT_EMPTY)
+			cnxk_sso_hws_swtag_flush(base);
+
+		do {
+			ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+		} while (ptag & BIT_ULL(56));
+
+		plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
 	}
+
+	if (dev->dual_ws)
+		dws->swtag_req = 0;
+	else
+		ws->swtag_req = 0;
 }
 
 static int
-- 
2.25.1



* [PATCH 2/2] common/cnxk: split XAQ counts
  2023-09-09 16:57 [PATCH 1/2] event/cnxk: flush flow context on cleanup pbhagavatula
@ 2023-09-09 16:57 ` pbhagavatula
  2023-09-19 17:00   ` Jerin Jacob
  0 siblings, 1 reply; 3+ messages in thread
From: pbhagavatula @ 2023-09-09 16:57 UTC (permalink / raw)
  To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Split XAQ counts into reserved and cached to allow more events
to be in flight.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/common/cnxk/roc_sso.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
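
For reference, the XAQ pool sizing after the split, condensed from the diff
below (illustrative only, not part of the patch; PLT_MAX and the macro values
are those used in roc_sso.c, the helper names are made up):

	#define SSO_XAQ_CACHE_CNT (0x3)
	#define SSO_XAQ_RSVD_CNT  (0x4)
	#define SSO_XAQ_SLACK     (16)

	/* The pool now accounts for both the per-HWGRP cache and a reserved
	 * set, so cached XAQs no longer eat into the events that can be in
	 * flight.
	 */
	static uint32_t
	sso_xaq_pool_size(uint32_t nb_hwgrp, uint32_t nb_xae, uint32_t xae_waes)
	{
		uint32_t nb_xaq = (SSO_XAQ_CACHE_CNT + SSO_XAQ_RSVD_CNT) * nb_hwgrp;

		nb_xaq += PLT_MAX(1 + ((nb_xae - 1) / xae_waes), nb_xaq);
		return nb_xaq + SSO_XAQ_SLACK;
	}

	/* The enqueue limit keeps the per-HWGRP cache and the slack as
	 * headroom that enqueue can never consume.
	 */
	static uint32_t
	sso_xaq_limit(uint32_t nb_xaq, uint32_t nb_hwgrp)
	{
		return nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT) - SSO_XAQ_SLACK;
	}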

diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index a5f48d5bbc..0a1074b018 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -5,7 +5,8 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
-#define SSO_XAQ_CACHE_CNT (0x7)
+#define SSO_XAQ_CACHE_CNT (0x3)
+#define SSO_XAQ_RSVD_CNT  (0x4)
 #define SSO_XAQ_SLACK	  (16)
 
 /* Private functions. */
@@ -499,6 +500,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
 	 * pipelining.
 	 */
 	xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
+	xaq->nb_xaq += (SSO_XAQ_RSVD_CNT * nb_hwgrp);
 	xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
 	xaq->nb_xaq += SSO_XAQ_SLACK;
 
@@ -542,8 +544,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
 	 * There should be a minimum headroom of 7 XAQs per HWGRP for SSO
 	 * to request XAQ to cache them even before enqueue is called.
 	 */
-	xaq->xaq_lmt =
-		xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT) - SSO_XAQ_SLACK;
+	xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT) - SSO_XAQ_SLACK;
 
 	return 0;
 npa_fill_fail:
-- 
2.25.1



* Re: [PATCH 2/2] common/cnxk: split XAQ counts
  2023-09-09 16:57 ` [PATCH 2/2] common/cnxk: split XAQ counts pbhagavatula
@ 2023-09-19 17:00   ` Jerin Jacob
  0 siblings, 0 replies; 3+ messages in thread
From: Jerin Jacob @ 2023-09-19 17:00 UTC (permalink / raw)
  To: pbhagavatula
  Cc: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, dev

On Sun, Sep 10, 2023 at 1:46 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Split XAQ counts into reserved and cached to allow more events
> to be inflight.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>


Updated the git commit as follows and applied to
dpdk-next-net-eventdev/for-main. Thanks


commit e227b98adbbacde663fbf0cafe512a1a123adef1 (HEAD -> for-main,
origin/for-main, origin/HEAD)
Author: Pavan Nikhilesh <pbhagavatula@marvell.com>
Date:   Sat Sep 9 22:27:47 2023 +0530

    common/cnxk: fix XAQ limits

    Split XAQ counts into reserved and cached to allow more events
    to be in flight.

    Fixes: c3320d21b475 ("event/cnxk: use LMTST for enqueue new burst")
    Cc: stable@dpdk.org

    Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>

commit 87007fcc6a0c7274226dcb80675986fdcab57de8
Author: Pavan Nikhilesh <pbhagavatula@marvell.com>
Date:   Sat Sep 9 22:27:46 2023 +0530

    event/cnxk: fix context flush in port cleanup

    Flush currently held flow context during event port cleanup.

    Fixes: e8594de2731d ("event/cnxk: implement event port quiesce function")
    Cc: stable@dpdk.org

    Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>

