DPDK patches and discussions
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>
Cc: <dev@dpdk.org>
Subject: [PATCH] event/cnxk: use WFE LDP loop for getwork routine
Date: Fri, 5 Jan 2024 01:06:33 +0530	[thread overview]
Message-ID: <20240104193633.2264-1-pbhagavatula@marvell.com> (raw)

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Use a WFE LDP loop while polling for GETWORK completion to achieve better
power savings.
This is disabled by default and can be enabled by setting
`RTE_ARM_USE_WFE` to `true` in `config/arm/meson.build`.
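
For reference, a minimal, self-contained sketch of the sevl/wfe polling
idiom the patch applies (illustrative only; wait_until_bit_clear() is a
hypothetical helper, while the actual code below LDPs the tag/WQE pair from
SSOW_LF_GWS_WQE0 and tests SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT):

  #include <stdint.h>

  /* Wait until the given bit clears in *addr, sleeping in WFE between
   * checks instead of busy-polling. The wake-up event is expected to be
   * generated by the device (or another agent) updating the location. */
  static inline uint64_t
  wait_until_bit_clear(volatile uint64_t *addr, unsigned int bit)
  {
          uint64_t val = *addr;

          if (val & (1ULL << bit)) {
                  __asm__ volatile("sevl");        /* arm the event register */
                  do {
                          __asm__ volatile("wfe"); /* sleep until an event */
                          val = *addr;
                  } while (val & (1ULL << bit));
          }
          __asm__ volatile("dmb ld" ::: "memory"); /* order loads before use */
          return val;
  }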

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 doc/guides/eventdevs/cnxk.rst     |  9 ++++++
 drivers/event/cnxk/cn10k_worker.h | 52 +++++++++++++++++++++++++------
 2 files changed, 52 insertions(+), 9 deletions(-)

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index cccb8a0304..d62c143c77 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -198,6 +198,15 @@ Runtime Config Options
 
     -a 0002:0e:00.0,tim_eclk_freq=122880000-1000000000-0
 
+Power Savings on CN10K
+----------------------
+
+ARM cores can additionally use WFE when polling for transactions on the SSO bus
+to save power, i.e., in the event dequeue call the ARM core can enter WFE and
+exit when either work has been scheduled or the dequeue timeout has been reached.
+This can be enabled by setting ``RTE_ARM_USE_WFE`` to ``true`` in
+``config/arm/meson.build``.
+
 Debugging Options
 -----------------
 
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 8aa916fa12..92d5190842 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -250,23 +250,57 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
 
 	gw.get_work = ws->gw_wdata;
 #if defined(RTE_ARCH_ARM64)
-#if !defined(__clang__)
-	asm volatile(
-		PLT_CPU_FEATURE_PREAMBLE
-		"caspal %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
-		: [wdata] "+r"(gw.get_work)
-		: [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
-		: "memory");
-#else
+#if defined(__clang__)
 	register uint64_t x0 __asm("x0") = (uint64_t)gw.u64[0];
 	register uint64_t x1 __asm("x1") = (uint64_t)gw.u64[1];
+#if defined(RTE_ARM_USE_WFE)
+	plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+	asm volatile(PLT_CPU_FEATURE_PREAMBLE
+		     "		ldp %[x0], %[x1], [%[tag_loc]]	\n"
+		     "		tbz %[x0], %[pend_gw], done%=	\n"
+		     "		sevl					\n"
+		     "rty%=:	wfe					\n"
+		     "		ldp %[x0], %[x1], [%[tag_loc]]	\n"
+		     "		tbnz %[x0], %[pend_gw], rty%=	\n"
+		     "done%=:						\n"
+		     "		dmb ld					\n"
+		     : [x0] "+r" (x0), [x1] "+r" (x1)
+		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0),
+		       [pend_gw] "i"(SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
+		     : "memory");
+#else
 	asm volatile(".arch armv8-a+lse\n"
 		     "caspal %[x0], %[x1], %[x0], %[x1], [%[dst]]\n"
-		     : [x0] "+r"(x0), [x1] "+r"(x1)
+		     : [x0] "+r" (x0), [x1] "+r" (x1)
 		     : [dst] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
 		     : "memory");
+#endif
 	gw.u64[0] = x0;
 	gw.u64[1] = x1;
+#else
+#if defined(RTE_ARM_USE_WFE)
+	plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+	asm volatile(PLT_CPU_FEATURE_PREAMBLE
+		     "		ldp %[wdata], %H[wdata], [%[tag_loc]]	\n"
+		     "		tbz %[wdata], %[pend_gw], done%=	\n"
+		     "		sevl					\n"
+		     "rty%=:	wfe					\n"
+		     "		ldp %[wdata], %H[wdata], [%[tag_loc]]	\n"
+		     "		tbnz %[wdata], %[pend_gw], rty%=	\n"
+		     "done%=:						\n"
+		     "		dmb ld					\n"
+		     : [wdata] "=&r"(gw.get_work)
+		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0),
+		       [pend_gw] "i"(SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
+		     : "memory");
+#else
+	asm volatile(
+		PLT_CPU_FEATURE_PREAMBLE
+		"caspal %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
+		: [wdata] "+r"(gw.get_work)
+		: [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
+		: "memory");
+#endif
 #endif
 #else
 	plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
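
To summarize the resulting GETWORK control flow (a simplified sketch drawn
from the hunk above, not a verbatim extract; the clang register-pair
variant behaves the same way):

  gw.get_work = ws->gw_wdata;
  #if defined(RTE_ARCH_ARM64) && defined(RTE_ARM_USE_WFE)
          /* Issue GETWORK, then LDP the tag/WQE pair from SSOW_LF_GWS_WQE0
           * and sleep in WFE until SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT
           * clears, followed by a dmb ld. */
          plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
  #elif defined(RTE_ARCH_ARM64)
          /* caspal issues GETWORK and returns the completed result in the
           * same register pair. */
  #else
          plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
          /* poll for completion (continuation not shown in this hunk) */
  #endif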
-- 
2.25.1


Thread overview: 12+ messages
2024-01-04 19:36 pbhagavatula [this message]
2024-01-09  7:56 ` Jerin Jacob
2024-01-17 14:25 ` [PATCH v2 1/2] config/arm: allow WFE to be enabled config time pbhagavatula
2024-01-17 14:26   ` [PATCH v2 2/2] event/cnxk: use WFE LDP loop for getwork routine pbhagavatula
2024-01-18  1:52   ` [PATCH v2 1/2] config/arm: allow WFE to be enabled config time Ruifeng Wang
2024-01-21 15:21   ` [PATCH v3 " pbhagavatula
2024-01-21 15:21     ` [PATCH v3 2/2] event/cnxk: use WFE LDP loop for getwork routine pbhagavatula
2024-01-22  6:37     ` [PATCH v3 1/2] config/arm: allow WFE to be enabled config time fengchengwen
2024-01-22  6:43     ` Ruifeng Wang
2024-02-01 16:37       ` Jerin Jacob
2024-02-01 22:03     ` [PATCH v4] event/cnxk: use WFE LDP loop for getwork routine pbhagavatula
2024-02-25 15:20       ` Jerin Jacob
