DPDK patches and discussions
From: Vipin Varghese <vipin.varghese@amd.com>
To: <ferruh.yigit@amd.com>, <dev@dpdk.org>
Subject: [RFC 2/2] eal/lcore: add llc aware for each macro
Date: Tue, 27 Aug 2024 20:40:14 +0530
Message-ID: <20240827151014.201-3-vipin.varghese@amd.com>
In-Reply-To: <20240827151014.201-1-vipin.varghese@amd.com>

Add RTE_LCORE_FOREACH macros for DPDK lcores that share the Last Level
Cache (LLC). On core complexes with a shared LLC, the macros iterate
over the lcores within the same LLC. On CPUs where all cores sit behind
a single LLC, the macros iterate over all available lcores.

Macros added (a usage sketch follows the list):
 - RTE_LCORE_FOREACH_LLC_FIRST
 - RTE_LCORE_FOREACH_LLC_FIRST_WORKER
 - RTE_LCORE_FOREACH_LLC_WORKER
 - RTE_LCORE_FOREACH_LLC_SKIP_FIRST_WORKER
 - RTE_LCORE_FOREACH_LLC_FIRST_N_WORKER
 - RTE_LCORE_FOREACH_LLC_SKIP_N_WORKER
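
Below is a minimal usage sketch (not part of this patch); worker_main is
a hypothetical lcore function with the usual int (*)(void *) signature:

  unsigned int lcore_id;

  /* launch worker_main on every worker lcore, grouped by shared LLC,
   * skipping the main lcore */
  RTE_LCORE_FOREACH_LLC_WORKER(lcore_id)
          rte_eal_remote_launch(worker_main, NULL, lcore_id);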

Signed-off-by: Vipin Varghese <vipin.varghese@amd.com>
---
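Note: a rough sketch (not part of the diff below) of walking lcores with
the new rte_get_next_llc_lcore() helper, assuming it follows the same
start (-1) and termination (RTE_MAX_LCORE) convention as the existing
rte_get_next_lcore(); do_work() is a hypothetical per-lcore function:

  unsigned int i;

  /* visit enabled lcores sharing the LLC, skip the main lcore
   * (skip_main = 1), do not wrap around (wrap = 0) */
  for (i = rte_get_next_llc_lcore(-1, 1, 0);
       i < RTE_MAX_LCORE;
       i = rte_get_next_llc_lcore(i, 1, 0))
          do_work(i);
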
 lib/eal/include/rte_lcore.h | 89 +++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 7deae47af3..7c1a240bde 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -18,6 +18,7 @@
 #include <rte_eal.h>
 #include <rte_launch.h>
 #include <rte_thread.h>
+#include <rte_os.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -196,6 +197,21 @@ rte_cpuset_t rte_lcore_cpuset(unsigned int lcore_id);
  */
 int rte_lcore_is_enabled(unsigned int lcore_id);
 
+/**
+ * Get the next enabled lcore ID within the same LLC.
+ *
+ * @param i
+ *   The current lcore (reference).
+ * @param skip_main
+ *   If true, do not return the ID of the main lcore.
+ * @param wrap
+ *   If true, go back to 0 when RTE_MAX_LCORE is reached; otherwise,
+ *   return RTE_MAX_LCORE.
+ * @return
+ *   The next lcore_id or RTE_MAX_LCORE if not found.
+ */
+unsigned int rte_get_next_llc_lcore(unsigned int i, int skip_main, int wrap);
+
 /**
  * Get the next enabled lcore ID.
  *
@@ -211,6 +227,11 @@ int rte_lcore_is_enabled(unsigned int lcore_id);
  */
 unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap);
 
+unsigned int rte_get_llc_lcore(unsigned int i, rte_cpuset_t *llc_cpu, unsigned int *start, unsigned int *end);
+unsigned int rte_get_llc_first_lcores(rte_cpuset_t *llc_cpu);
+unsigned int rte_get_llc_n_lcore(unsigned int i, rte_cpuset_t *llc_cpu, unsigned int *start, unsigned int *end, unsigned int n, bool skip);
+
+
 /**
  * Macro to browse all running lcores.
  */
@@ -219,6 +240,7 @@ unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap);
 	     i < RTE_MAX_LCORE;						\
 	     i = rte_get_next_lcore(i, 0, 0))
 
+
 /**
  * Macro to browse all running lcores except the main lcore.
  */
@@ -227,6 +249,73 @@ unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap);
 	     i < RTE_MAX_LCORE;						\
 	     i = rte_get_next_lcore(i, 1, 0))
 
+/** Browse all the cores in the provided LLC domain. */
+
+#define RTE_LCORE_FOREACH_LLC_FIRST(i)	\
+	rte_cpuset_t llc_foreach_first_lcores;								\
+	CPU_ZERO(&llc_foreach_first_lcores); i = 0;							\
+	unsigned int llc_foreach_num_iter = rte_get_llc_first_lcores(&llc_foreach_first_lcores);	\
+	i = (0 == llc_foreach_num_iter) ? RTE_MAX_LCORE : i;						\
+	for (; i < RTE_MAX_LCORE; i++)									\
+		if (CPU_ISSET(i, &llc_foreach_first_lcores))
+
+#define RTE_LCORE_FOREACH_LLC_FIRST_WORKER(i)	\
+	rte_cpuset_t llc_foreach_first_lcores;								\
+	CPU_ZERO(&llc_foreach_first_lcores); i = 0;							\
+	unsigned int llc_foreach_num_iter = rte_get_llc_first_lcores(&llc_foreach_first_lcores);	\
+	CPU_CLR(rte_get_main_lcore(), &llc_foreach_first_lcores);		\
+	i = (0 == llc_foreach_num_iter) ? RTE_MAX_LCORE : i;						\
+	for (; i < RTE_MAX_LCORE; i++)									\
+		if (CPU_ISSET(i, &llc_foreach_first_lcores))
+
+#define RTE_LCORE_FOREACH_LLC_WORKER(i)	\
+	rte_cpuset_t llc_foreach_first_lcores;								\
+	rte_cpuset_t llc_foreach_lcore;									\
+	unsigned int start, end;									\
+	CPU_ZERO(&llc_foreach_first_lcores); i = 0;							\
+	unsigned int llc_foreach_num_iter = rte_get_llc_first_lcores(&llc_foreach_first_lcores);	\
+	i = (0 == llc_foreach_num_iter) ? RTE_MAX_LCORE : i;						\
+	for (unsigned int llc_i = i; llc_i < RTE_MAX_LCORE; llc_i++)									\
+		if (CPU_ISSET(llc_i, &llc_foreach_first_lcores) && rte_get_llc_lcore(llc_i, &llc_foreach_lcore, &start, &end)) \
+			for (i = start; (i <= end); i++)						\
+				if (CPU_ISSET(i, &llc_foreach_lcore) && (i != rte_get_main_lcore()))
+
+#define RTE_LCORE_FOREACH_LLC_SKIP_FIRST_WORKER(i)	\
+	rte_cpuset_t llc_foreach_first_lcores;								\
+	rte_cpuset_t llc_foreach_lcore;									\
+	unsigned int start, end;									\
+	CPU_ZERO(&llc_foreach_first_lcores); i = 0;							\
+	unsigned int llc_foreach_num_iter = rte_get_llc_first_lcores(&llc_foreach_first_lcores);	\
+	i = (0 == llc_foreach_num_iter) ? RTE_MAX_LCORE : i;						\
+	for (unsigned int llc_i = i; llc_i < RTE_MAX_LCORE; llc_i++)									\
+		if (CPU_ISSET(llc_i, &llc_foreach_first_lcores) && rte_get_llc_lcore(llc_i, &llc_foreach_lcore, &start, &end)) \
+			for (i = start + 1; (i <= end); i++)						\
+				if (CPU_ISSET(i, &llc_foreach_lcore) && (i != rte_get_main_lcore()))
+
+#define RTE_LCORE_FOREACH_LLC_FIRST_N_WORKER(i, n)	\
+	rte_cpuset_t llc_foreach_first_lcores;	\
+	rte_cpuset_t llc_foreach_lcore;	\
+	unsigned int start, end, temp_count;	\
+	CPU_ZERO(&llc_foreach_first_lcores);	\
+	unsigned int llc_foreach_num_iter = rte_get_llc_first_lcores(&llc_foreach_first_lcores);	 \
+	i = (0 == llc_foreach_num_iter) ? RTE_MAX_LCORE : 0;	\
+	for (unsigned int llc_i = i; llc_i < RTE_MAX_LCORE; llc_i++)	\
+		if (CPU_ISSET(llc_i, &llc_foreach_first_lcores) && (rte_get_llc_n_lcore(llc_i, &llc_foreach_lcore, &start, &end, n, false) >= n))	\
+			for (i = start, temp_count = n; (i <= end) && (temp_count); i++)	\
+				if (CPU_ISSET(i, &llc_foreach_lcore) && (i != rte_get_main_lcore()) && (temp_count--))
+
+#define RTE_LCORE_FOREACH_LLC_SKIP_N_WORKER(i, n)	\
+	rte_cpuset_t llc_foreach_skip_first_lcores;	\
+	rte_cpuset_t llc_foreach_skip_lcore;	\
+	unsigned int start_skip, end_skip, llc_skip_i;	\
+	CPU_ZERO(&llc_foreach_skip_first_lcores);	\
+	unsigned int llc_foreach_skip_num_iter = rte_get_llc_first_lcores(&llc_foreach_skip_first_lcores);	\
+	i = (0 == llc_foreach_skip_num_iter) ? RTE_MAX_LCORE : 0;	\
+	for (llc_skip_i = i; llc_skip_i < RTE_MAX_LCORE; llc_skip_i++)	\
+		if (CPU_ISSET(llc_skip_i, &llc_foreach_skip_first_lcores) && (rte_get_llc_n_lcore(llc_skip_i, &llc_foreach_skip_lcore, &start_skip, &end_skip, n, true) > 0))	\
+			for (i = start_skip; (i <= end_skip); i++)	\
+				if (CPU_ISSET(i, &llc_foreach_skip_lcore) && (i != rte_get_main_lcore()))
+
 /**
  * Callback prototype for initializing lcores.
  *
-- 
2.34.1


Thread overview: 53+ messages
2024-08-27 15:10 [RFC 0/2] introduce LLC aware functions Vipin Varghese
2024-08-27 15:10 ` [RFC 1/2] eal: add llc " Vipin Varghese
2024-08-27 17:36   ` Stephen Hemminger
2024-09-02  0:27     ` Varghese, Vipin
2024-08-27 20:56   ` Wathsala Wathawana Vithanage
2024-08-29  3:21     ` 答复: " Feifei Wang
2024-09-02  1:20     ` Varghese, Vipin
2024-09-03 17:54       ` Wathsala Wathawana Vithanage
2024-09-04  8:18         ` Bruce Richardson
2024-09-06 11:59         ` Varghese, Vipin
2024-09-12 16:58           ` Wathsala Wathawana Vithanage
2024-08-27 15:10 ` Vipin Varghese [this message]
2024-08-27 21:23 ` [RFC 0/2] introduce LLC " Mattias Rönnblom
2024-09-02  0:39   ` Varghese, Vipin
2024-09-04  9:30     ` Mattias Rönnblom
2024-09-04 14:37       ` Stephen Hemminger
2024-09-11  3:13         ` Varghese, Vipin
2024-09-11  3:53           ` Stephen Hemminger
2024-09-12  1:11             ` Varghese, Vipin
2024-09-09 14:22       ` Varghese, Vipin
2024-09-09 14:52         ` Mattias Rönnblom
2024-09-11  3:26           ` Varghese, Vipin
2024-09-11 15:55             ` Mattias Rönnblom
2024-09-11 17:04               ` Honnappa Nagarahalli
2024-09-12  1:33                 ` Varghese, Vipin
2024-09-12  6:38                   ` Mattias Rönnblom
2024-09-12  7:02                     ` Mattias Rönnblom
2024-09-12 11:23                       ` Varghese, Vipin
2024-09-12 12:12                         ` Mattias Rönnblom
2024-09-12 15:50                           ` Stephen Hemminger
2024-09-12 11:17                     ` Varghese, Vipin
2024-09-12 11:59                       ` Mattias Rönnblom
2024-09-12 13:30                         ` Bruce Richardson
2024-09-12 16:32                           ` Mattias Rönnblom
2024-09-12  2:28                 ` Varghese, Vipin
2024-09-11 16:01             ` Bruce Richardson
2024-09-11 22:25               ` Konstantin Ananyev
2024-09-12  2:38                 ` Varghese, Vipin
2024-09-12  2:19               ` Varghese, Vipin
2024-09-12  9:17                 ` Bruce Richardson
2024-09-12 11:50                   ` Varghese, Vipin
2024-09-13 14:15                     ` Burakov, Anatoly
2024-09-12 13:18                   ` Mattias Rönnblom
2024-08-28  8:38 ` Burakov, Anatoly
2024-09-02  1:08   ` Varghese, Vipin
2024-09-02 14:17     ` Burakov, Anatoly
2024-09-02 15:33       ` Varghese, Vipin
2024-09-03  8:50         ` Burakov, Anatoly
2024-09-05 13:05           ` Ferruh Yigit
2024-09-05 14:45             ` Burakov, Anatoly
2024-09-05 15:34               ` Ferruh Yigit
2024-09-06  8:44                 ` Burakov, Anatoly
2024-09-09 14:14                   ` Varghese, Vipin
