* [PATCH v4 1/4] eal/lcore: add topology based functions
2024-11-05 10:28 [PATCH v4 0/4] Introduce Topology NUMA grouping for lcores Vipin Varghese
@ 2024-11-05 10:28 ` Vipin Varghese
2024-11-05 10:28 ` [PATCH v4 2/4] test/lcore: enable tests for topology Vipin Varghese
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Vipin Varghese @ 2024-11-05 10:28 UTC (permalink / raw)
To: dev, roretzla, bruce.richardson, john.mcnamara, dmitry.kozliuk
Cc: pbhagavatula, jerinj, ruifeng.wang, mattias.ronnblom,
anatoly.burakov, stephen, ferruh.yigit, honnappa.nagarahalli,
wathsala.vithanage, konstantin.ananyev, mb
Introduce topology-aware lcore mapping into the lcore API.
With higher core density, more and more cores are grouped into
chiplets based on IO (memory and PCIe) and Last Level Cache
(mainly L3).
Using the hwloc library, the DPDK-enabled lcores can be grouped
into domains, namely L1, L2, L3, L4 and IO. This patch introduces
functions and macros that help identify such groups; a usage
sketch follows the API list below.
Internal API:
- get_domain_lcore_count;
- get_domain_lcore_mapping;
- rte_eal_topology_init;
- rte_eal_topology_release;
External Experimental API:
- rte_get_domain_count;
- rte_get_lcore_in_domain;
- rte_get_next_lcore_from_domain;
- rte_get_next_lcore_from_next_domain;
- rte_lcore_count_from_domain;
- rte_lcore_cpuset_in_domain;
- rte_lcore_is_main_in_domain;
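A usage sketch (illustrative only, based on the declarations added to
rte_lcore.h below; error handling omitted) enumerating the L3 domains
and the DPDK lcores inside each one:

    #include <stdio.h>
    #include <rte_lcore.h>

    static void
    list_l3_domains(void)
    {
        unsigned int dom, lcore;
        unsigned int l3_domains = rte_get_domain_count(RTE_LCORE_DOMAIN_L3);

        for (dom = 0; dom < l3_domains; dom++) {
            /* lcore count and first lcore within this L3 domain */
            unsigned int n = rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L3, dom);
            unsigned int first = rte_get_lcore_in_domain(RTE_LCORE_DOMAIN_L3, dom, 0);

            printf("L3 domain %u: %u lcores, first lcore %u\n", dom, n, first);
        }

        /* iterate lcores following the L3 domain grouping */
        RTE_LCORE_FOREACH_DOMAIN(lcore, RTE_LCORE_DOMAIN_L3)
            printf("lcore %u\n", lcore);
    }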
v4 changes:
- add internal API get_domain_lcore_count
- add external API rte_lcore_cpuset_in_domain
- add external API rte_lcore_is_main_in_domain
- remove malloc casting: Stephen Hemminger
- remove NULL check before free: Stephen Hemminger
- convert l3_count & io_count to uint16_t: Stephen Hemminger
- use rte_malloc for internal core_mapping: Stephen Hemminger
- extend to L4 cache: Morten Brørup
- add comment as place holder for enable cache-id: Morten Brørup
v2 changes:
- focuses on rte_lcore api for getting topology
- use hwloc instead of sysfs exploration - Mattias Rönnblom
- L1, L2 and IO domain mapping - Ferruh, Vipin
- new API marked experimental - Stephen Hemminger
Signed-off-by: Vipin Varghese <vipin.varghese@amd.com>
---
config/meson.build | 18 +
lib/eal/common/eal_common_lcore.c | 714 ++++++++++++++++++++++++++++++
lib/eal/common/eal_private.h | 58 +++
lib/eal/freebsd/eal.c | 10 +
lib/eal/include/rte_lcore.h | 209 +++++++++
lib/eal/linux/eal.c | 11 +
lib/eal/meson.build | 4 +
lib/eal/version.map | 11 +
lib/eal/windows/eal.c | 12 +
9 files changed, 1047 insertions(+)
diff --git a/config/meson.build b/config/meson.build
index 5095d2fbcb..42e4d28f8d 100644
--- a/config/meson.build
+++ b/config/meson.build
@@ -240,6 +240,24 @@ if find_libnuma
endif
endif
+has_libhwloc = false
+find_libhwloc = true
+
+if meson.is_cross_build() and not meson.get_cross_property('hwloc', true)
+ # don't look for libhwloc if explicitly disabled in cross build
+ find_libhwloc = false
+endif
+
+if find_libhwloc
+ hwloc_dep = cc.find_library('hwloc', required: false)
+ if hwloc_dep.found() and cc.has_header('hwloc.h')
+ dpdk_conf.set10('RTE_HAS_LIBHWLOC', true)
+ has_libhwloc = true
+ add_project_link_arguments('-lhwloc', language: 'c')
+ dpdk_extra_ldflags += '-lhwloc'
+ endif
+endif
+
has_libfdt = false
fdt_dep = cc.find_library('fdt', required: false)
if fdt_dep.found() and cc.has_header('fdt.h')
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 2ff9252c52..756aaf9fbc 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -14,6 +14,7 @@
#ifndef RTE_EXEC_ENV_WINDOWS
#include <rte_telemetry.h>
#endif
+#include <rte_malloc.h>
#include "eal_private.h"
#include "eal_thread.h"
@@ -112,6 +113,371 @@ unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap)
return i;
}
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+static struct core_domain_mapping *
+get_domain_lcore_mapping(unsigned int domain_sel, unsigned int domain_indx)
+{
+ struct core_domain_mapping *ptr =
+ (domain_sel & RTE_LCORE_DOMAIN_IO) ? topo_cnfg.io[domain_indx] :
+ (domain_sel & RTE_LCORE_DOMAIN_L4) ? topo_cnfg.l4[domain_indx] :
+ (domain_sel & RTE_LCORE_DOMAIN_L3) ? topo_cnfg.l3[domain_indx] :
+ (domain_sel & RTE_LCORE_DOMAIN_L2) ? topo_cnfg.l2[domain_indx] :
+ (domain_sel & RTE_LCORE_DOMAIN_L1) ? topo_cnfg.l1[domain_indx] : NULL;
+
+ return ptr;
+}
+
+static unsigned int
+get_domain_lcore_count(unsigned int domain_sel)
+{
+ return ((domain_sel & RTE_LCORE_DOMAIN_IO) ? topo_cnfg.io_core_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L4) ? topo_cnfg.l4_core_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L3) ? topo_cnfg.l3_core_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L2) ? topo_cnfg.l2_core_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L1) ? topo_cnfg.l1_core_count : 0);
+}
+#endif
+
+unsigned int rte_get_domain_count(unsigned int domain_sel __rte_unused)
+{
+ unsigned int domain_cnt = 0;
+
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ if (domain_sel & RTE_LCORE_DOMAIN_ALL) {
+ domain_cnt =
+ (domain_sel & RTE_LCORE_DOMAIN_IO) ? topo_cnfg.io_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L4) ? topo_cnfg.l4_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L3) ? topo_cnfg.l3_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L2) ? topo_cnfg.l2_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L1) ? topo_cnfg.l1_count : 0;
+ }
+#endif
+
+ return domain_cnt;
+}
+
+unsigned int
+rte_lcore_count_from_domain(unsigned int domain_sel __rte_unused,
+unsigned int domain_indx __rte_unused)
+{
+ unsigned int core_cnt = 0;
+
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ unsigned int domain_cnt = 0;
+
+ if ((domain_sel & RTE_LCORE_DOMAIN_ALL) == 0)
+ return core_cnt;
+
+ domain_cnt = rte_get_domain_count(domain_sel);
+
+ if (domain_cnt == 0)
+ return core_cnt;
+
+ if ((domain_indx != RTE_LCORE_DOMAIN_LCORES_ALL) && (domain_indx >= domain_cnt))
+ return core_cnt;
+
+ core_cnt = (domain_sel & RTE_LCORE_DOMAIN_IO) ? topo_cnfg.io_core_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L4) ? topo_cnfg.l4_core_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L3) ? topo_cnfg.l3_core_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L2) ? topo_cnfg.l2_core_count :
+ (domain_sel & RTE_LCORE_DOMAIN_L1) ? topo_cnfg.l1_core_count : 0;
+
+ if ((domain_indx != RTE_LCORE_DOMAIN_LCORES_ALL) && (core_cnt)) {
+ struct core_domain_mapping *ptr = get_domain_lcore_mapping(domain_sel, domain_indx);
+ core_cnt = ptr->core_count;
+ }
+#endif
+
+ return core_cnt;
+}
+
+unsigned int
+rte_get_lcore_in_domain(unsigned int domain_sel __rte_unused,
+unsigned int domain_indx __rte_unused, unsigned int lcore_pos __rte_unused)
+{
+ uint16_t sel_core = RTE_MAX_LCORE;
+
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ unsigned int domain_cnt = 0;
+ unsigned int core_cnt = 0;
+
+ if (domain_sel & RTE_LCORE_DOMAIN_ALL) {
+ domain_cnt = rte_get_domain_count(domain_sel);
+ if (domain_cnt == 0)
+ return sel_core;
+
+ core_cnt = rte_lcore_count_from_domain(domain_sel, RTE_LCORE_DOMAIN_LCORES_ALL);
+ if (core_cnt == 0)
+ return sel_core;
+
+ struct core_domain_mapping *ptr = get_domain_lcore_mapping(domain_sel, domain_indx);
+ if ((ptr) && (ptr->core_count)) {
+ if (lcore_pos < ptr->core_count)
+ sel_core = ptr->cores[lcore_pos];
+ }
+ }
+#endif
+
+ return sel_core;
+}
+
+rte_cpuset_t
+rte_lcore_cpuset_in_domain(unsigned int domain_sel __rte_unused,
+unsigned int domain_indx __rte_unused)
+{
+ rte_cpuset_t ret_cpu_set;
+ CPU_ZERO(&ret_cpu_set);
+
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ struct core_domain_mapping *ptr = NULL;
+ unsigned int domain_count = rte_get_domain_count(domain_sel);
+
+ if ((domain_count == 0) || (domain_indx > domain_count))
+ return ret_cpu_set;
+
+ ptr = get_domain_lcore_mapping(domain_sel, domain_indx);
+ if (ptr->core_count == 0)
+ return ret_cpu_set;
+
+ CPU_OR(&ret_cpu_set, &ret_cpu_set, &ptr->core_set);
+#endif
+
+ return ret_cpu_set;
+}
+
+bool
+rte_lcore_is_main_in_domain(unsigned int domain_sel __rte_unused,
+unsigned int domain_indx __rte_unused)
+{
+ bool is_main_in_domain = false;
+
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ struct core_domain_mapping *ptr = NULL;
+ unsigned int main_lcore = rte_get_main_lcore();
+ unsigned int domain_count = rte_get_domain_count(domain_sel);
+
+ if ((domain_count == 0) || (domain_indx > domain_count))
+ return is_main_in_domain;
+
+ ptr = get_domain_lcore_mapping(domain_sel, domain_indx);
+ if (ptr->core_count == 0)
+ return is_main_in_domain;
+
+ is_main_in_domain = CPU_ISSET(main_lcore, &ptr->core_set);
+#endif
+
+ return is_main_in_domain;
+}
+
+unsigned int
+rte_get_next_lcore_from_domain(unsigned int indx __rte_unused,
+int skip_main __rte_unused, int wrap __rte_unused, uint32_t flag __rte_unused)
+{
+ if (indx >= RTE_MAX_LCORE) {
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ if (get_domain_lcore_count(flag) == 0)
+ return RTE_MAX_LCORE;
+#endif
+ indx = rte_get_next_lcore(-1, skip_main, wrap);
+ return indx;
+ }
+ uint16_t usr_lcore = indx % RTE_MAX_LCORE;
+ uint16_t sel_domain_core = RTE_MAX_LCORE;
+
+ EAL_LOG(DEBUG, "lcore (%u), skip main lcore (%d), wrap (%d), flag (%u)",
+ usr_lcore, skip_main, wrap, flag);
+
+ /* check the input lcore indx */
+ if (!rte_lcore_is_enabled(indx)) {
+ EAL_LOG(ERR, "User input lcore (%u) is not enabled!!!", indx);
+ return sel_domain_core;
+ }
+
+ if ((rte_lcore_count() == 1)) {
+ EAL_LOG(DEBUG, "only 1 lcore in dpdk process!!!");
+ sel_domain_core = wrap ? indx : sel_domain_core;
+ return sel_domain_core;
+ }
+
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ uint16_t main_lcore = rte_get_main_lcore();
+ uint16_t sel_domain = 0xffff;
+ uint16_t sel_domain_core_index = 0xffff;
+ uint16_t sel_domain_core_count = 0;
+
+ struct core_domain_mapping *ptr = NULL;
+ uint16_t domain_count = 0;
+ uint16_t domain_core_count = 0;
+ uint16_t *domain_core_list = NULL;
+
+ domain_count = rte_get_domain_count(flag);
+ if (domain_count == 0) {
+ EAL_LOG(DEBUG, "No domain found for cores with flag (%u)!!!", flag);
+ return sel_domain_core;
+ }
+
+ /* identify the lcore to get the domain to start from */
+ for (int i = 0; (i < domain_count) && (sel_domain_core_index == 0xffff); i++) {
+ ptr = get_domain_lcore_mapping(flag, i);
+
+ domain_core_count = ptr->core_count;
+ domain_core_list = ptr->cores;
+
+ for (int j = 0; j < domain_core_count; j++) {
+ if (usr_lcore == domain_core_list[j]) {
+ sel_domain_core_index = j;
+ sel_domain_core_count = domain_core_count;
+ sel_domain = i;
+ break;
+ }
+ }
+ }
+
+ if (sel_domain_core_count == 1) {
+ EAL_LOG(DEBUG, "there is no more lcore in the domain!!!");
+ return sel_domain_core;
+ }
+
+ EAL_LOG(DEBUG, "selected: domain (%u), core: count %u, index %u, core: current %u",
+ sel_domain, sel_domain_core_count, sel_domain_core_index,
+ domain_core_list[sel_domain_core_index]);
+
+ /* get next lcore from the selected domain */
+ /* next lcore is always `sel_domain_core_index + 1`, but needs boundary check */
+ bool lcore_found = false;
+ uint16_t next_domain_lcore_index = sel_domain_core_index + 1;
+ while (false == lcore_found) {
+
+ if (next_domain_lcore_index >= sel_domain_core_count) {
+ if (wrap) {
+ next_domain_lcore_index = 0;
+ continue;
+ }
+ break;
+ }
+
+ /* check if main lcore skip */
+ if ((domain_core_list[next_domain_lcore_index] == main_lcore) && (skip_main)) {
+ next_domain_lcore_index += 1;
+ continue;
+ }
+
+ lcore_found = true;
+ }
+ if (true == lcore_found)
+ sel_domain_core = domain_core_list[next_domain_lcore_index];
+#endif
+
+ EAL_LOG(DEBUG, "Selected core (%u)", sel_domain_core);
+ return sel_domain_core;
+}
+
+unsigned int
+rte_get_next_lcore_from_next_domain(unsigned int indx __rte_unused,
+int skip_main __rte_unused, int wrap __rte_unused,
+uint32_t flag __rte_unused, int cores_to_skip __rte_unused)
+{
+ if (indx >= RTE_MAX_LCORE) {
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ if (get_domain_lcore_count(flag) == 0)
+ return RTE_MAX_LCORE;
+#endif
+ indx = rte_get_next_lcore(-1, skip_main, wrap);
+ return indx;
+ }
+
+ uint16_t sel_domain_core = RTE_MAX_LCORE;
+ uint16_t usr_lcore = indx % RTE_MAX_LCORE;
+
+ EAL_LOG(DEBUG, "lcore (%u), skip main lcore (%d), wrap (%d), flag (%u)",
+ usr_lcore, skip_main, wrap, flag);
+
+ /* check the input lcore indx */
+ if (!rte_lcore_is_enabled(indx)) {
+ EAL_LOG(DEBUG, "User input lcore (%u) is not enabled!!!", indx);
+ return sel_domain_core;
+ }
+
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ uint16_t main_lcore = rte_get_main_lcore();
+
+ uint16_t sel_domain = 0xffff;
+ uint16_t sel_domain_core_index = 0xffff;
+
+ uint16_t domain_count = 0;
+ uint16_t domain_core_count = 0;
+ uint16_t *domain_core_list = NULL;
+
+ domain_count = rte_get_domain_count(flag);
+ if (domain_count == 0) {
+ EAL_LOG(DEBUG, "No Domains found for the flag (%u)!!!", flag);
+ return sel_domain_core;
+ }
+
+ /* identify the lcore to get the domain to start from */
+ struct core_domain_mapping *ptr = NULL;
+ for (int i = 0; (i < domain_count) && (sel_domain_core_index == 0xffff); i++) {
+ ptr = get_domain_lcore_mapping(flag, i);
+ domain_core_count = ptr->core_count;
+ domain_core_list = ptr->cores;
+
+ for (int j = 0; j < domain_core_count; j++) {
+ if (usr_lcore == domain_core_list[j]) {
+ sel_domain_core_index = j;
+ sel_domain = i;
+ break;
+ }
+ }
+ }
+
+ if (sel_domain_core_index == 0xffff) {
+ EAL_LOG(DEBUG, "Invalid lcore %u for the flag (%u)!!!", indx, flag);
+ return sel_domain_core;
+ }
+
+ EAL_LOG(DEBUG, "Selected - core_index (%u); domain (%u), core_count (%u), cores (%p)",
+ sel_domain_core_index, sel_domain, domain_core_count, domain_core_list);
+
+ uint16_t skip_cores = (cores_to_skip >= 0) ? cores_to_skip : (0 - cores_to_skip);
+
+ /* get the next domain & valid lcore */
+ sel_domain = (((1 + sel_domain) == domain_count) && (wrap)) ? 0 : (1 + sel_domain);
+ sel_domain_core_index = 0xffff;
+
+ bool iter_loop = false;
+ for (int i = sel_domain; (i < domain_count) && (sel_domain_core == RTE_MAX_LCORE); i++) {
+ ptr = get_domain_lcore_mapping(flag, i);
+
+ domain_core_count = ptr->core_count;
+ domain_core_list = ptr->cores;
+
+ /* check if we have cores to iterate from this domain */
+ if (skip_cores >= domain_core_count)
+ continue;
+
+ if (((1 + sel_domain) == domain_count) && (wrap)) {
+ if (iter_loop == true)
+ break;
+
+ iter_loop = true;
+ }
+
+ sel_domain_core_index = (cores_to_skip >= 0) ? skip_cores :
+ (domain_core_count - skip_cores);
+ sel_domain_core = domain_core_list[sel_domain_core_index];
+
+ if ((skip_main) && (sel_domain_core == main_lcore)) {
+ sel_domain_core_index = 0xffff;
+ sel_domain_core = RTE_MAX_LCORE;
+ continue;
+ }
+ }
+#endif
+
+ EAL_LOG(DEBUG, "Selected core (%u)", sel_domain_core);
+ return sel_domain_core;
+}
+
unsigned int
rte_lcore_to_socket_id(unsigned int lcore_id)
{
@@ -131,6 +497,354 @@ socket_id_cmp(const void *a, const void *b)
return 0;
}
+
+
+/*
+ * Use HWLOC library to parse L1|L2|L3|NUMA-IO on the running target machine.
+ * Store the topology structure in memory.
+ */
+int
+rte_eal_topology_init(void)
+{
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ memset(&topo_cnfg, 0, sizeof(struct topology_config));
+
+ hwloc_topology_init(&topo_cnfg.topology);
+ hwloc_topology_load(topo_cnfg.topology);
+
+ int l1_depth = hwloc_get_type_depth(topo_cnfg.topology, HWLOC_OBJ_L1CACHE);
+ int l2_depth = hwloc_get_type_depth(topo_cnfg.topology, HWLOC_OBJ_L2CACHE);
+ int l3_depth = hwloc_get_type_depth(topo_cnfg.topology, HWLOC_OBJ_L3CACHE);
+ int l4_depth = hwloc_get_type_depth(topo_cnfg.topology, HWLOC_OBJ_L4CACHE);
+ int io_depth = hwloc_get_type_depth(topo_cnfg.topology, HWLOC_OBJ_NUMANODE);
+
+ EAL_LOG(DEBUG, "TOPOLOGY - depth: l1 %d, l2 %d, l3 %d, l4 %d, io %d",
+ l1_depth, l2_depth, l3_depth, l4_depth, io_depth);
+
+ topo_cnfg.l1_count = hwloc_get_nbobjs_by_depth(topo_cnfg.topology, l1_depth);
+ topo_cnfg.l2_count = hwloc_get_nbobjs_by_depth(topo_cnfg.topology, l2_depth);
+ topo_cnfg.l3_count = hwloc_get_nbobjs_by_depth(topo_cnfg.topology, l3_depth);
+ topo_cnfg.l4_count = hwloc_get_nbobjs_by_depth(topo_cnfg.topology, l4_depth);
+ topo_cnfg.io_count = hwloc_get_nbobjs_by_depth(topo_cnfg.topology, io_depth);
+
+ EAL_LOG(DEBUG, "TOPOLOGY - obj count: l1 %d, l2 %d, l3 %d, l4 %d, io %d",
+ topo_cnfg.l1_count, topo_cnfg.l2_count,
+ topo_cnfg.l3_count, topo_cnfg.l4_count,
+ topo_cnfg.io_count);
+
+ if ((l1_depth) && (topo_cnfg.l1_count)) {
+ topo_cnfg.l1 = rte_malloc(NULL,
+ sizeof(struct core_domain_mapping *) * topo_cnfg.l1_count, 0);
+ if (topo_cnfg.l1 == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ for (int j = 0; j < topo_cnfg.l1_count; j++) {
+ hwloc_obj_t obj = hwloc_get_obj_by_depth(topo_cnfg.topology, l1_depth, j);
+ unsigned int first_cpu = hwloc_bitmap_first(obj->cpuset);
+ unsigned int cpu_count = hwloc_bitmap_weight(obj->cpuset);
+
+ topo_cnfg.l1[j] = rte_malloc(NULL, sizeof(struct core_domain_mapping), 0);
+ if (topo_cnfg.l1[j] == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ topo_cnfg.l1[j]->core_count = 0;
+ topo_cnfg.l1[j]->cores = rte_malloc(NULL, sizeof(uint16_t) * cpu_count, 0);
+ if (topo_cnfg.l1[j]->cores == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ signed int cpu_id = first_cpu;
+ unsigned int cpu_index = 0;
+ do {
+ if (rte_lcore_is_enabled(cpu_id)) {
+ EAL_LOG(DEBUG, " L1|SMT domain (%u) lcore %u", j, cpu_id);
+ topo_cnfg.l1[j]->cores[cpu_index] = cpu_id;
+ cpu_index++;
+
+ CPU_SET(cpu_id, &topo_cnfg.l1[j]->core_set);
+ topo_cnfg.l1[j]->core_count += 1;
+ topo_cnfg.l1_core_count += 1;
+ }
+ cpu_id = hwloc_bitmap_next(obj->cpuset, cpu_id);
+ cpu_count -= 1;
+ } while ((cpu_id != -1) && (cpu_count));
+ }
+ }
+
+ if ((l2_depth) && (topo_cnfg.l2_count)) {
+ topo_cnfg.l2 = rte_malloc(NULL,
+ sizeof(struct core_domain_mapping *) * topo_cnfg.l2_count, 0);
+ if (topo_cnfg.l2 == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ for (int j = 0; j < topo_cnfg.l2_count; j++) {
+ hwloc_obj_t obj = hwloc_get_obj_by_depth(topo_cnfg.topology, l2_depth, j);
+ unsigned int first_cpu = hwloc_bitmap_first(obj->cpuset);
+ unsigned int cpu_count = hwloc_bitmap_weight(obj->cpuset);
+
+ topo_cnfg.l2[j] = rte_malloc(NULL, sizeof(struct core_domain_mapping), 0);
+ if (topo_cnfg.l2[j] == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ topo_cnfg.l2[j]->core_count = 0;
+ topo_cnfg.l2[j]->cores = rte_malloc(NULL, sizeof(uint16_t) * cpu_count, 0);
+ if (topo_cnfg.l2[j]->cores == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ signed int cpu_id = first_cpu;
+ unsigned int cpu_index = 0;
+ do {
+ if (rte_lcore_is_enabled(cpu_id)) {
+ EAL_LOG(DEBUG, " L2 domain (%u) lcore %u", j, cpu_id);
+ topo_cnfg.l2[j]->cores[cpu_index] = cpu_id;
+ cpu_index++;
+
+ CPU_SET(cpu_id, &topo_cnfg.l2[j]->core_set);
+ topo_cnfg.l2[j]->core_count += 1;
+ topo_cnfg.l2_core_count += 1;
+ }
+ cpu_id = hwloc_bitmap_next(obj->cpuset, cpu_id);
+ cpu_count -= 1;
+ } while ((cpu_id != -1) && (cpu_count));
+ }
+ }
+
+ if ((l3_depth) && (topo_cnfg.l3_count)) {
+ topo_cnfg.l3 = rte_malloc(NULL,
+ sizeof(struct core_domain_mapping *) * topo_cnfg.l3_count, 0);
+ if (topo_cnfg.l3 == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ for (int j = 0; j < topo_cnfg.l3_count; j++) {
+ hwloc_obj_t obj = hwloc_get_obj_by_depth(topo_cnfg.topology, l3_depth, j);
+ unsigned int first_cpu = hwloc_bitmap_first(obj->cpuset);
+ unsigned int cpu_count = hwloc_bitmap_weight(obj->cpuset);
+
+ topo_cnfg.l3[j] = rte_malloc(NULL, sizeof(struct core_domain_mapping), 0);
+ if (topo_cnfg.l3[j] == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ topo_cnfg.l3[j]->core_count = 0;
+ topo_cnfg.l3[j]->cores = rte_malloc(NULL, sizeof(uint16_t) * cpu_count, 0);
+ if (topo_cnfg.l3[j]->cores == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ signed int cpu_id = first_cpu;
+ unsigned int cpu_index = 0;
+ do {
+ if (rte_lcore_is_enabled(cpu_id)) {
+ EAL_LOG(DEBUG, " L3 domain (%u) lcore %u", j, cpu_id);
+ topo_cnfg.l3[j]->cores[cpu_index] = cpu_id;
+ cpu_index++;
+
+ CPU_SET(cpu_id, &topo_cnfg.l3[j]->core_set);
+ topo_cnfg.l3[j]->core_count += 1;
+ topo_cnfg.l3_core_count += 1;
+ }
+ cpu_id = hwloc_bitmap_next(obj->cpuset, cpu_id);
+ cpu_count -= 1;
+ } while ((cpu_id != -1) && (cpu_count));
+ }
+ }
+
+ if ((l4_depth) && (topo_cnfg.l4_count)) {
+ topo_cnfg.l4 = rte_malloc(NULL,
+ sizeof(struct core_domain_mapping *) * topo_cnfg.l4_count, 0);
+ if (topo_cnfg.l4 == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ for (int j = 0; j < topo_cnfg.l4_count; j++) {
+ hwloc_obj_t obj = hwloc_get_obj_by_depth(topo_cnfg.topology, l4_depth, j);
+ unsigned int first_cpu = hwloc_bitmap_first(obj->cpuset);
+ unsigned int cpu_count = hwloc_bitmap_weight(obj->cpuset);
+
+ topo_cnfg.l4[j] = rte_malloc(NULL, sizeof(struct core_domain_mapping), 0);
+ if (topo_cnfg.l4[j] == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ topo_cnfg.l4[j]->core_count = 0;
+ topo_cnfg.l4[j]->cores = rte_malloc(NULL, sizeof(uint16_t) * cpu_count, 0);
+ if (topo_cnfg.l4[j]->cores == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ signed int cpu_id = first_cpu;
+ unsigned int cpu_index = 0;
+ do {
+ if (rte_lcore_is_enabled(cpu_id)) {
+ EAL_LOG(DEBUG, " L4 domain (%u) lcore %u", j, cpu_id);
+ topo_cnfg.l4[j]->cores[cpu_index] = cpu_id;
+ cpu_index++;
+
+ CPU_SET(cpu_id, &topo_cnfg.l4[j]->core_set);
+ topo_cnfg.l4[j]->core_count += 1;
+ topo_cnfg.l4_core_count += 1;
+ }
+ cpu_id = hwloc_bitmap_next(obj->cpuset, cpu_id);
+ cpu_count -= 1;
+ } while ((cpu_id != -1) && (cpu_count));
+ }
+ }
+
+ if ((io_depth) && (topo_cnfg.io_count)) {
+ topo_cnfg.io = rte_malloc(NULL,
+ sizeof(struct core_domain_mapping *) * topo_cnfg.io_count, 0);
+ if (topo_cnfg.io == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ for (int j = 0; j < topo_cnfg.io_count; j++) {
+ hwloc_obj_t obj = hwloc_get_obj_by_depth(topo_cnfg.topology, io_depth, j);
+ unsigned int first_cpu = hwloc_bitmap_first(obj->cpuset);
+ unsigned int cpu_count = hwloc_bitmap_weight(obj->cpuset);
+
+ topo_cnfg.io[j] = rte_malloc(NULL, sizeof(struct core_domain_mapping), 0);
+ if (topo_cnfg.io[j] == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ topo_cnfg.io[j]->core_count = 0;
+ topo_cnfg.io[j]->cores = rte_malloc(NULL, sizeof(uint16_t) * cpu_count, 0);
+ if (topo_cnfg.io[j]->cores == NULL) {
+ rte_eal_topology_release();
+ return -1;
+ }
+
+ signed int cpu_id = first_cpu;
+ unsigned int cpu_index = 0;
+ do {
+ if (rte_lcore_is_enabled(cpu_id)) {
+ EAL_LOG(DEBUG, " IO domain (%u) lcore %u", j, cpu_id);
+ topo_cnfg.io[j]->cores[cpu_index] = cpu_id;
+ cpu_index++;
+
+ CPU_SET(cpu_id, &topo_cnfg.io[j]->core_set);
+ topo_cnfg.io[j]->core_count += 1;
+ topo_cnfg.io_core_count += 1;
+ }
+ cpu_id = hwloc_bitmap_next(obj->cpuset, cpu_id);
+ cpu_count -= 1;
+ } while ((cpu_id != -1) && (cpu_count));
+ }
+ }
+
+ hwloc_topology_destroy(topo_cnfg.topology);
+ topo_cnfg.topology = NULL;
+
+ EAL_LOG(INFO, "TOPOLOGY - core count: l1 %u, l2 %u, l3 %u, l4 %u, io %u",
+ topo_cnfg.l1_core_count, topo_cnfg.l2_core_count,
+ topo_cnfg.l3_core_count, topo_cnfg.l4_core_count,
+ topo_cnfg.io_core_count);
+#endif
+
+ return 0;
+}
+
+/*
+ * release HWLOC topology structure memory
+ */
+int
+rte_eal_topology_release(void)
+{
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ EAL_LOG(DEBUG, "release l1 domain memory!");
+ for (int i = 0; i < topo_cnfg.l1_count; i++) {
+ if (topo_cnfg.l1[i]->cores) {
+ rte_free(topo_cnfg.l1[i]->cores);
+ topo_cnfg.l1[i]->core_count = 0;
+ }
+ }
+
+ if (topo_cnfg.l1_count) {
+ rte_free(topo_cnfg.l1);
+ topo_cnfg.l1 = NULL;
+ topo_cnfg.l1_count = 0;
+ }
+
+ EAL_LOG(DEBUG, "release l2 domain memory!");
+ for (int i = 0; i < topo_cnfg.l2_count; i++) {
+ if (topo_cnfg.l2[i]->cores) {
+ rte_free(topo_cnfg.l2[i]->cores);
+ topo_cnfg.l2[i]->core_count = 0;
+ }
+ }
+
+ if (topo_cnfg.l2_count) {
+ rte_free(topo_cnfg.l2);
+ topo_cnfg.l2 = NULL;
+ topo_cnfg.l2_count = 0;
+ }
+
+ EAL_LOG(DEBUG, "release l3 domain memory!");
+ for (int i = 0; i < topo_cnfg.l3_count; i++) {
+ if (topo_cnfg.l3[i]->cores) {
+ rte_free(topo_cnfg.l3[i]->cores);
+ topo_cnfg.l3[i]->core_count = 0;
+ }
+ }
+
+ if (topo_cnfg.l3_count) {
+ rte_free(topo_cnfg.l3);
+ topo_cnfg.l3 = NULL;
+ topo_cnfg.l3_count = 0;
+ }
+
+ EAL_LOG(DEBUG, "release l4 domain memory!");
+ for (int i = 0; i < topo_cnfg.l4_count; i++) {
+ if (topo_cnfg.l4[i]->cores) {
+ rte_free(topo_cnfg.l4[i]->cores);
+ topo_cnfg.l4[i]->core_count = 0;
+ }
+ }
+
+ if (topo_cnfg.l4_count) {
+ rte_free(topo_cnfg.l4);
+ topo_cnfg.l4 = NULL;
+ topo_cnfg.l4_count = 0;
+ }
+
+ EAL_LOG(DEBUG, "release IO domain memory!");
+ for (int i = 0; i < topo_cnfg.io_count; i++) {
+ if (topo_cnfg.io[i]->cores) {
+ rte_free(topo_cnfg.io[i]->cores);
+ topo_cnfg.io[i]->core_count = 0;
+ }
+ }
+
+ if (topo_cnfg.io_count) {
+ rte_free(topo_cnfg.io);
+ topo_cnfg.io = NULL;
+ topo_cnfg.io_count = 0;
+ }
+#endif
+
+ return 0;
+}
+
/*
* Parse /sys/devices/system/cpu to get the number of physical and logical
* processors on the machine. The function will fill the cpu_info
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index bb315dab04..de9d5fc50f 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -14,9 +14,14 @@
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
+#include <rte_os.h>
#include "eal_internal_cfg.h"
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+#include <hwloc.h>
+#endif
+
/**
* Structure storing internal configuration (per-lcore)
*/
@@ -40,6 +45,45 @@ struct lcore_config {
extern struct lcore_config lcore_config[RTE_MAX_LCORE];
+struct core_domain_mapping {
+ rte_cpuset_t core_set; /**< cpu_set representing lcores within domain */
+ uint16_t core_count; /**< dpdk enabled lcores within domain */
+ uint16_t *cores; /**< list of cores */
+
+ /* uint16_t *l1_cache_id; */
+ /* uint16_t *l2_cache_id; */
+ /* uint16_t *l3_cache_id; */
+ /* uint16_t *l4_cache_id; */
+};
+
+struct topology_config {
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ hwloc_topology_t topology;
+#endif
+
+ /* domain count */
+ uint16_t l1_count;
+ uint16_t l2_count;
+ uint16_t l3_count;
+ uint16_t l4_count;
+ uint16_t io_count;
+
+ /* total cores under all domain */
+ uint16_t l1_core_count;
+ uint16_t l2_core_count;
+ uint16_t l3_core_count;
+ uint16_t l4_core_count;
+ uint16_t io_core_count;
+
+ /* two dimensional array for each domain */
+ struct core_domain_mapping **l1;
+ struct core_domain_mapping **l2;
+ struct core_domain_mapping **l3;
+ struct core_domain_mapping **l4;
+ struct core_domain_mapping **io;
+};
+extern struct topology_config topo_cnfg;
+
/**
* The global RTE configuration structure.
*/
@@ -81,6 +125,20 @@ struct rte_config *rte_eal_get_configuration(void);
*/
int rte_eal_memzone_init(void);
+
+/**
+ * Initialize the topology structure using HWLOC Library
+ */
+__rte_internal
+int rte_eal_topology_init(void);
+
+/**
+ * Release the memory held by Topology structure
+ */
+__rte_internal
+int rte_eal_topology_release(void);
+
+
/**
* Fill configuration with number of physical and logical processors
*
diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c
index 1229230063..301f993748 100644
--- a/lib/eal/freebsd/eal.c
+++ b/lib/eal/freebsd/eal.c
@@ -73,6 +73,8 @@ struct lcore_config lcore_config[RTE_MAX_LCORE];
/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;
+/* holds topology information */
+struct topology_config topo_cnfg;
int
eal_clean_runtime_dir(void)
@@ -912,6 +914,12 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ if (rte_eal_topology_init()) {
+ rte_eal_init_alert("Cannot initialize topology");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+
eal_mcfg_complete();
return fctret;
@@ -932,6 +940,8 @@ rte_eal_cleanup(void)
struct internal_config *internal_conf =
eal_get_internal_configuration();
+
+ rte_eal_topology_release();
rte_service_finalize();
rte_mp_channel_cleanup();
eal_bus_cleanup();
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 549b9e68c5..56c309d0e7 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -18,6 +18,7 @@
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_thread.h>
+#include <rte_bitset.h>
#ifdef __cplusplus
extern "C" {
@@ -37,6 +38,44 @@ enum rte_lcore_role_t {
ROLE_NON_EAL,
};
+/**
+ * The lcore grouping within the L1 Domain.
+ */
+#define RTE_LCORE_DOMAIN_L1 RTE_BIT32(0)
+/**
+ * The lcore grouping within the L2 Domain.
+ */
+#define RTE_LCORE_DOMAIN_L2 RTE_BIT32(1)
+/**
+ * The lcore grouping within the L3 Domain.
+ */
+#define RTE_LCORE_DOMAIN_L3 RTE_BIT32(2)
+/**
+ * The lcore grouping within the L4 Domain.
+ */
+#define RTE_LCORE_DOMAIN_L4 RTE_BIT32(3)
+/**
+ * The lcore grouping within the IO Domain.
+ */
+#define RTE_LCORE_DOMAIN_IO RTE_BIT32(4)
+/**
+ * The lcore grouping within the SMT Domain (same as the L1 Domain).
+ */
+#define RTE_LCORE_DOMAIN_SMT RTE_LCORE_DOMAIN_L1
+/**
+ * The lcore grouping based on Domains (L1|L2|L3|L4|IO).
+ */
+#define RTE_LCORE_DOMAIN_ALL (RTE_LCORE_DOMAIN_L1 | \
+ RTE_LCORE_DOMAIN_L2 | \
+ RTE_LCORE_DOMAIN_L3 | \
+ RTE_LCORE_DOMAIN_L4 | \
+ RTE_LCORE_DOMAIN_IO)
+/**
+ * Wildcard domain index, selecting lcores from all instances of a domain.
+ */
+#define RTE_LCORE_DOMAIN_LCORES_ALL RTE_GENMASK32(31, 0)
+
+
/**
* Get a lcore's role.
*
@@ -211,6 +250,144 @@ int rte_lcore_is_enabled(unsigned int lcore_id);
*/
unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap);
+/**
+ * Get the number of domains for the selected domain type.
+ *
+ * @param domain_sel
+ * Domain selection, RTE_LCORE_DOMAIN_[L1|L2|L3|L4|IO].
+ * @return
+ *  Total number of domains of the selected type.
+ *
+ * @note valid for EAL args of lcore and coremask.
+ *
+ */
+__rte_experimental
+unsigned int rte_get_domain_count(unsigned int domain_sel);
+
+/**
+ * Get the number of lcores within a domain.
+ *
+ * @param domain_sel
+ * Domain selection, RTE_LCORE_DOMAIN_[L1|L2|L3|L4|IO].
+ * @param domain_indx
+ * Domain Index, valid range from 0 to (rte_get_domain_count - 1).
+ * @return
+ *  Total lcore count for the selected index of the domain.
+ *
+ * @note valid for EAL args of lcore and coremask.
+ *
+ */
+__rte_experimental
+unsigned int
+rte_lcore_count_from_domain(unsigned int domain_sel, unsigned int domain_indx);
+
+/**
+ * Get n'th lcore from a selected domain.
+ *
+ * @param domain_sel
+ * Domain selection, RTE_LCORE_DOMAIN_[L1|L2|L3|L4|IO].
+ * @param domain_indx
+ * Domain Index, valid range from 0 to (rte_get_domain_count - 1).
+ * @param lcore_pos
+ * lcore position, valid range from 0 to (dpdk_enabled_lcores in the domain -1)
+ * @return
+ * lcore from the list for the selected domain.
+ *
+ * @note valid for EAL args of lcore and coremask.
+ *
+ */
+__rte_experimental
+unsigned int
+rte_get_lcore_in_domain(unsigned int domain_sel,
+unsigned int domain_indx, unsigned int lcore_pos);
+
+#ifdef RTE_HAS_CPUSET
+/**
+ * Return cpuset for all lcores in selected domain.
+ *
+ * @param domain_sel
+ * Domain selection, RTE_LCORE_DOMAIN_[L1|L2|L3|L4|IO].
+ * @param domain_indx
+ * Domain Index, valid range from 0 to (rte_get_domain_count - 1).
+ * @return
+ * cpuset for all lcores from the selected domain.
+ *
+ * @note valid for EAL args of lcore and coremask.
+ *
+ */
+__rte_experimental
+rte_cpuset_t
+rte_lcore_cpuset_in_domain(unsigned int domain_sel, unsigned int domain_indx);
+#endif
+
+/**
+ * Check whether the main lcore is present in the selected domain.
+ *
+ * @param domain_sel
+ * Domain selection, RTE_LCORE_DOMAIN_[L1|L2|L3|L4|IO].
+ * @param domain_indx
+ * Domain Index, valid range from 0 to (rte_get_domain_count - 1).
+ * @return
+ *  true if the main lcore is in the selected domain, false otherwise.
+ *
+ * @note valid for EAL args of lcore and coremask.
+ *
+ */
+__rte_experimental
+bool
+rte_lcore_is_main_in_domain(unsigned int domain_sel, unsigned int domain_indx);
+
+/**
+ * Get the next enabled lcore within the same domain as the reference lcore,
+ * for the domain type selected by the extended flag.
+ *
+ * @param i
+ * The current lcore (reference).
+ * @param skip_main
+ * If true, do not return the ID of the main lcore.
+ * @param wrap
+ * If true, go back to first core of flag based domain when last core is reached.
+ * If false, return RTE_MAX_LCORE when no more cores are available.
+ * @param flag
+ * Allows user to select various domain as specified under RTE_LCORE_DOMAIN_[L1|L2|L3|L4|IO]
+ *
+ * @return
+ * The next lcore_id or RTE_MAX_LCORE if not found.
+ *
+ * @note valid for EAL args of lcore and coremask.
+ *
+ */
+__rte_experimental
+unsigned int
+rte_get_next_lcore_from_domain(unsigned int i, int skip_main, int wrap,
+uint32_t flag);
+
+/**
+ * Get the Nth lcore (counted from the first or the last) from the next
+ * domain, for the domain type selected by the extended flag.
+ *
+ * @param i
+ * The current lcore (reference).
+ * @param skip_main
+ * If true, do not return the ID of the main lcore.
+ * @param wrap
+ * If true, go back to first core of flag based domain when last core is reached.
+ * If false, return RTE_MAX_LCORE when no more cores are available.
+ * @param flag
+ * Allows user to select various domain as specified under RTE_LCORE_DOMAIN_(L1|L2|L3|L4|IO)
+ * @param cores_to_skip
+ * If set to positive value, will skip to Nth lcore from start.
+ * If set to negative value, will skip to Nth lcore from last.
+ *
+ * @return
+ * The next lcore_id or RTE_MAX_LCORE if not found.
+ *
+ * @note valid for EAL args of lcore and coremask.
+ *
+ */
+__rte_experimental
+unsigned int
+rte_get_next_lcore_from_next_domain(unsigned int i,
+int skip_main, int wrap, uint32_t flag, int cores_to_skip);
+
/**
* Macro to browse all running lcores.
*/
@@ -227,6 +404,38 @@ unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap);
i < RTE_MAX_LCORE; \
i = rte_get_next_lcore(i, 1, 0))
+/**
+ * Macro to browse all running lcores in a domain.
+ */
+#define RTE_LCORE_FOREACH_DOMAIN(i, flag) \
+ for (i = rte_get_next_lcore_from_domain(-1, 0, 0, flag); \
+ i < RTE_MAX_LCORE; \
+ i = rte_get_next_lcore_from_domain(i, 0, 0, flag))
+
+/**
+ * Macro to browse all running lcores except the main lcore in domain.
+ */
+#define RTE_LCORE_FOREACH_WORKER_DOMAIN(i, flag) \
+ for (i = rte_get_next_lcore_from_domain(-1, 1, 0, flag); \
+ i < RTE_MAX_LCORE; \
+ i = rte_get_next_lcore_from_domain(i, 1, 0, flag))
+
+/**
+ * Macro to browse the Nth lcore of each domain.
+ */
+#define RTE_LCORE_FORN_NEXT_DOMAIN(i, flag, n) \
+ for (i = rte_get_next_lcore_from_next_domain(-1, 0, 0, flag, n);\
+ i < RTE_MAX_LCORE; \
+ i = rte_get_next_lcore_from_next_domain(i, 0, 0, flag, n))
+
+/**
+ * Macro to browse the Nth lcore, excluding the main lcore, of each domain.
+ */
+#define RTE_LCORE_FORN_WORKER_NEXT_DOMAIN(i, flag, n) \
+ for (i = rte_get_next_lcore_from_next_domain(-1, 1, 0, flag, n);\
+ i < RTE_MAX_LCORE; \
+ i = rte_get_next_lcore_from_next_domain(i, 1, 0, flag, n))
+
/**
* Callback prototype for initializing lcores.
*
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index 54577b7718..093f208319 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -65,6 +65,9 @@
* duration of the program, as we hold a write lock on it in the primary proc */
static int mem_cfg_fd = -1;
+/* holds topology information */
+struct topology_config topo_cnfg;
+
static struct flock wr_lock = {
.l_type = F_WRLCK,
.l_whence = SEEK_SET,
@@ -1311,6 +1314,12 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ if (rte_eal_topology_init()) {
+ rte_eal_init_alert("Cannot initialize topology");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+
eal_mcfg_complete();
return fctret;
@@ -1352,6 +1361,8 @@ rte_eal_cleanup(void)
struct internal_config *internal_conf =
eal_get_internal_configuration();
+ rte_eal_topology_release();
+
if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
internal_conf->hugepage_file.unlink_existing)
rte_memseg_walk(mark_freeable, NULL);
diff --git a/lib/eal/meson.build b/lib/eal/meson.build
index e1d6c4cf17..690b95d5df 100644
--- a/lib/eal/meson.build
+++ b/lib/eal/meson.build
@@ -31,3 +31,7 @@ endif
if is_freebsd
annotate_locks = false
endif
+
+if has_libhwloc
+ dpdk_conf.set10('RTE_EAL_HWLOC_TOPOLOGY_PROBE', true)
+endif
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 747331af60..a2f1fc1a6c 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -397,6 +397,15 @@ EXPERIMENTAL {
# added in 24.11
rte_bitset_to_str;
+
+ # added in 25.03
+ rte_get_domain_count;
+ rte_get_lcore_in_domain;
+ rte_get_next_lcore_from_domain;
+ rte_get_next_lcore_from_next_domain;
+ rte_lcore_count_from_domain;
+ rte_lcore_cpuset_in_domain;
+ rte_lcore_is_main_in_domain;
};
INTERNAL {
@@ -406,6 +415,8 @@ INTERNAL {
rte_bus_unregister;
rte_eal_get_baseaddr;
rte_eal_parse_coremask;
+ rte_eal_topology_init;
+ rte_eal_topology_release;
rte_firmware_read;
rte_intr_allow_others;
rte_intr_cap_multiple;
diff --git a/lib/eal/windows/eal.c b/lib/eal/windows/eal.c
index 28b78a95a6..2edfc4128c 100644
--- a/lib/eal/windows/eal.c
+++ b/lib/eal/windows/eal.c
@@ -40,6 +40,10 @@ static int mem_cfg_fd = -1;
/* internal configuration (per-core) */
struct lcore_config lcore_config[RTE_MAX_LCORE];
+/* holds topology information */
+struct topology_config topo_cnfg;
+
+
/* Detect if we are a primary or a secondary process */
enum rte_proc_type_t
eal_proc_type_detect(void)
@@ -262,6 +266,8 @@ rte_eal_cleanup(void)
struct internal_config *internal_conf =
eal_get_internal_configuration();
+ rte_eal_topology_release();
+
eal_intr_thread_cancel();
eal_mem_virt2iova_cleanup();
eal_bus_cleanup();
@@ -505,6 +511,12 @@ rte_eal_init(int argc, char **argv)
rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
rte_eal_mp_wait_lcore();
+ if (rte_eal_topology_init()) {
+ rte_eal_init_alert("Cannot initialize topology");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+
eal_mcfg_complete();
return fctret;
--
2.34.1
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v4 2/4] test/lcore: enable tests for topology
2024-11-05 10:28 [PATCH v4 0/4] Introduce Topology NUMA grouping for lcores Vipin Varghese
2024-11-05 10:28 ` [PATCH v4 1/4] eal/lcore: add topology based functions Vipin Varghese
@ 2024-11-05 10:28 ` Vipin Varghese
2024-11-05 10:28 ` [PATCH v4 3/4] doc: add topology grouping details Vipin Varghese
2024-11-05 10:28 ` [PATCH v4 4/4] examples: update with lcore topology API Vipin Varghese
3 siblings, 0 replies; 5+ messages in thread
From: Vipin Varghese @ 2024-11-05 10:28 UTC (permalink / raw)
To: dev, roretzla, bruce.richardson, john.mcnamara, dmitry.kozliuk
Cc: pbhagavatula, jerinj, ruifeng.wang, mattias.ronnblom,
anatoly.burakov, stephen, ferruh.yigit, honnappa.nagarahalli,
wathsala.vithanage, konstantin.ananyev, mb
Add functional test cases to validate the topology-aware lcore API.
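The central consistency check, sketched here for the L3 level (the
tests below repeat it for L1, L2, L4 and IO), is that the per-domain
lcore counts add up to the overall DPDK lcore count:

    unsigned int i, sum = 0;
    unsigned int l3_domains = rte_get_domain_count(RTE_LCORE_DOMAIN_L3);

    for (i = 0; i < l3_domains; i++)
        sum += rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L3, i);

    /* when L3 domains are reported, the sum must match rte_lcore_count() */
    if (l3_domains != 0 && sum != rte_lcore_count())
        printf("ERR: inconsistent lcore count across L3 domains\n");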
v4 changes:
- add MACRO for triggering tests if topology is enabled.
- add test cases for
* rte_lcore_is_main_in_domain
* rte_lcore_cpuset_in_domain
* MACRO RTE_LCORE_FORN_NEXT_DOMAIN
* MACRO RTE_LCORE_FORN_WORKER_NEXT_DOMAIN
v3 changes:
- fix test check for RTE_LCORE_FOREACH_DOMAIN
Signed-off-by: Vipin Varghese <vipin.varghese@amd.com>
---
app/test/test_lcores.c | 528 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 528 insertions(+)
diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index bd5c0dd94b..15a9417e66 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -389,6 +389,513 @@ test_ctrl_thread(void)
return 0;
}
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+static int
+test_topology_macro(void)
+{
+ unsigned int total_lcores = 0;
+ unsigned int total_wrkr_lcores = 0;
+
+ unsigned int total_lcore_io = 0;
+ unsigned int total_lcore_l4 = 0;
+ unsigned int total_lcore_l3 = 0;
+ unsigned int total_lcore_l2 = 0;
+ unsigned int total_lcore_l1 = 0;
+
+ unsigned int total_wrkr_lcore_io = 0;
+ unsigned int total_wrkr_lcore_l4 = 0;
+ unsigned int total_wrkr_lcore_l3 = 0;
+ unsigned int total_wrkr_lcore_l2 = 0;
+ unsigned int total_wrkr_lcore_l1 = 0;
+
+ unsigned int lcore;
+
+ /* get topology core count */
+ lcore = -1;
+ RTE_LCORE_FOREACH(lcore)
+ total_lcores += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_WORKER(lcore)
+ total_wrkr_lcores += 1;
+
+ if ((total_wrkr_lcores + 1) != total_lcores) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FOREACH\n");
+ return -2;
+ }
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_DOMAIN(lcore, RTE_LCORE_DOMAIN_IO)
+ total_lcore_io += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_DOMAIN(lcore, RTE_LCORE_DOMAIN_L4)
+ total_lcore_l4 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_DOMAIN(lcore, RTE_LCORE_DOMAIN_L3)
+ total_lcore_l3 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_DOMAIN(lcore, RTE_LCORE_DOMAIN_L2)
+ total_lcore_l2 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_DOMAIN(lcore, RTE_LCORE_DOMAIN_L1)
+ total_lcore_l1 += 1;
+
+ printf("DBG: lcore count: default (%u), io (%u), l4 (%u), l3 (%u), l2 (%u), l1 (%u).\n",
+ total_lcores, total_lcore_io,
+ total_lcore_l4, total_lcore_l3, total_lcore_l2, total_lcore_l1);
+
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_WORKER_DOMAIN(lcore, RTE_LCORE_DOMAIN_IO)
+ total_wrkr_lcore_io += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_WORKER_DOMAIN(lcore, RTE_LCORE_DOMAIN_L4)
+ total_wrkr_lcore_l4 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_WORKER_DOMAIN(lcore, RTE_LCORE_DOMAIN_L3)
+ total_wrkr_lcore_l3 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_WORKER_DOMAIN(lcore, RTE_LCORE_DOMAIN_L2)
+ total_wrkr_lcore_l2 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FOREACH_WORKER_DOMAIN(lcore, RTE_LCORE_DOMAIN_L1)
+ total_wrkr_lcore_l1 += 1;
+
+ printf("DBG: worker lcore count: default (%u), io (%u), l4 (%u), l3 (%u), l2 (%u), l1 (%u).\n",
+ total_wrkr_lcores, total_wrkr_lcore_io,
+ total_wrkr_lcore_l4, total_wrkr_lcore_l3,
+ total_wrkr_lcore_l2, total_wrkr_lcore_l1);
+
+
+ if ((total_wrkr_lcore_io) > total_lcore_io) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FOREACH_DOMAIN for IO\n");
+ return -2;
+ }
+
+ if ((total_wrkr_lcore_l4) > total_lcore_l4) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FOREACH_DOMAIN for L4\n");
+ return -2;
+ }
+
+ if ((total_wrkr_lcore_l3) > total_lcore_l3) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FOREACH_DOMAIN for L3\n");
+ return -2;
+ }
+
+ if ((total_wrkr_lcore_l2) > total_lcore_l2) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FOREACH_DOMAIN for L2\n");
+ return -2;
+ }
+
+ if ((total_wrkr_lcore_l1) > total_lcore_l1) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FOREACH_DOMAIN for L1\n");
+ return -2;
+ }
+
+ total_lcore_io = 0;
+ total_lcore_l4 = 0;
+ total_lcore_l3 = 0;
+ total_lcore_l2 = 0;
+ total_lcore_l1 = 0;
+
+ lcore = -1;
+ RTE_LCORE_FORN_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_IO, 0)
+ total_lcore_io += 1;
+
+ lcore = -1;
+ RTE_LCORE_FORN_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_L4, 0)
+ total_lcore_l4 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FORN_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_L3, 0)
+ total_lcore_l3 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FORN_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_L2, 0)
+ total_lcore_l2 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FORN_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_L1, 0)
+ total_lcore_l1 += 1;
+
+ printf("DBG: macro domain lcore: default (%u), io (%u), l4 (%u), l3 (%u), l2 (%u), l1 (%u).\n",
+ total_lcores, total_lcore_io,
+ total_lcore_l4, total_lcore_l3, total_lcore_l2, total_lcore_l1);
+
+ total_wrkr_lcore_io = 0;
+ total_wrkr_lcore_l4 = 0;
+ total_wrkr_lcore_l3 = 0;
+ total_wrkr_lcore_l2 = 0;
+ total_wrkr_lcore_l1 = 0;
+
+ lcore = -1;
+ RTE_LCORE_FORN_WORKER_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_IO, 0)
+ total_wrkr_lcore_io += 1;
+
+ lcore = -1;
+ RTE_LCORE_FORN_WORKER_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_L4, 0)
+ total_wrkr_lcore_l4 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FORN_WORKER_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_L3, 0)
+ total_wrkr_lcore_l3 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FORN_WORKER_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_L2, 0)
+ total_wrkr_lcore_l2 += 1;
+
+ lcore = -1;
+ RTE_LCORE_FORN_WORKER_NEXT_DOMAIN(lcore, RTE_LCORE_DOMAIN_L1, 0)
+ total_wrkr_lcore_l1 += 1;
+
+ printf("DBG: macro next domain worker count: default (%u), io (%u), l4 (%u), l3 (%u), l2 (%u), l1 (%u).\n",
+ total_wrkr_lcores, total_wrkr_lcore_io,
+ total_wrkr_lcore_l4, total_wrkr_lcore_l3,
+ total_wrkr_lcore_l2, total_wrkr_lcore_l1);
+
+ if ((total_wrkr_lcore_io) > total_lcore_io) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FORN_NEXT_DOMAIN for IO\n");
+ return -2;
+ }
+
+ if ((total_wrkr_lcore_l4) > total_lcore_l4) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FORN_NEXT_DOMAIN for L4\n");
+ return -2;
+ }
+
+ if ((total_wrkr_lcore_l3) > total_lcore_l3) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FORN_NEXT_DOMAIN for L3\n");
+ return -2;
+ }
+
+ if ((total_wrkr_lcore_l2) > total_lcore_l2) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FORN_NEXT_DOMAIN for L2\n");
+ return -2;
+ }
+
+ if ((total_wrkr_lcore_l1) > total_lcore_l1) {
+ printf("ERR: failed in MACRO for RTE_LCORE_FORN_NEXT_DOMAIN for L1\n");
+ return -2;
+ }
+ printf("INFO: lcore DOMAIN macro: success!\n");
+ return 0;
+}
+
+static int
+test_lcore_count_from_domain(void)
+{
+ unsigned int total_lcores = 0;
+ unsigned int total_lcore_io = 0;
+ unsigned int total_lcore_l4 = 0;
+ unsigned int total_lcore_l3 = 0;
+ unsigned int total_lcore_l2 = 0;
+ unsigned int total_lcore_l1 = 0;
+
+ unsigned int domain_count;
+ unsigned int i;
+
+ /* get topology core count */
+ total_lcores = rte_lcore_count();
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_IO);
+ for (i = 0; i < domain_count; i++)
+ total_lcore_io += rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_IO, i);
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L4);
+ for (i = 0; i < domain_count; i++)
+ total_lcore_l4 += rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L4, i);
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L3);
+ for (i = 0; i < domain_count; i++)
+ total_lcore_l3 += rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L3, i);
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L2);
+ for (i = 0; i < domain_count; i++)
+ total_lcore_l2 += rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L2, i);
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L1);
+ for (i = 0; i < domain_count; i++)
+ total_lcore_l1 += rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L1, i);
+
+ printf("DBG: lcore count: default (%u), io (%u), l4 (%u), l3 (%u), l2 (%u), l1 (%u).\n",
+ total_lcores, total_lcore_io,
+ total_lcore_l4, total_lcore_l3, total_lcore_l2, total_lcore_l1);
+
+ if ((total_lcore_l1 && (total_lcores != total_lcore_l1)) ||
+ (total_lcore_l2 && (total_lcores != total_lcore_l2)) ||
+ (total_lcore_l3 && (total_lcores != total_lcore_l3)) ||
+ (total_lcore_l4 && (total_lcores != total_lcore_l4)) ||
+ (total_lcore_io && (total_lcores != total_lcore_io))) {
+ printf("ERR: failed in domain API\n");
+ return -2;
+ }
+
+ printf("INFO: lcore count domain API: success\n");
+
+ return 0;
+}
+
+#ifdef RTE_HAS_CPUSET
+static int
+test_lcore_cpuset_from_domain(void)
+{
+ unsigned int domain_count;
+ uint16_t dmn_idx;
+ rte_cpuset_t cpu_set_list;
+
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_IO);
+
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_IO, dmn_idx);
+
+ for (uint16_t cpu_idx = 0; cpu_idx < RTE_MAX_LCORE; cpu_idx++) {
+ if (CPU_ISSET(cpu_idx, &cpu_set_list)) {
+ if (!rte_lcore_is_enabled(cpu_idx)) {
+ printf("ERR: lcore id: %u, shared from IO (%u) domain is not enabled!\n",
+ cpu_idx, dmn_idx);
+ return -1;
+ }
+ }
+ }
+ }
+
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L4);
+
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_L4, dmn_idx);
+
+ for (uint16_t cpu_idx = 0; cpu_idx < RTE_MAX_LCORE; cpu_idx++) {
+ if (CPU_ISSET(cpu_idx, &cpu_set_list)) {
+ if (!rte_lcore_is_enabled(cpu_idx)) {
+ printf("ERR: lcore id: %u, shared from L4 (%u) domain is not enabled!\n",
+ cpu_idx, dmn_idx);
+ return -1;
+ }
+ }
+ }
+ }
+
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L3);
+
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_L3, dmn_idx);
+
+ for (uint16_t cpu_idx = 0; cpu_idx < RTE_MAX_LCORE; cpu_idx++) {
+ if (CPU_ISSET(cpu_idx, &cpu_set_list)) {
+ if (!rte_lcore_is_enabled(cpu_idx)) {
+ printf("ERR: lcore id: %u, shared from L3 (%u) domain is not enabled!\n",
+ cpu_idx, dmn_idx);
+ return -1;
+ }
+ }
+ }
+ }
+
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L2);
+
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_L2, dmn_idx);
+
+ for (uint16_t cpu_idx = 0; cpu_idx < RTE_MAX_LCORE; cpu_idx++) {
+ if (CPU_ISSET(cpu_idx, &cpu_set_list)) {
+ if (!rte_lcore_is_enabled(cpu_idx)) {
+ printf("ERR: lcore id: %u, shared from L2 (%u) domain is not enabled!\n",
+ cpu_idx, dmn_idx);
+ return -1;
+ }
+ }
+ }
+ }
+
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L1);
+
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_L1, dmn_idx);
+
+ for (uint16_t cpu_idx = 0; cpu_idx < RTE_MAX_LCORE; cpu_idx++) {
+ if (CPU_ISSET(cpu_idx, &cpu_set_list)) {
+ if (!rte_lcore_is_enabled(cpu_idx)) {
+ printf("ERR: lcore id: %u, shared from IO (%u) domain is not enabled!\n",
+ cpu_idx, dmn_idx);
+ return -1;
+ }
+ }
+ }
+ }
+
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_L1, RTE_MAX_LCORE);
+ if (CPU_COUNT(&cpu_set_list)) {
+ printf("ERR: RTE_MAX_LCORE (%u) in L1 domain is enabled!\n", RTE_MAX_LCORE);
+ return -2;
+ }
+
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_L2, RTE_MAX_LCORE);
+ if (CPU_COUNT(&cpu_set_list)) {
+ printf("ERR: RTE_MAX_LCORE (%u) in L2 domain is enabled!\n", RTE_MAX_LCORE);
+ return -2;
+ }
+
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_L3, RTE_MAX_LCORE);
+ if (CPU_COUNT(&cpu_set_list)) {
+ printf("ERR: RTE_MAX_LCORE (%u) in L3 domain is enabled!\n", RTE_MAX_LCORE);
+ return -2;
+ }
+
+ cpu_set_list = rte_lcore_cpuset_in_domain(RTE_LCORE_DOMAIN_IO, RTE_MAX_LCORE);
+ if (CPU_COUNT(&cpu_set_list)) {
+ printf("ERR: RTE_MAX_LCORE (%u) in IO domain is enabled!\n", RTE_MAX_LCORE);
+ return -2;
+ }
+
+ printf("INFO: cpuset_in_domain API: success!\n");
+ return 0;
+}
+#endif
+
+static int
+test_main_lcore_in_domain(void)
+{
+ bool main_lcore_found;
+ unsigned int domain_count;
+ uint16_t dmn_idx;
+
+ main_lcore_found = false;
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_IO);
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ main_lcore_found = rte_lcore_is_main_in_domain(RTE_LCORE_DOMAIN_IO, dmn_idx);
+ if (main_lcore_found) {
+ printf("DBG: main lcore found in IO domain: %u\n", dmn_idx);
+ break;
+ }
+ }
+
+ if ((domain_count) && (main_lcore_found == false)) {
+ printf("ERR: main lcore is not found in any of the IO domain!\n");
+ return -1;
+ }
+
+ main_lcore_found = false;
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L4);
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ main_lcore_found = rte_lcore_is_main_in_domain(RTE_LCORE_DOMAIN_L4, dmn_idx);
+ if (main_lcore_found) {
+ printf("DBG: main lcore found in L4 domain: %u\n", dmn_idx);
+ break;
+ }
+ }
+
+ if ((domain_count) && (main_lcore_found == false)) {
+ printf("ERR: main lcore is not found in any of the L4 domain!\n");
+ return -1;
+ }
+
+ main_lcore_found = false;
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L3);
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ main_lcore_found = rte_lcore_is_main_in_domain(RTE_LCORE_DOMAIN_L3, dmn_idx);
+ if (main_lcore_found) {
+ printf("DBG: main lcore found in L3 domain: %u\n", dmn_idx);
+ break;
+ }
+ }
+
+ if ((domain_count) && (main_lcore_found == false)) {
+ printf("ERR: main lcore is not found in any of the L3 domain!\n");
+ return -1;
+ }
+
+ main_lcore_found = false;
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L2);
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ main_lcore_found = rte_lcore_is_main_in_domain(RTE_LCORE_DOMAIN_L2, dmn_idx);
+ if (main_lcore_found) {
+ printf("DBG: main lcore is found on the L2 domain: %u\n", dmn_idx);
+ break;
+ }
+ }
+
+ if ((domain_count) && (main_lcore_found == false)) {
+ printf("ERR: main lcore is not found in any of the L2 domain!\n");
+ return -1;
+ }
+
+ main_lcore_found = false;
+ dmn_idx = 0;
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L1);
+ for (; dmn_idx < domain_count; dmn_idx++) {
+ main_lcore_found = rte_lcore_is_main_in_domain(RTE_LCORE_DOMAIN_L1, dmn_idx);
+ if (main_lcore_found) {
+ printf("DBG: main lcore is found on the L1 domain: %u\n", dmn_idx);
+ break;
+ }
+ }
+
+ if ((domain_count) && (main_lcore_found == false)) {
+ printf("ERR: main lcore is not found in any of the L1 domain!\n");
+ return -1;
+ }
+
+ printf("INFO: is_main_lcore_in_domain API: success!\n");
+ return 0;
+}
+
+static int
+test_lcore_from_domain_negative(void)
+{
+ unsigned int domain_count;
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_IO);
+ if ((domain_count) && (rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_IO, domain_count))) {
+ printf("ERR: domain API inconsistent for IO\n");
+ return -1;
+ }
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L4);
+ if ((domain_count) && (rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L4, domain_count))) {
+ printf("ERR: domain API inconsistent for L4\n");
+ return -1;
+ }
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L3);
+ if ((domain_count) && (rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L3, domain_count))) {
+ printf("ERR: domain API inconsistent for L3\n");
+ return -1;
+ }
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L2);
+ if ((domain_count) && (rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L2, domain_count))) {
+ printf("ERR: domain API inconsistent for L2\n");
+ return -1;
+ }
+
+ domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_L1);
+ if ((domain_count) && (rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_L1, domain_count))) {
+ printf("ERR: domain API inconsistent for L1\n");
+ return -1;
+ }
+
+ printf("INFO: lcore domain API: success!\n");
+ return 0;
+}
+#endif
+
static int
test_lcores(void)
{
@@ -419,6 +926,27 @@ test_lcores(void)
if (test_ctrl_thread() < 0)
return TEST_FAILED;
+#ifdef RTE_EAL_HWLOC_TOPOLOGY_PROBE
+ printf("\nTopology test\n");
+
+ if (test_topology_macro() < 0)
+ return TEST_FAILED;
+
+ if (test_lcore_count_from_domain() < 0)
+ return TEST_FAILED;
+
+ if (test_lcore_from_domain_negative() < 0)
+ return TEST_FAILED;
+
+#ifdef RTE_HAS_CPUSET
+ if (test_lcore_cpuset_from_domain() < 0)
+ return TEST_FAILED;
+#endif
+
+ if (test_main_lcore_in_domain() < 0)
+ return TEST_FAILED;
+#endif
+
return TEST_SUCCESS;
}
--
2.34.1
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v4 4/4] examples: update with lcore topology API
2024-11-05 10:28 [PATCH v4 0/4] Introduce Topology NUMA grouping for lcores Vipin Varghese
` (2 preceding siblings ...)
2024-11-05 10:28 ` [PATCH v4 3/4] doc: add topology grouping details Vipin Varghese
@ 2024-11-05 10:28 ` Vipin Varghese
3 siblings, 0 replies; 5+ messages in thread
From: Vipin Varghese @ 2024-11-05 10:28 UTC (permalink / raw)
To: dev, roretzla, bruce.richardson, john.mcnamara, dmitry.kozliuk
Cc: pbhagavatula, jerinj, ruifeng.wang, mattias.ronnblom,
anatoly.burakov, stephen, ferruh.yigit, honnappa.nagarahalli,
wathsala.vithanage, konstantin.ananyev, mb
Enhance the example applications to use the topology-based lcore API,
while retaining the default behaviour.
- helloworld: allow lcores to send hello to the other lcores in the
  selected topology domain.
- l2fwd: allow use of the IO domain lcore topology.
- skeleton: choose the lcore from the IO domain which has the most
  ports (a rough sketch of this selection is given below).
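A rough sketch of that skeleton selection (illustrative only, not the
exact code from the patch; it assumes the IO domain index lines up with
the NUMA socket id returned by rte_eth_dev_socket_id(), which may not
hold on every platform):

    #include <rte_ethdev.h>
    #include <rte_lcore.h>

    static unsigned int
    pick_fwd_lcore(void)
    {
        uint16_t port;
        unsigned int dom, best_dom = 0, best_ports = 0;
        unsigned int io_domains = rte_get_domain_count(RTE_LCORE_DOMAIN_IO);

        for (dom = 0; dom < io_domains; dom++) {
            unsigned int ports_here = 0;

            /* count the ports attached to this IO (NUMA) domain */
            RTE_ETH_FOREACH_DEV(port)
                if (rte_eth_dev_socket_id(port) == (int)dom)
                    ports_here++;

            if (ports_here > best_ports) {
                best_ports = ports_here;
                best_dom = dom;
            }
        }

        /* first DPDK lcore inside the IO domain with the most ports */
        return rte_get_lcore_in_domain(RTE_LCORE_DOMAIN_IO, best_dom, 0);
    }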
v4 changes:
- cross compilation failure on ARM: Pavan Nikhilesh Bhagavatula
- update helloworld for L4
v3 changes:
- fix typo from SE_NO_TOPOLOGY to USE_NO_TOPOLOGY
Signed-off-by: Vipin Varghese <vipin.varghese@amd.com>
---
examples/helloworld/main.c | 154 ++++++++++++++++++++++++++++++++++-
examples/l2fwd/main.c | 56 +++++++++++--
examples/skeleton/basicfwd.c | 22 +++++
3 files changed, 222 insertions(+), 10 deletions(-)
diff --git a/examples/helloworld/main.c b/examples/helloworld/main.c
index af509138da..f39db532d9 100644
--- a/examples/helloworld/main.c
+++ b/examples/helloworld/main.c
@@ -5,8 +5,10 @@
#include <stdio.h>
#include <string.h>
#include <stdint.h>
+#include <stdlib.h>
#include <errno.h>
#include <sys/queue.h>
+#include <getopt.h>
#include <rte_memory.h>
#include <rte_launch.h>
@@ -14,6 +16,14 @@
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
+#include <rte_log.h>
+
+#define RTE_LOGTYPE_HELLOWORLD RTE_LOGTYPE_USER1
+#define USE_NO_TOPOLOGY 0xffff
+
+static uint16_t topo_sel = USE_NO_TOPOLOGY;
+/* lcore selector based on Topology */
+static const char short_options[] = "T:";
/* Launch a function on lcore. 8< */
static int
@@ -21,11 +31,119 @@ lcore_hello(__rte_unused void *arg)
{
unsigned lcore_id;
lcore_id = rte_lcore_id();
+
printf("hello from core %u\n", lcore_id);
return 0;
}
+
+static int
+send_lcore_hello(__rte_unused void *arg)
+{
+ unsigned int lcore_id;
+ uint16_t send_lcore_id;
+ uint16_t send_count = 0;
+
+ lcore_id = rte_lcore_id();
+
+ send_lcore_id = rte_get_next_lcore_from_domain(lcore_id, false, true, topo_sel);
+
+ while ((send_lcore_id != RTE_MAX_LCORE) && (lcore_id != send_lcore_id)) {
+ printf("hello from lcore %3u to lcore %3u\n",
+ lcore_id, send_lcore_id);
+
+ send_lcore_id = rte_get_next_lcore_from_domain(send_lcore_id,
+ false, true, topo_sel);
+ send_count += 1;
+ }
+
+ if (send_count == 0)
+ printf("for %3u lcore; there are no lcore in (%s) domain!!!\n",
+ lcore_id,
+ (topo_sel == RTE_LCORE_DOMAIN_L1) ? "L1" :
+ (topo_sel == RTE_LCORE_DOMAIN_L2) ? "L2" :
+ (topo_sel == RTE_LCORE_DOMAIN_L3) ? "L3" :
+ (topo_sel == RTE_LCORE_DOMAIN_L4) ? "L4" : "IO");
+
+ return 0;
+}
/* >8 End of launching function on lcore. */
+/* display usage. 8< */
+static void
+helloworld_usage(const char *prgname)
+{
+ printf("%s [EAL options] -- [-T TOPO]\n"
+ " -T TOPO: choose topology to send hello to cores\n"
+ " - 0: sharing IO\n"
+ " - 1: sharing L1\n"
+ " - 2: sharing L2\n"
+ " - 3: sharing L3\n"
+ " - 4: sharing L4\n\n",
+ prgname);
+}
+
+static unsigned int
+parse_topology(const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+ /* parse the topology option */
+ n = strtoul(q_arg, &end, 10);
+
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return 0;
+
+ if (n > 4)
+ return USE_NO_TOPOLOGY;
+
+ n = (n == 0) ? RTE_LCORE_DOMAIN_IO :
+ (n == 1) ? RTE_LCORE_DOMAIN_L1 :
+ (n == 2) ? RTE_LCORE_DOMAIN_L2 :
+ (n == 3) ? RTE_LCORE_DOMAIN_L3 : RTE_LCORE_DOMAIN_L4;
+
+ return n;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+helloworld_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt = argv;
+ int option_index;
+ char *prgname = argv[0];
+ while ((opt = getopt_long(argc, argvopt, short_options,
+ NULL, &option_index)) != EOF) {
+ switch (opt) {
+ /* Topology selection */
+ case 'T':
+ topo_sel = parse_topology(optarg);
+ if (topo_sel == USE_NO_TOPOLOGY) {
+ helloworld_usage(prgname);
+ rte_exit(EXIT_FAILURE, "Invalid Topology selection\n");
+ }
+
+ RTE_LOG(DEBUG, HELLOWORLD, "USR selects (%s) domain cores!\n",
+ (topo_sel == RTE_LCORE_DOMAIN_L1) ? "L1" :
+ (topo_sel == RTE_LCORE_DOMAIN_L2) ? "L2" :
+ (topo_sel == RTE_LCORE_DOMAIN_L3) ? "L3" :
+ (topo_sel == RTE_LCORE_DOMAIN_L4) ? "L4" : "IO");
+
+ ret = 0;
+ break;
+ default:
+ helloworld_usage(prgname);
+ return -1;
+ }
+ }
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+ ret = optind-1;
+ optind = 1; /* reset getopt lib */
+ return ret;
+}
+
/* Initialization of Environment Abstraction Layer (EAL). 8< */
int
main(int argc, char **argv)
@@ -38,15 +156,47 @@ main(int argc, char **argv)
rte_panic("Cannot init EAL\n");
/* >8 End of initialization of Environment Abstraction Layer */
+ argc -= ret;
+ argv += ret;
+
+ ret = helloworld_parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid arguments\n");
+
+ if (topo_sel != USE_NO_TOPOLOGY) {
+ uint16_t domain_count = rte_get_domain_count(topo_sel);
+
+ RTE_LOG(DEBUG, HELLOWORLD, "selected Domain (%s)\n",
+ (topo_sel == RTE_LCORE_DOMAIN_L1) ? "L1" :
+ (topo_sel == RTE_LCORE_DOMAIN_L2) ? "L2" :
+ (topo_sel == RTE_LCORE_DOMAIN_L3) ? "L3" :
+ (topo_sel == RTE_LCORE_DOMAIN_L4) ? "L4" : "IO");
+
+ for (int i = 0; i < domain_count; i++) {
+ uint16_t domain_lcore_count = rte_lcore_count_from_domain(topo_sel, i);
+ uint16_t domain_lcore = rte_get_lcore_in_domain(topo_sel, i, 0);
+
+ if (domain_lcore_count)
+ RTE_LOG(DEBUG, HELLOWORLD, "at index (%u), %u cores, lcore (%u) at index 0\n",
+ i,
+ domain_lcore_count,
+ domain_lcore);
+ }
+ }
+
/* Launches the function on each lcore. 8< */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
/* Simpler equivalent. 8< */
- rte_eal_remote_launch(lcore_hello, NULL, lcore_id);
+ rte_eal_remote_launch((topo_sel == USE_NO_TOPOLOGY) ?
+ lcore_hello : send_lcore_hello, NULL, lcore_id);
/* >8 End of simpler equivalent. */
}
/* call it on main lcore too */
- lcore_hello(NULL);
+ if (topo_sel == USE_NO_TOPOLOGY)
+ lcore_hello(NULL);
+ else
+ send_lcore_hello(NULL);
+
/* >8 End of launching the function on each lcore. */
rte_eal_mp_wait_lcore();
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index c6fafdd019..398dd15502 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -46,6 +46,9 @@ static int mac_updating = 1;
/* Ports set in promiscuous mode off by default. */
static int promiscuous_on;
+/* select lcores based on ports numa (RTE_LCORE_DOMAIN_IO). */
+static bool select_port_from_io_domain;
+
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
#define MAX_PKT_BURST 32
@@ -314,6 +317,7 @@ l2fwd_usage(const char *prgname)
" -P : Enable promiscuous mode\n"
" -q NQ: number of queue (=ports) per lcore (default is 1)\n"
" -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n"
+ " -t : Enable IO domain lcores mapping to Ports\n"
" --no-mac-updating: Disable MAC addresses updating (enabled by default)\n"
" When enabled:\n"
" - The source MAC address is replaced by the TX port MAC address\n"
@@ -431,6 +435,7 @@ static const char short_options[] =
"P" /* promiscuous */
"q:" /* number of queues */
"T:" /* timer period */
+ "t" /* lcore from port io numa */
;
#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
@@ -502,6 +507,11 @@ l2fwd_parse_args(int argc, char **argv)
timer_period = timer_secs;
break;
+ /* lcores from port io numa */
+ case 't':
+ select_port_from_io_domain = true;
+ break;
+
/* long options */
case CMD_LINE_OPT_PORTMAP_NUM:
ret = l2fwd_parse_port_pair_config(optarg);
@@ -654,7 +664,7 @@ main(int argc, char **argv)
uint16_t nb_ports;
uint16_t nb_ports_available = 0;
uint16_t portid, last_port;
- unsigned lcore_id, rx_lcore_id;
+ uint16_t lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
unsigned int nb_lcores = 0;
unsigned int nb_mbufs;
@@ -738,18 +748,48 @@ main(int argc, char **argv)
qconf = NULL;
/* Initialize the port/queue configuration of each logical core */
+ if (rte_get_domain_count(RTE_LCORE_DOMAIN_IO) == 0)
+ rte_exit(EXIT_FAILURE, "we do not have enough cores in IO numa!\n");
+
+ uint16_t coreindx_io_domain[RTE_MAX_ETHPORTS] = {0};
+ uint16_t lcore_io_domain[RTE_MAX_ETHPORTS] = {RTE_MAX_LCORE};
+ uint16_t io_domain_count = rte_get_domain_count(RTE_LCORE_DOMAIN_IO);
+
+ for (int i = 0; i < l3_domain_count; i++)
+ lcore_io_domain[i] = rte_get_lcore_in_domain(RTE_LCORE_DOMAIN_IO, i, 0);
+
RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
- /* get the lcore_id for this port */
- while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
- lcore_queue_conf[rx_lcore_id].n_rx_port ==
- l2fwd_rx_queue_per_lcore) {
- rx_lcore_id++;
- if (rx_lcore_id >= RTE_MAX_LCORE)
- rte_exit(EXIT_FAILURE, "Not enough cores\n");
+ /* get IO NUMA for the port */
+ int port_socket = rte_eth_dev_socket_id(portid);
+
+ if (select_port_from_io_domain == false) {
+ /* get the lcore_id for this port */
+ while ((rte_lcore_is_enabled(rx_lcore_id) == 0) ||
+ (lcore_queue_conf[rx_lcore_id].n_rx_port ==
+ l2fwd_rx_queue_per_lcore)) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE, "Not enough cores\n");
+ }
+ } else {
+ /* get lcore from IO numa for this port */
+ rx_lcore_id = lcore_io_domain[port_socket];
+
+ if (lcore_queue_conf[rx_lcore_id].n_rx_port == l2fwd_rx_queue_per_lcore) {
+ coreindx_io_domain[port_socket] += 1;
+ rx_lcore_id = rte_get_lcore_in_domain(RTE_LCORE_DOMAIN_IO,
+ port_socket, coreindx_io_domain[port_socket]);
+ }
+
+ if (rx_lcore_id == RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE, "unable find IO (%u) numa lcore for port (%u)\n",
+ port_socket, portid);
+
+ lcore_io_domain[port_socket] = rx_lcore_id;
}
if (qconf != &lcore_queue_conf[rx_lcore_id]) {
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index 133293cf15..65faf46e16 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -176,6 +176,11 @@ main(int argc, char *argv[])
unsigned nb_ports;
uint16_t portid;
+ uint16_t ports_socket_domain[RTE_MAX_ETHPORTS] = {0};
+ uint16_t sel_io_socket = 0;
+ uint16_t sel_io_indx = 0;
+ uint16_t core_count_from_io = 0;
+
/* Initializion the Environment Abstraction Layer (EAL). 8< */
int ret = rte_eal_init(argc, argv);
if (ret < 0)
@@ -190,6 +195,20 @@ main(int argc, char *argv[])
if (nb_ports < 2 || (nb_ports & 1))
rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
+ /* get the socket of each port */
+ RTE_ETH_FOREACH_DEV(portid) {
+ ports_socket_domain[rte_eth_dev_socket_id(portid)] += 1;
+
+ if (ports_socket_domain[rte_eth_dev_socket_id(portid)] > sel_io_socket) {
+ sel_io_socket = ports_socket_domain[rte_eth_dev_socket_id(portid)];
+ sel_io_indx = rte_eth_dev_socket_id(portid);
+ }
+ }
+
+ core_count_from_io = rte_lcore_count_from_domain(RTE_LCORE_DOMAIN_IO, sel_io_indx);
+ if (core_count_from_io == 0)
+ printf("\nWARNING: select main_lcore from IO domain (%u)\n", sel_io_indx);
+
/* Creates a new mempool in memory to hold the mbufs. */
/* Allocates mempool to hold the mbufs. 8< */
@@ -210,6 +229,9 @@ main(int argc, char *argv[])
if (rte_lcore_count() > 1)
printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
+ if (rte_lcore_is_main_in_domain(RTE_LCORE_DOMAIN_IO, sel_io_indx) == false)
+ printf("\nWARNING: please use lcore from IO domain %u.\n", sel_io_indx);
+
/* Call lcore_main on the main core only. Called on single lcore. 8< */
lcore_main();
/* >8 End of called on single lcore. */
--
2.34.1
^ permalink raw reply [flat|nested] 5+ messages in thread