DPDK patches and discussions
 help / color / mirror / Atom feed
From: Harman Kalra <hkalra@marvell.com>
To: <dev@dpdk.org>, <bruce.richardson@intel.com>,
	<ciara.power@intel.com>,
	Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Harman Kalra <hkalra@marvell.com>
Subject: [dpdk-dev] [PATCH v2] eal: add telemetry callbacks for memory info
Date: Fri, 8 Oct 2021 18:14:07 +0530	[thread overview]
Message-ID: <20211008124407.24738-1-hkalra@marvell.com> (raw)
In-Reply-To: <20210915095336.105635-1-hkalra@marvell.com>

Register new telemetry callbacks to list the named (memzone) and
unnamed (malloc) memory reservations, and to return information about
them based on the arguments provided by the user.

Example:
Connecting to /var/run/dpdk/rte/dpdk_telemetry.v2
{"version": "DPDK 21.11.0-rc0", "pid": 59754, "max_output_len": 16384}
Connected to application: "dpdk-testpmd"
-->
--> /eal/memzone_list
{"/eal/memzone_list": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]}
-->
-->
--> /eal/memzone_info,0
{"/eal/memzone_info": {"Zone": 0, "Name": "rte_eth_dev_data",    \
"Length": 225408, "Address": "0x13ffc0280", "Socket": 0, "Flags": 0, \
"Hugepage_size": 536870912, "Hugepage_base": "0x120000000",   \
"Hugepage_used": 1}}
-->
-->
--> /eal/memzone_info,6
{"/eal/memzone_info": {"Zone": 6, "Name": "MP_mb_pool_0_0",  \
"Length": 669918336, "Address": "0x15811db80", "Socket": 0,  \
"Flags": 0, "Hugepage_size": 536870912, "Hugepage_base": "0x140000000", \
"Hugepage_used": 2}}
-->
-->
--> /eal/memzone_info,14
{"/eal/memzone_info": null}
-->
-->
--> /eal/heap_list
{"/eal/heap_list": [0]}
-->
-->
--> /eal/heap_info,0
{"/eal/heap_info": {"Heap_id": 0, "Name": "socket_0",     \
"Heap_size": 1610612736, "Free_size": 927645952,          \
"Alloc_size": 682966784, "Greatest_free_size": 529153152, \
"Alloc_count": 482, "Free_count": 2}}

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 v2:
 - Reimplemented the patch to align with the telemetry design,
 i.e. only perform read operations to fetch the info, with no
 changes to the filesystem.
 - Fixed windows build failure.

---
 lib/eal/common/eal_common_memory.c | 173 +++++++++++++++++++++++++++++
 1 file changed, 173 insertions(+)

diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index f83b75092e..616db5ce31 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -20,6 +20,9 @@
 #include <rte_eal_paging.h>
 #include <rte_errno.h>
 #include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
 
 #include "eal_memalloc.h"
 #include "eal_private.h"
@@ -1102,3 +1105,173 @@ rte_eal_memory_init(void)
 	rte_mcfg_mem_read_unlock();
 	return -1;
 }
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+#define EAL_MEMZONE_LIST_REQ	"/eal/memzone_list"
+#define EAL_MEMZONE_INFO_REQ	"/eal/memzone_info"
+#define EAL_HEAP_LIST_REQ	"/eal/heap_list"
+#define EAL_HEAP_INFO_REQ	"/eal/heap_info"
+#define ADDR_STR		15
+
+/* Telemetry callback handler to return heap stats for requested heap id.
+ * Expects params to be a decimal heap id; returns -1 (-> null reply) on
+ * missing/invalid params or an out-of-range heap id.
+ */
+static int
+handle_eal_heap_info_request(const char *cmd __rte_unused, const char *params,
+			     struct rte_tel_data *d)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_malloc_socket_stats sock_stats;
+	struct malloc_heap *heap;
+	unsigned int heap_id;
+
+	if (params == NULL || strlen(params) == 0)
+		return -1;
+
+	heap_id = (unsigned int)strtoul(params, NULL, 10);
+
+	/* Reject out-of-range ids rather than reading past malloc_heaps[] */
+	if (heap_id >= RTE_MAX_HEAPS)
+		return -1;
+
+	/* Get the heap stats of user provided heap id */
+	heap = &mcfg->malloc_heaps[heap_id];
+	malloc_heap_get_stats(heap, &sock_stats);
+
+	rte_tel_data_start_dict(d);
+	rte_tel_data_add_dict_int(d, "Heap_id", heap_id);
+	rte_tel_data_add_dict_string(d, "Name", heap->name);
+	rte_tel_data_add_dict_u64(d, "Heap_size",
+				  sock_stats.heap_totalsz_bytes);
+	rte_tel_data_add_dict_u64(d, "Free_size", sock_stats.heap_freesz_bytes);
+	rte_tel_data_add_dict_u64(d, "Alloc_size",
+				  sock_stats.heap_allocsz_bytes);
+	rte_tel_data_add_dict_u64(d, "Greatest_free_size",
+				  sock_stats.greatest_free_size);
+	rte_tel_data_add_dict_u64(d, "Alloc_count", sock_stats.alloc_count);
+	rte_tel_data_add_dict_u64(d, "Free_count", sock_stats.free_count);
+
+	return 0;
+}
+
+/* Telemetry callback handler to list the heap ids setup. */
+static int
+handle_eal_heap_list_request(const char *cmd __rte_unused,
+				const char *params __rte_unused,
+				struct rte_tel_data *d)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_malloc_socket_stats stats;
+	unsigned int idx;
+
+	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+
+	/* Report only heaps which have memory attached to them */
+	for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
+		malloc_heap_get_stats(&mcfg->malloc_heaps[idx], &stats);
+		if (stats.heap_totalsz_bytes == 0)
+			continue;
+		rte_tel_data_add_array_int(d, idx);
+	}
+
+	return 0;
+}
+
+/* Telemetry callback handler to return memzone info for requested index.
+ * Expects params to be a decimal memzone index; returns -1 (-> null reply)
+ * on missing/invalid params, an out-of-range index or an unused slot.
+ */
+static int
+handle_eal_memzone_info_request(const char *cmd __rte_unused,
+				const char *params, struct rte_tel_data *d)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_memseg_list *msl = NULL;
+	int ms_idx, ms_count = 0;
+	void *cur_addr, *mz_end;
+	struct rte_memzone *mz;
+	struct rte_memseg *ms;
+	char addr[ADDR_STR];
+	unsigned int mz_idx;
+	size_t page_sz;
+
+	if (params == NULL || strlen(params) == 0)
+		return -1;
+
+	mz_idx = strtoul(params, NULL, 10);
+
+	/* Get the memzone handle using index */
+	mz = rte_fbarray_get(&mcfg->memzones, mz_idx);
+
+	/* NULL for an out-of-range index; a zeroed entry (addr == NULL) for a
+	 * slot that is not currently reserved. Bail out before building the
+	 * dict so the reply is a clean null.
+	 */
+	if (mz == NULL || mz->addr == NULL)
+		return -1;
+
+	/* go through each page occupied by this memzone */
+	msl = rte_mem_virt2memseg_list(mz->addr);
+	if (!msl) {
+		RTE_LOG(DEBUG, EAL, "Skipping bad memzone\n");
+		return -1;
+	}
+
+	rte_tel_data_start_dict(d);
+	rte_tel_data_add_dict_int(d, "Zone", mz_idx);
+	rte_tel_data_add_dict_string(d, "Name", mz->name);
+	rte_tel_data_add_dict_int(d, "Length", mz->len);
+	snprintf(addr, ADDR_STR, "%p", mz->addr);
+	rte_tel_data_add_dict_string(d, "Address", addr);
+	rte_tel_data_add_dict_int(d, "Socket", mz->socket_id);
+	rte_tel_data_add_dict_int(d, "Flags", mz->flags);
+
+	page_sz = (size_t)mz->hugepage_sz;
+	cur_addr = RTE_PTR_ALIGN_FLOOR(mz->addr, page_sz);
+	mz_end = RTE_PTR_ADD(cur_addr, mz->len);
+
+	/* First memseg backing this zone gives the hugepage base address */
+	ms_idx = RTE_PTR_DIFF(mz->addr, msl->base_va) / page_sz;
+	ms = rte_fbarray_get(&msl->memseg_arr, ms_idx);
+
+	rte_tel_data_add_dict_int(d, "Hugepage_size", page_sz);
+	snprintf(addr, ADDR_STR, "%p", ms->addr);
+	rte_tel_data_add_dict_string(d, "Hugepage_base", addr);
+
+	/* Count the pages spanned by the zone; memzones occupy contiguous
+	 * segments, so a simple VA walk suffices.
+	 */
+	do {
+		/* advance VA to next page */
+		cur_addr = RTE_PTR_ADD(cur_addr, page_sz);
+		ms_count++;
+	} while (cur_addr < mz_end);
+
+	rte_tel_data_add_dict_int(d, "Hugepage_used", ms_count);
+
+	return 0;
+}
+
+/* Per-memzone walk callback: append the zone's fbarray index to the
+ * telemetry array passed via arg. Both parameters are used, so they
+ * must not be tagged __rte_unused.
+ */
+static void
+memzone_list_cb(const struct rte_memzone *mz, void *arg)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_tel_data *d = arg;
+	int mz_idx;
+
+	mz_idx = rte_fbarray_find_idx(&mcfg->memzones, mz);
+	rte_tel_data_add_array_int(d, mz_idx);
+}
+
+
+/* Telemetry callback handler to list the memzones reserved.
+ * Takes no parameters; replies with an array of fbarray indices, each
+ * usable as the argument to /eal/memzone_info.
+ */
+static int
+handle_eal_memzone_list_request(const char *cmd __rte_unused,
+				const char *params __rte_unused,
+				struct rte_tel_data *d)
+{
+	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+	/* memzone_list_cb appends one index per reserved zone into d */
+	rte_memzone_walk(memzone_list_cb, d);
+
+	return 0;
+}
+
+/* Constructor: register the EAL memory telemetry endpoints at startup. */
+RTE_INIT(memory_telemetry)
+{
+	rte_telemetry_register_cmd(
+			EAL_MEMZONE_LIST_REQ, handle_eal_memzone_list_request,
+			"List of memzone index reserved. Takes no parameters");
+	rte_telemetry_register_cmd(
+			EAL_MEMZONE_INFO_REQ, handle_eal_memzone_info_request,
+			"Returns memzone info. Parameters: int mz_id");
+	rte_telemetry_register_cmd(
+			EAL_HEAP_LIST_REQ, handle_eal_heap_list_request,
+			"List of heap index setup. Takes no parameters");
+	rte_telemetry_register_cmd(
+			EAL_HEAP_INFO_REQ, handle_eal_heap_info_request,
+			"Returns malloc heap stats. Parameters: int heap_id");
+}
+#endif
-- 
2.18.0


  parent reply	other threads:[~2021-10-08 12:44 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-09-15  9:53 [dpdk-dev] [PATCH] " Harman Kalra
2021-09-20 15:56 ` Bruce Richardson
2021-09-21  9:05   ` [dpdk-dev] [EXT] " Harman Kalra
2021-09-27 16:37     ` Bruce Richardson
2021-10-07 11:01       ` Harman Kalra
2021-10-08 12:44 ` Harman Kalra [this message]
2021-10-14 17:17   ` [dpdk-dev] [PATCH v2] " Harman Kalra
2021-10-15  8:28     ` Power, Ciara
2021-10-19 15:04       ` Bruce Richardson
2021-10-25 18:55         ` Thomas Monjalon

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20211008124407.24738-1-hkalra@marvell.com \
    --to=hkalra@marvell.com \
    --cc=anatoly.burakov@intel.com \
    --cc=bruce.richardson@intel.com \
    --cc=ciara.power@intel.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).