* [RFC PATCH 0/4] lcore telemetry improvements
@ 2022-11-23 10:26 Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
` (17 more replies)
0 siblings, 18 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-23 10:26 UTC (permalink / raw)
To: dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom, Morten Brørup, Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This is a much more basic and naive approach which leaves the busy
cycles percentage completely up to the application.
This series is aimed at allowing DPDK applications to expose their CPU
busy cycles ratio in the DPDK telemetry under /eal/lcore/info.
I have left it as RFC since calculating busy cycles can be
a controversial topic.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_busy_percent_cb() and report the already available
busy cycles information.
https://github.com/rjarry/ovs/commit/4286c0e75583075a223a67eee746084a2f3b0547
Robin Jarry (4):
eal: add lcore info in telemetry
eal: allow applications to report their cpu utilization
testpmd: add show lcores command
testpmd: report lcore usage
app/test-pmd/5tswap.c | 5 +-
app/test-pmd/cmdline.c | 31 ++++++++
app/test-pmd/csumonly.c | 6 +-
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +-
app/test-pmd/iofwd.c | 5 +-
app/test-pmd/macfwd.c | 5 +-
app/test-pmd/macswap.c | 5 +-
app/test-pmd/noisy_vnf.c | 4 +
app/test-pmd/rxonly.c | 5 +-
app/test-pmd/shared_rxq_fwd.c | 5 +-
app/test-pmd/testpmd.c | 69 +++++++++++++++-
app/test-pmd/testpmd.h | 25 +++++-
app/test-pmd/txonly.c | 7 +-
lib/eal/common/eal_common_lcore.c | 127 +++++++++++++++++++++++++++++-
lib/eal/include/rte_lcore.h | 30 +++++++
lib/eal/version.map | 1 +
17 files changed, 306 insertions(+), 32 deletions(-)
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RFC PATCH 1/4] eal: add lcore info in telemetry
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
@ 2022-11-23 10:26 ` Robin Jarry
2022-11-23 16:44 ` Stephen Hemminger
2022-11-23 10:26 ` [RFC PATCH 2/4] eal: allow applications to report their cpu utilization Robin Jarry
` (16 subsequent siblings)
17 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-11-23 10:26 UTC (permalink / raw)
To: dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom, Morten Brørup, Robin Jarry
Report the same information as rte_lcore_dump() in the telemetry
API under /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": "3"
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
lib/eal/common/eal_common_lcore.c | 90 +++++++++++++++++++++++++++++++
1 file changed, 90 insertions(+)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..31e3965dc5ad 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,7 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#include <rte_telemetry.h>
#include "eal_private.h"
#include "eal_thread.h"
@@ -456,3 +457,92 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_info *info = arg;
+ struct rte_config *cfg = rte_eal_get_configuration();
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN + 3];
+ const char *role;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ switch (cfg->lcore_role[lcore_id]) {
+ case ROLE_RTE:
+ role = "RTE";
+ break;
+ case ROLE_SERVICE:
+ role = "SERVICE";
+ break;
+ case ROLE_NON_EAL:
+ role = "NON_EAL";
+ break;
+ default:
+ role = "UNKNOWN";
+ break;
+ }
+ if (eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset, sizeof(cpuset) - 3)) {
+ cpuset[sizeof(cpuset) - 4] = '.';
+ cpuset[sizeof(cpuset) - 3] = '.';
+ cpuset[sizeof(cpuset) - 2] = '.';
+ cpuset[sizeof(cpuset) - 1] = '\0';
+ }
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", role);
+ rte_tel_data_add_dict_string(info->d, "cpuset", cpuset);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ char *endptr = NULL;
+ if (params == NULL || strlen(params) == 0)
+ return -EINVAL;
+ errno = 0;
+ info.lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (endptr == params)
+ return -EINVAL;
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RFC PATCH 2/4] eal: allow applications to report their cpu utilization
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
@ 2022-11-23 10:26 ` Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 3/4] testpmd: add show lcores command Robin Jarry
` (15 subsequent siblings)
17 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-23 10:26 UTC (permalink / raw)
To: dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom, Morten Brørup, Robin Jarry
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return a number between 0 and 100
representing the percentage of busy cycles spent over a fixed period of
time. The period of time is configured when registering the callback.
Cc: Bruce Richardson <bruce.richardson@intel.com>
Cc: Jerin Jacob <jerinj@marvell.com>
Cc: Kevin Laatz <kevin.laatz@intel.com>
Cc: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Cc: Mattias Rönnblom <hofors@lysator.liu.se>
Cc: Morten Brørup <mb@smartsharesystems.com>
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
lib/eal/common/eal_common_lcore.c | 37 ++++++++++++++++++++++++++++---
lib/eal/include/rte_lcore.h | 30 +++++++++++++++++++++++++
lib/eal/version.map | 1 +
3 files changed, 65 insertions(+), 3 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 31e3965dc5ad..9a85fd8854df 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -420,14 +420,36 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static rte_lcore_busy_percent_cb lcore_busy_cb;
+static unsigned int lcore_busy_period;
+
+void
+rte_lcore_register_busy_percent_cb(rte_lcore_busy_percent_cb cb, unsigned int period)
+{
+ lcore_busy_cb = cb;
+ lcore_busy_period = period;
+}
+
+static int
+lcore_busy_percent(unsigned int lcore_id)
+{
+ int percent = -1;
+ if (lcore_busy_cb)
+ percent = lcore_busy_cb(lcore_id);
+ if (percent > 100)
+ percent = 100;
+ return percent;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ char busy_str[16];
const char *role;
FILE *f = arg;
- int ret;
+ int ret, busy;
switch (cfg->lcore_role[lcore_id]) {
case ROLE_RTE:
@@ -446,9 +468,16 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ busy = lcore_busy_percent(lcore_id);
+ if (busy < 0) {
+ snprintf(busy_str, sizeof(busy_str), "%s", "N/A");
+ } else {
+ snprintf(busy_str, sizeof(busy_str), "%d%% last %d sec",
+ busy, lcore_busy_period);
+ }
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s, busy %s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ ret == 0 ? "" : "...", busy_str);
return 0;
}
@@ -517,6 +546,8 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
rte_tel_data_add_dict_string(info->d, "role", role);
rte_tel_data_add_dict_string(info->d, "cpuset", cpuset);
+ rte_tel_data_add_dict_int(info->d, "busy_percent", lcore_busy_percent(lcore_id));
+ rte_tel_data_add_dict_int(info->d, "busy_period", lcore_busy_period);
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 6938c3fd7b81..b1223eaa12bf 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,36 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * Callback to allow applications to report CPU utilization.
+ *
+ * @param lcore_id
+ * The lcore to consider.
+ * @return
+ * - A number between 0 and 100 representing the percentage of busy cycles
+ * over the last period for the given lcore_id.
+ * - -1 if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_busy_percent_cb)(unsigned int lcore_id);
+
+/**
+ * Register a callback from an application to be called in rte_lcore_dump()
+ * and the /eal/lcore/info telemetry endpoint handler.
+ *
+ * Applications are expected to return a number between 0 and 100 representing
+ * the percentage of busy cycles over the last period for the provided lcore_id.
+ * The implementation details for computing such a ratio is specific to each
+ * application.
+ *
+ * @param cb
+ * The callback function.
+ * @param period
+ * The period in seconds over which the percentage of busy cycles will be
+ * reported by the application.
+ */
+__rte_experimental
+void rte_lcore_register_busy_percent_cb(rte_lcore_busy_percent_cb cb, unsigned int period);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 7ad12a7dc985..138537ee5835 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -440,6 +440,7 @@ EXPERIMENTAL {
rte_thread_detach;
rte_thread_equal;
rte_thread_join;
+ rte_lcore_register_busy_percent_cb;
};
INTERNAL {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RFC PATCH 3/4] testpmd: add show lcores command
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 2/4] eal: allow applications to report their cpu utilization Robin Jarry
@ 2022-11-23 10:26 ` Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 4/4] testpmd: report lcore usage Robin Jarry
` (14 subsequent siblings)
17 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-23 10:26 UTC (permalink / raw)
To: dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom, Morten Brørup, Robin Jarry
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
app/test-pmd/cmdline.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b32dc8bfd445..d290938ffb4e 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -7151,6 +7151,36 @@ static cmdline_parse_inst_t cmd_showfwdall = {
},
};
+/* show lcores */
+struct lcores_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t lcores;
+};
+
+static cmdline_parse_token_string_t cmd_lcores_action =
+ TOKEN_STRING_INITIALIZER(struct lcores_result, show, "show");
+static cmdline_parse_token_string_t cmd_lcores_lcores =
+ TOKEN_STRING_INITIALIZER(struct lcores_result, lcores, "lcores");
+
+static void
+cmd_showlcores_parsed(__rte_unused void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ rte_lcore_dump(stdout);
+}
+
+static cmdline_parse_inst_t cmd_showlcores = {
+ .f = cmd_showlcores_parsed,
+ .data = NULL,
+ .help_str = "show lcores",
+ .tokens = {
+ (void *)&cmd_lcores_action,
+ (void *)&cmd_lcores_lcores,
+ NULL,
+ },
+};
+
/* *** READ A RING DESCRIPTOR OF A PORT RX/TX QUEUE *** */
struct cmd_read_rxd_txd_result {
cmdline_fixed_string_t read;
@@ -12637,6 +12667,7 @@ static cmdline_parse_ctx_t builtin_ctx[] = {
(cmdline_parse_inst_t *)&cmd_showdevice,
(cmdline_parse_inst_t *)&cmd_showcfg,
(cmdline_parse_inst_t *)&cmd_showfwdall,
+ (cmdline_parse_inst_t *)&cmd_showlcores,
(cmdline_parse_inst_t *)&cmd_start,
(cmdline_parse_inst_t *)&cmd_start_tx_first,
(cmdline_parse_inst_t *)&cmd_start_tx_first_n,
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RFC PATCH 4/4] testpmd: report lcore usage
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (2 preceding siblings ...)
2022-11-23 10:26 ` [RFC PATCH 3/4] testpmd: add show lcores command Robin Jarry
@ 2022-11-23 10:26 ` Robin Jarry
2022-11-28 8:59 ` [PATCH v2 0/4] lcore telemetry improvements Robin Jarry
` (13 subsequent siblings)
17 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-23 10:26 UTC (permalink / raw)
To: dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom, Morten Brørup, Robin Jarry
Reuse the --record-core-cycles option to account for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Add two new interval_cycles and busy_cycles fields to the fwd_stream
structure. The core_cycles field cannot be reset to zero without
breaking the current behaviour.
Update get_end_cycles() to accept an additional argument for the number
of processed packets.
Every 10s, copy the number of cycles (busy and total) into a moving
average buffer. The buffer holds 6 samples of 10s and is rotated when
full.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_busy_percent_cb(). In the callback, access the
average buffer to compute the percentage of busy cycles.
Example:
testpmd> show lcores
lcore 3, socket 0, role RTE, cpuset 3, busy N/A
lcore 4, socket 0, role RTE, cpuset 4, busy 39% last 60 sec
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
app/test-pmd/5tswap.c | 5 ++-
app/test-pmd/csumonly.c | 6 +--
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +--
app/test-pmd/iofwd.c | 5 ++-
app/test-pmd/macfwd.c | 5 ++-
app/test-pmd/macswap.c | 5 ++-
app/test-pmd/noisy_vnf.c | 4 ++
app/test-pmd/rxonly.c | 5 ++-
app/test-pmd/shared_rxq_fwd.c | 5 ++-
app/test-pmd/testpmd.c | 69 +++++++++++++++++++++++++++++++++--
app/test-pmd/testpmd.h | 25 +++++++++++--
app/test-pmd/txonly.c | 7 ++--
13 files changed, 120 insertions(+), 29 deletions(-)
diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index f041a5e1d530..03225075716c 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -116,7 +116,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -182,7 +182,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 1c2459851522..03e141221a56 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -868,7 +868,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
@@ -1200,8 +1200,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index fd6abc0f4124..7b2f0ffdf0f5 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -196,7 +196,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
RTE_PER_LCORE(_next_flow) = next_flow;
- get_end_cycles(fs, start_tsc);
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 066f2a3ab79b..2fc9f96dc95f 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -303,7 +303,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_replies = 0;
@@ -508,8 +508,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
} while (++nb_tx < nb_replies);
}
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8fafdec548ad..e5a2dbe20c69 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -59,7 +59,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
@@ -84,7 +84,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index beb220fbb462..9db623999970 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -65,7 +65,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -115,7 +115,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4f8deb338296..4db134ac1d91 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,7 +66,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -93,7 +93,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..290bdcda45f0 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -152,6 +152,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
uint64_t delta_ms;
bool needs_flush = false;
uint64_t now;
+ uint64_t start_tsc = 0;
+
+ get_start_cycles(&start_tsc);
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
@@ -219,6 +222,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+ get_end_cycles(fs, start_tsc, nb_rx + nb_tx);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index d528d4f34e60..519202339e16 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -58,13 +58,14 @@ pkt_burst_receive(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/shared_rxq_fwd.c b/app/test-pmd/shared_rxq_fwd.c
index 2e9047804b5b..395b73bfe52e 100644
--- a/app/test-pmd/shared_rxq_fwd.c
+++ b/app/test-pmd/shared_rxq_fwd.c
@@ -102,9 +102,10 @@ shared_rxq_fwd(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
forward_shared_rxq(fs, nb_rx, pkts_burst);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 134d79a55547..450bc281fd69 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2248,20 +2248,26 @@ static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
struct fwd_stream **fsm;
+ uint64_t tics_per_1sec;
+ uint64_t tics_current;
+ uint64_t tics;
streamid_t nb_fs;
streamid_t sm_id;
+ int interval, loop;
#ifdef RTE_LIB_BITRATESTATS
- uint64_t tics_per_1sec;
uint64_t tics_datum;
- uint64_t tics_current;
uint16_t i, cnt_ports;
cnt_ports = nb_ports;
tics_datum = rte_rdtsc();
- tics_per_1sec = rte_get_timer_hz();
#endif
+ tics_per_1sec = rte_get_timer_hz();
+ tics = rte_rdtsc();
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
+ fc->lcore_id = rte_lcore_id();
+ interval = 0;
+ loop = 0;
do {
for (sm_id = 0; sm_id < nb_fs; sm_id++)
if (!fsm[sm_id]->disabled)
@@ -2284,8 +2290,58 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
latencystats_lcore_id == rte_lcore_id())
rte_latencystats_update();
#endif
-
+ if (record_core_cycles && ++loop == 1024) {
+ loop = 0;
+ tics_current = rte_rdtsc();
+ if (tics_current - tics >= tics_per_1sec * LCORE_CYCLES_INTERVAL_LEN) {
+ for (sm_id = 0; sm_id < nb_fs; sm_id++) {
+ struct fwd_stream *fs = fsm[sm_id];
+ if (fs->disabled)
+ continue;
+ fc->cycles[interval].busy += fs->interval_cycles.busy;
+ fc->cycles[interval].total += fs->interval_cycles.total;
+ fs->interval_cycles.busy = 0;
+ fs->interval_cycles.total = 0;
+ }
+ interval += 1;
+ if (interval == LCORE_CYCLES_INTERVAL_COUNT) {
+ memmove(&fc->cycles[0], &fc->cycles[1],
+ (LCORE_CYCLES_INTERVAL_COUNT - 1)
+ * sizeof(fc->cycles[0]));
+ interval = 0;
+ }
+ fc->cycles[interval].busy = 0;
+ fc->cycles[interval].total = 0;
+ tics = tics_current;
+ }
+ }
} while (! fc->stopped);
+ memset(&fc->cycles, 0, sizeof(fc->cycles));
+}
+
+static int
+lcore_busy_percent_callback(unsigned int lcore_id)
+{
+ uint64_t busy_cycles, total_cycles;
+ struct fwd_lcore *fc;
+ int i, c;
+
+ for (c = 0; c < nb_lcores; c++) {
+ fc = fwd_lcores[c];
+ if (fc->lcore_id != lcore_id)
+ continue;
+ busy_cycles = total_cycles = 0;
+
+ for (i = 0; i < LCORE_CYCLES_INTERVAL_COUNT; i++) {
+ busy_cycles += fc->cycles[i].busy;
+ total_cycles += fc->cycles[i].total;
+ }
+ if (total_cycles == 0)
+ return -1;
+ return 100 * busy_cycles / total_cycles;
+ }
+
+ return -1;
}
static int
@@ -4522,6 +4578,11 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_busy_percent_cb(lcore_busy_percent_callback,
+ LCORE_CYCLES_INTERVAL_LEN * LCORE_CYCLES_INTERVAL_COUNT);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..684a06919986 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -145,6 +145,14 @@ extern const struct rss_type_info rss_type_table[];
*/
extern char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
+/**
+ * Used with --record-core-cycles.
+ */
+struct lcore_cycles {
+ uint64_t busy;
+ uint64_t total;
+};
+
/**
* The data structure associated with a forwarding stream between a receive
* port/queue and a transmit port/queue.
@@ -175,6 +183,7 @@ struct fwd_stream {
unsigned int gro_times; /**< GRO operation times */
#endif
uint64_t core_cycles; /**< used for RX and TX processing */
+ struct lcore_cycles interval_cycles;
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -341,6 +350,9 @@ struct rte_port {
struct xstat_display_info xstats_info;
};
+#define LCORE_CYCLES_INTERVAL_COUNT 6
+#define LCORE_CYCLES_INTERVAL_LEN 10
+
/**
* The data structure associated with each forwarding logical core.
* The logical cores are internally numbered by a core index from 0 to
@@ -360,6 +372,8 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ unsigned int lcore_id; /**< return value of rte_lcore_id() */
+ struct lcore_cycles cycles[LCORE_CYCLES_INTERVAL_COUNT]; /**< busy percent stats */
};
/*
@@ -836,10 +850,15 @@ get_start_cycles(uint64_t *start_tsc)
}
static inline void
-get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
+get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc, uint64_t nb_packets)
{
- if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ if (record_core_cycles) {
+ uint64_t cycles = rte_rdtsc() - start_tsc;
+ fs->core_cycles += cycles;
+ fs->interval_cycles.total += cycles;
+ if (nb_packets > 0)
+ fs->interval_cycles.busy += cycles;
+ }
}
static inline void
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952daa..ad37626ff63c 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
struct rte_ether_hdr eth_hdr;
- uint16_t nb_tx;
+ uint16_t nb_tx = 0;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
@@ -392,7 +392,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
}
if (nb_pkt == 0)
- return;
+ goto end;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -426,7 +426,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
} while (++nb_tx < nb_pkt);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [RFC PATCH 1/4] eal: add lcore info in telemetry
2022-11-23 10:26 ` [RFC PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
@ 2022-11-23 16:44 ` Stephen Hemminger
2022-11-23 23:15 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Stephen Hemminger @ 2022-11-23 16:44 UTC (permalink / raw)
To: Robin Jarry
Cc: dev, Bruce Richardson, Jerin Jacob, Kevin Laatz,
Konstantin Ananyev, Mattias Rönnblom, Morten Brørup
On Wed, 23 Nov 2022 11:26:09 +0100
Robin Jarry <rjarry@redhat.com> wrote:
> Report the same information than rte_lcore_dump() in the telemetry
> API into /eal/lcore/list and /eal/lcore/info,ID.
>
> Example:
>
> --> /eal/lcore/info,3
> {
> "/eal/lcore/info": {
> "lcore_id": 3,
> "socket": 0,
> "role": "RTE",
> "cpuset": "3"
> }
> }
Rather than reporting cpuset as a string might it be better
as a JSON array?
"cpuset" : [ 3, 5 ]
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [RFC PATCH 1/4] eal: add lcore info in telemetry
2022-11-23 16:44 ` Stephen Hemminger
@ 2022-11-23 23:15 ` Robin Jarry
0 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-23 23:15 UTC (permalink / raw)
To: Stephen Hemminger
Cc: dev, Bruce Richardson, Jerin Jacob, Kevin Laatz,
Konstantin Ananyev, Mattias Rönnblom, Morten Brørup
Stephen Hemminger, Nov 23, 2022 at 17:44:
> Rather than reporting cpuset as a string might it be better
> as a JSON array?
> "cpuset" : [ 3, 5 ]
Good point, will do that in v2.
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v2 0/4] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (3 preceding siblings ...)
2022-11-23 10:26 ` [RFC PATCH 4/4] testpmd: report lcore usage Robin Jarry
@ 2022-11-28 8:59 ` Robin Jarry
2022-11-28 8:59 ` [PATCH v2 1/4] eal: add lcore info in telemetry Robin Jarry
` (3 more replies)
2022-11-29 15:33 ` [PATCH v3 0/4] lcore telemetry improvements Robin Jarry
` (12 subsequent siblings)
17 siblings, 4 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-28 8:59 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/327db12c751be0375fcfed5e44b6065bcfb75c82
Changes since v1:
- The cpuset field in telemetry is now a JSON list of CPU ids.
- Applications must now report their raw CPU cycles counts. The busyness
ratio and rate of change is left to external monitoring tools.
- Renamed show lcores -> dump_lcores in testpmd.
Robin Jarry (4):
eal: add lcore info in telemetry
eal: allow applications to report their cpu cycles utilization
testpmd: add dump_lcores command
testpmd: report lcore usage
app/test-pmd/5tswap.c | 5 +-
app/test-pmd/cmdline.c | 3 +
app/test-pmd/csumonly.c | 6 +-
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +-
app/test-pmd/iofwd.c | 5 +-
app/test-pmd/macfwd.c | 5 +-
app/test-pmd/macswap.c | 5 +-
app/test-pmd/noisy_vnf.c | 4 +
app/test-pmd/rxonly.c | 5 +-
app/test-pmd/shared_rxq_fwd.c | 5 +-
app/test-pmd/testpmd.c | 39 +++++++++-
app/test-pmd/testpmd.h | 14 +++-
app/test-pmd/txonly.c | 7 +-
lib/eal/common/eal_common_lcore.c | 123 +++++++++++++++++++++++++++++-
lib/eal/include/rte_lcore.h | 29 +++++++
lib/eal/version.map | 1 +
17 files changed, 234 insertions(+), 30 deletions(-)
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v2 1/4] eal: add lcore info in telemetry
2022-11-28 8:59 ` [PATCH v2 0/4] lcore telemetry improvements Robin Jarry
@ 2022-11-28 8:59 ` Robin Jarry
2022-11-28 8:59 ` [PATCH v2 2/4] eal: allow applications to report their cpu cycles utilization Robin Jarry
` (2 subsequent siblings)
3 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-28 8:59 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Stephen Hemminger
Report the same information as rte_lcore_dump() in the telemetry
API under /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Cc: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
v1 -> v2: Changed "cpuset" to an array of CPU ids instead of a string.
lib/eal/common/eal_common_lcore.c | 92 +++++++++++++++++++++++++++++++
1 file changed, 92 insertions(+)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..8a6c12550238 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,7 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#include <rte_telemetry.h>
#include "eal_private.h"
#include "eal_thread.h"
@@ -456,3 +457,94 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_info *info = arg;
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct rte_tel_data *cpuset;
+ const char *role;
+ unsigned int cpu;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ switch (cfg->lcore_role[lcore_id]) {
+ case ROLE_RTE:
+ role = "RTE";
+ break;
+ case ROLE_SERVICE:
+ role = "SERVICE";
+ break;
+ case ROLE_NON_EAL:
+ role = "NON_EAL";
+ break;
+ default:
+ role = "UNKNOWN";
+ break;
+ }
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", role);
+ cpuset = rte_tel_data_alloc();
+ if (!cpuset)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ char *endptr = NULL;
+ if (params == NULL || strlen(params) == 0)
+ return -EINVAL;
+ errno = 0;
+ info.lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (endptr == params)
+ return -EINVAL;
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v2 2/4] eal: allow applications to report their cpu cycles utilization
2022-11-28 8:59 ` [PATCH v2 0/4] lcore telemetry improvements Robin Jarry
2022-11-28 8:59 ` [PATCH v2 1/4] eal: add lcore info in telemetry Robin Jarry
@ 2022-11-28 8:59 ` Robin Jarry
2022-11-28 10:52 ` Morten Brørup
2022-11-28 8:59 ` [PATCH v2 3/4] testpmd: add dump_lcores command Robin Jarry
2022-11-28 8:59 ` [PATCH v2 4/4] testpmd: report lcore usage Robin Jarry
3 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-11-28 8:59 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Bruce Richardson, Jerin Jacob, Kevin Laatz,
Konstantin Ananyev, Mattias Rönnblom, Morten Brørup
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of CPU cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Cc: Bruce Richardson <bruce.richardson@intel.com>
Cc: Jerin Jacob <jerinj@marvell.com>
Cc: Kevin Laatz <kevin.laatz@intel.com>
Cc: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Cc: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Cc: Morten Brørup <mb@smartsharesystems.com>
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
v1 -> v2:
Changed the approach based on Morten's review: the callback is now
expected to report the total number of cycles since application start
and the amount of these cycles that were spent doing busy work. This
will give more flexibility in external monitoring tools to decide the
sample period to compute busyness ratio.
lib/eal/common/eal_common_lcore.c | 31 ++++++++++++++++++++++++++++---
lib/eal/include/rte_lcore.h | 29 +++++++++++++++++++++++++++++
lib/eal/version.map | 1 +
3 files changed, 58 insertions(+), 3 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 8a6c12550238..51f53fc93ece 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -420,11 +421,20 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
- char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
+ uint64_t busy_cycles, total_cycles;
const char *role;
FILE *f = arg;
int ret;
@@ -444,11 +454,19 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
break;
}
+ busy_cycles = 0;
+ total_cycles = 0;
+ usage_str[0] = '\0';
+ if (lcore_usage_cb && lcore_usage_cb(lcore_id, &busy_cycles, &total_cycles) == 0) {
+ snprintf(usage_str, sizeof(usage_str), ", busy cycles %"PRIu64"/%"PRIu64,
+ busy_cycles, total_cycles);
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ ret == 0 ? "" : "...", usage_str);
+
return 0;
}
@@ -486,6 +504,7 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct lcore_telemetry_info *info = arg;
struct rte_config *cfg = rte_eal_get_configuration();
+ uint64_t busy_cycles, total_cycles;
struct rte_tel_data *cpuset;
const char *role;
unsigned int cpu;
@@ -519,6 +538,12 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
rte_tel_data_add_array_int(cpuset, cpu);
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ busy_cycles = 0;
+ total_cycles = 0;
+ if (lcore_usage_cb && lcore_usage_cb(lcore_id, &busy_cycles, &total_cycles) == 0) {
+ rte_tel_data_add_dict_u64(info->d, "busy_cycles", busy_cycles);
+ rte_tel_data_add_dict_u64(info->d, "total_cycles", total_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 6938c3fd7b81..dc352297bcbc 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,35 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * Callback to allow applications to report CPU usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] busy
+ * The number of busy CPU cycles since the application start.
+ * @param [out] total
+ * The total number of CPU cycles since the application start.
+ * @return
+ * - 0 if both busy and total were set correctly.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(
+ unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles);
+
+/**
+ * Register a callback from an application to be called in rte_lcore_dump()
+ * and the /eal/lcore/info telemetry endpoint handler.
+ *
+ * Applications are expected to report the amount of busy and total CPU cycles
+ * since their startup.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 7ad12a7dc985..30fd216a12ea 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -440,6 +440,7 @@ EXPERIMENTAL {
rte_thread_detach;
rte_thread_equal;
rte_thread_join;
+ rte_lcore_register_usage_cb;
};
INTERNAL {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v2 3/4] testpmd: add dump_lcores command
2022-11-28 8:59 ` [PATCH v2 0/4] lcore telemetry improvements Robin Jarry
2022-11-28 8:59 ` [PATCH v2 1/4] eal: add lcore info in telemetry Robin Jarry
2022-11-28 8:59 ` [PATCH v2 2/4] eal: allow applications to report their cpu cycles utilization Robin Jarry
@ 2022-11-28 8:59 ` Robin Jarry
2022-11-28 8:59 ` [PATCH v2 4/4] testpmd: report lcore usage Robin Jarry
3 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-28 8:59 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Chengwen Feng
Add a simple command that calls rte_lcore_dump().
Cc: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
v1 -> v2: renamed show lcores -> dump_lcores
app/test-pmd/cmdline.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b32dc8bfd445..96474d2ae458 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8345,6 +8345,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8358,6 +8360,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v2 4/4] testpmd: report lcore usage
2022-11-28 8:59 ` [PATCH v2 0/4] lcore telemetry improvements Robin Jarry
` (2 preceding siblings ...)
2022-11-28 8:59 ` [PATCH v2 3/4] testpmd: add dump_lcores command Robin Jarry
@ 2022-11-28 8:59 ` Robin Jarry
3 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-28 8:59 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry
Reuse the --record-core-cycles option to account for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
to accept an additional argument for the number of processed packets.
Update fwd_stream.busy_cycles when the number of packets is greater than
zero.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
field in struct fwd_lcore to identify the correct index in fwd_lcores
and return the sum of busy/total cycles of all fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
v1 -> v2: adjusted to new lcore_usage api
app/test-pmd/5tswap.c | 5 +++--
app/test-pmd/csumonly.c | 6 +++---
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +++---
app/test-pmd/iofwd.c | 5 +++--
app/test-pmd/macfwd.c | 5 +++--
app/test-pmd/macswap.c | 5 +++--
app/test-pmd/noisy_vnf.c | 4 ++++
app/test-pmd/rxonly.c | 5 +++--
app/test-pmd/shared_rxq_fwd.c | 5 +++--
app/test-pmd/testpmd.c | 39 ++++++++++++++++++++++++++++++++++-
app/test-pmd/testpmd.h | 14 +++++++++----
app/test-pmd/txonly.c | 7 ++++---
13 files changed, 81 insertions(+), 27 deletions(-)
diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index f041a5e1d530..03225075716c 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -116,7 +116,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -182,7 +182,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 1c2459851522..03e141221a56 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -868,7 +868,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
@@ -1200,8 +1200,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index fd6abc0f4124..7b2f0ffdf0f5 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -196,7 +196,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
RTE_PER_LCORE(_next_flow) = next_flow;
- get_end_cycles(fs, start_tsc);
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 066f2a3ab79b..2fc9f96dc95f 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -303,7 +303,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_replies = 0;
@@ -508,8 +508,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
} while (++nb_tx < nb_replies);
}
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8fafdec548ad..e5a2dbe20c69 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -59,7 +59,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
@@ -84,7 +84,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index beb220fbb462..9db623999970 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -65,7 +65,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -115,7 +115,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4f8deb338296..4db134ac1d91 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,7 +66,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -93,7 +93,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..290bdcda45f0 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -152,6 +152,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
uint64_t delta_ms;
bool needs_flush = false;
uint64_t now;
+ uint64_t start_tsc = 0;
+
+ get_start_cycles(&start_tsc);
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
@@ -219,6 +222,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+ get_end_cycles(fs, start_tsc, nb_rx + nb_tx);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index d528d4f34e60..519202339e16 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -58,13 +58,14 @@ pkt_burst_receive(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/shared_rxq_fwd.c b/app/test-pmd/shared_rxq_fwd.c
index 2e9047804b5b..395b73bfe52e 100644
--- a/app/test-pmd/shared_rxq_fwd.c
+++ b/app/test-pmd/shared_rxq_fwd.c
@@ -102,9 +102,10 @@ shared_rxq_fwd(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
forward_shared_rxq(fs, nb_rx, pkts_burst);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 134d79a55547..6ad91334d352 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2184,6 +2184,7 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
+ fs->busy_cycles = 0;
fs->core_cycles = 0;
}
}
@@ -2260,6 +2261,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
+ fc->lcore_id = rte_lcore_id();
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
do {
@@ -2288,6 +2290,37 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+ int c;
+
+ for (c = 0; c < nb_lcores; c++) {
+ fc = fwd_lcores[c];
+ if (fc->lcore_id != lcore_id)
+ continue;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ *busy_cycles = 0;
+ *total_cycles = 0;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++)
+ if (!fsm[sm_id]->disabled) {
+ *busy_cycles += fsm[sm_id]->busy_cycles;
+ *total_cycles += fsm[sm_id]->core_cycles;
+ }
+
+ return 0;
+ }
+
+ return -1;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4522,6 +4555,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..5dbf5d1c465c 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,8 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
+ uint64_t core_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +361,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ unsigned int lcore_id; /**< return value of rte_lcore_id() */
};
/*
@@ -836,10 +838,14 @@ get_start_cycles(uint64_t *start_tsc)
}
static inline void
-get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
+get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc, uint64_t nb_packets)
{
- if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ if (record_core_cycles) {
+ uint64_t cycles = rte_rdtsc() - start_tsc;
+ fs->core_cycles += cycles;
+ if (nb_packets > 0)
+ fs->busy_cycles += cycles;
+ }
}
static inline void
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952daa..ad37626ff63c 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
struct rte_ether_hdr eth_hdr;
- uint16_t nb_tx;
+ uint16_t nb_tx = 0;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
@@ -392,7 +392,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
}
if (nb_pkt == 0)
- return;
+ goto end;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -426,7 +426,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
} while (++nb_tx < nb_pkt);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v2 2/4] eal: allow applications to report their cpu cycles utilization
2022-11-28 8:59 ` [PATCH v2 2/4] eal: allow applications to report their cpu cycles utilization Robin Jarry
@ 2022-11-28 10:52 ` Morten Brørup
2022-11-29 8:19 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Morten Brørup @ 2022-11-28 10:52 UTC (permalink / raw)
To: Robin Jarry, dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom
> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Monday, 28 November 2022 10.00
>
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of CPU cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Cc: Bruce Richardson <bruce.richardson@intel.com>
> Cc: Jerin Jacob <jerinj@marvell.com>
> Cc: Kevin Laatz <kevin.laatz@intel.com>
> Cc: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> Cc: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> Cc: Morten Brørup <mb@smartsharesystems.com>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> ---
> v1 -> v2:
>
> Changed the approach based on Morten's review: the callback is now
> expected to report the total number of cycles since application start
> and the amount of these cycles that were spent doing busy work. This
> will give more flexibility in external monitoring tools to decide the
> sample period to compute busyness ratio.
>
> lib/eal/common/eal_common_lcore.c | 31 ++++++++++++++++++++++++++++---
> lib/eal/include/rte_lcore.h | 29 +++++++++++++++++++++++++++++
> lib/eal/version.map | 1 +
> 3 files changed, 58 insertions(+), 3 deletions(-)
>
> diff --git a/lib/eal/common/eal_common_lcore.c
> b/lib/eal/common/eal_common_lcore.c
> index 8a6c12550238..51f53fc93ece 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -2,6 +2,7 @@
> * Copyright(c) 2010-2014 Intel Corporation
> */
>
> +#include <inttypes.h>
> #include <stdlib.h>
> #include <string.h>
>
> @@ -420,11 +421,20 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void
> *arg)
> return ret;
> }
>
> +static rte_lcore_usage_cb lcore_usage_cb;
> +
> +void
> +rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
> +{
> + lcore_usage_cb = cb;
> +}
> +
> static int
> lcore_dump_cb(unsigned int lcore_id, void *arg)
> {
> struct rte_config *cfg = rte_eal_get_configuration();
> - char cpuset[RTE_CPU_AFFINITY_STR_LEN];
> + char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
> + uint64_t busy_cycles, total_cycles;
> const char *role;
> FILE *f = arg;
> int ret;
> @@ -444,11 +454,19 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
> break;
> }
>
> + busy_cycles = 0;
> + total_cycles = 0;
> + usage_str[0] = '\0';
> + if (lcore_usage_cb && lcore_usage_cb(lcore_id, &busy_cycles,
> &total_cycles) == 0) {
The DPDK coding convention is to explicitly compare to NULL, i.e.:
if (lcore_usage_cb != NULL && lcore_usage_cb(...
> + snprintf(usage_str, sizeof(usage_str), ", busy cycles
> %"PRIu64"/%"PRIu64,
> + busy_cycles, total_cycles);
Consider adding the percentage here, for easy human consumption:
", busy cycles %"PRIu64"/%"PRIu64" (%.02f%%)",
busy_cycles, total_cycles,
busy_cycles ? (float)busy_cycles / (float)total_cycles * (float)100 : 0.0f);
On the other hand, it is the average over the total uptime, so the percentage might only be useful for very few cases.
> + }
> ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset,
> cpuset,
> sizeof(cpuset));
> - fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n",
> lcore_id,
> + fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n",
> lcore_id,
> rte_lcore_to_socket_id(lcore_id), role, cpuset,
> - ret == 0 ? "" : "...");
> + ret == 0 ? "" : "...", usage_str);
> +
> return 0;
> }
>
> @@ -486,6 +504,7 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void
> *arg)
> {
> struct lcore_telemetry_info *info = arg;
> struct rte_config *cfg = rte_eal_get_configuration();
> + uint64_t busy_cycles, total_cycles;
> struct rte_tel_data *cpuset;
> const char *role;
> unsigned int cpu;
> @@ -519,6 +538,12 @@ lcore_telemetry_info_cb(unsigned int lcore_id,
> void *arg)
> if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
> rte_tel_data_add_array_int(cpuset, cpu);
> rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> + busy_cycles = 0;
> + total_cycles = 0;
> + if (lcore_usage_cb && lcore_usage_cb(lcore_id, &busy_cycles,
> &total_cycles) == 0) {
Same comment about coding convention:
if (lcore_usage_cb != NULL && lcore_usage_cb(...
> + rte_tel_data_add_dict_u64(info->d, "busy_cycles",
> busy_cycles);
> + rte_tel_data_add_dict_u64(info->d, "total_cycles",
> total_cycles);
> + }
>
> return 0;
> }
> diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
> index 6938c3fd7b81..dc352297bcbc 100644
> --- a/lib/eal/include/rte_lcore.h
> +++ b/lib/eal/include/rte_lcore.h
> @@ -328,6 +328,35 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int
> lcore_id, void *arg);
> int
> rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
>
> +/**
> + * Callback to allow applications to report CPU usage.
> + *
> + * @param [in] lcore_id
> + * The lcore to consider.
> + * @param [out] busy
> + * The number of busy CPU cycles since the application start.
> + * @param [out] total
> + * The total number of CPU cycles since the application start.
> + * @return
> + * - 0 if both busy and total were set correctly.
> + * - a negative value if the information is not available or if any
> error occurred.
> + */
> +typedef int (*rte_lcore_usage_cb)(
> + unsigned int lcore_id, uint64_t *busy_cycles, uint64_t
> *total_cycles);
> +
> +/**
> + * Register a callback from an application to be called in
> rte_lcore_dump()
> + * and the /eal/lcore/info telemetry endpoint handler.
> + *
> + * Applications are expected to report the amount of busy and total
> CPU cycles
> + * since their startup.
> + *
> + * @param cb
> + * The callback function.
> + */
> +__rte_experimental
> +void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
> +
> /**
> * List all lcores.
> *
> diff --git a/lib/eal/version.map b/lib/eal/version.map
> index 7ad12a7dc985..30fd216a12ea 100644
> --- a/lib/eal/version.map
> +++ b/lib/eal/version.map
> @@ -440,6 +440,7 @@ EXPERIMENTAL {
> rte_thread_detach;
> rte_thread_equal;
> rte_thread_join;
> + rte_lcore_register_usage_cb;
> };
>
> INTERNAL {
> --
> 2.38.1
>
Looks good to me.
And we could probably discuss naming forever... "Usage" and "utilization" are synonyms, but usage is shorter, so let's stick with that.
Acked-by: Morten Brørup <mb@smartsharesystems.com>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v2 2/4] eal: allow applications to report their cpu cycles utilization
2022-11-28 10:52 ` Morten Brørup
@ 2022-11-29 8:19 ` Robin Jarry
0 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-29 8:19 UTC (permalink / raw)
To: Morten Brørup, dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom
Morten Brørup, Nov 28, 2022 at 11:52:
> Consider adding the percentage here, for easy human consumption:
>
> ", busy cycles %"PRIu64"/%"PRIu64" (%.02f%%)",
> busy_cycles, total_cycles,
> busy_cycles ? (float)busy_cycles / (float)total_cycles * (float)100 : 0.0f);
>
> On the other hand, it is the average over the total uptime, so the
> percentage might only be useful for very few cases.
I had thought adding the percentage. But as you said, I'm not sure how
this can be of interest. I can add it in v3 if needed.
> Same comment about coding convention:
> if (lcore_usage_cb != NULL && lcore_usage_cb(...
Will fix that in v3.
> Looks good to me.
>
> And we could probably discuss naming forever... "Usage" and
> "utilization" are synonyms, but usage is shorter, so let's stick with
> that.
>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
Thanks. I'll do s/utilization/usage/g for v3.
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v3 0/4] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (4 preceding siblings ...)
2022-11-28 8:59 ` [PATCH v2 0/4] lcore telemetry improvements Robin Jarry
@ 2022-11-29 15:33 ` Robin Jarry
2022-11-29 15:33 ` [PATCH v3 1/4] eal: add lcore info in telemetry Robin Jarry
` (4 more replies)
2022-12-07 16:21 ` [PATCH " Robin Jarry
` (11 subsequent siblings)
17 siblings, 5 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-29 15:33 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/327db12c751be0375fcfed5e44b6065bcfb75c82
Changes since v2:
- Fixed typos in docstrings.
- Used if (xxx != NULL) instead of if (xxx) test convention.
- Guarded against an unlikely race if rte_lcore_dump() is called by
a thread while another one calls rte_lcore_register_usage_cb(NULL).
- s/utilization/usage/
- Fixed build on Windows.
Changes since v1:
- The cpuset field in telemetry is now a JSON list of CPU ids.
- Applications must now report their raw CPU cycles counts. The busyness
ratio and rate of change are left to external monitoring tools.
- Renamed show lcores -> dump_lcores in testpmd.
Robin Jarry (4):
eal: add lcore info in telemetry
eal: allow applications to report their cpu cycles usage
testpmd: add dump_lcores command
testpmd: report lcore usage
app/test-pmd/5tswap.c | 5 +-
app/test-pmd/cmdline.c | 3 +
app/test-pmd/csumonly.c | 6 +-
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +-
app/test-pmd/iofwd.c | 5 +-
app/test-pmd/macfwd.c | 5 +-
app/test-pmd/macswap.c | 5 +-
app/test-pmd/noisy_vnf.c | 4 +
app/test-pmd/rxonly.c | 5 +-
app/test-pmd/shared_rxq_fwd.c | 5 +-
app/test-pmd/testpmd.c | 39 ++++++++-
app/test-pmd/testpmd.h | 14 +++-
app/test-pmd/txonly.c | 7 +-
lib/eal/common/eal_common_lcore.c | 131 +++++++++++++++++++++++++++++-
lib/eal/include/rte_lcore.h | 29 +++++++
lib/eal/version.map | 1 +
17 files changed, 242 insertions(+), 30 deletions(-)
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v3 1/4] eal: add lcore info in telemetry
2022-11-29 15:33 ` [PATCH v3 0/4] lcore telemetry improvements Robin Jarry
@ 2022-11-29 15:33 ` Robin Jarry
2022-11-29 15:33 ` [PATCH v3 2/4] eal: allow applications to report their cpu cycles usage Robin Jarry
` (3 subsequent siblings)
4 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-29 15:33 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup
Report the same information as rte_lcore_dump() in the telemetry
API under /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v2 -> v3: Added #ifndef WINDOWS guards. Telemetry is not available.
v1 -> v2: Changed "cpuset" to an array of CPU ids instead of a string.
lib/eal/common/eal_common_lcore.c | 96 +++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..16548977dce8 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,9 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
#include "eal_private.h"
#include "eal_thread.h"
@@ -456,3 +459,96 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_info *info = arg;
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct rte_tel_data *cpuset;
+ const char *role;
+ unsigned int cpu;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ switch (cfg->lcore_role[lcore_id]) {
+ case ROLE_RTE:
+ role = "RTE";
+ break;
+ case ROLE_SERVICE:
+ role = "SERVICE";
+ break;
+ case ROLE_NON_EAL:
+ role = "NON_EAL";
+ break;
+ default:
+ role = "UNKNOWN";
+ break;
+ }
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", role);
+ cpuset = rte_tel_data_alloc();
+ if (!cpuset)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ char *endptr = NULL;
+ if (params == NULL || strlen(params) == 0)
+ return -EINVAL;
+ errno = 0;
+ info.lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (endptr == params)
+ return -EINVAL;
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v3 2/4] eal: allow applications to report their cpu cycles usage
2022-11-29 15:33 ` [PATCH v3 0/4] lcore telemetry improvements Robin Jarry
2022-11-29 15:33 ` [PATCH v3 1/4] eal: add lcore info in telemetry Robin Jarry
@ 2022-11-29 15:33 ` Robin Jarry
2022-11-29 16:10 ` Mattias Rönnblom
2022-11-29 15:33 ` [PATCH v3 3/4] testpmd: add dump_lcores command Robin Jarry
` (2 subsequent siblings)
4 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-11-29 15:33 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of CPU cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v2 -> v3:
- Copied callback to local variable to guard against (unlikely) races.
- Used != NULL convention to test if callback is defined.
- Fixed typo in doc string.
- Did not add a % value in rte_lcore_dump() as its use would be very
limited.
v1 -> v2:
Changed the approach based on Morten's review: the callback is now
expected to report the total number of cycles since application start
and the amount of these cycles that were spent doing busy work. This
will give more flexibility in external monitoring tools to decide the
sample period to compute busyness ratio.
lib/eal/common/eal_common_lcore.c | 35 ++++++++++++++++++++++++++++---
lib/eal/include/rte_lcore.h | 29 +++++++++++++++++++++++++
lib/eal/version.map | 1 +
3 files changed, 62 insertions(+), 3 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 16548977dce8..23717abf6530 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -422,11 +423,21 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
- char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
+ uint64_t busy_cycles, total_cycles;
+ rte_lcore_usage_cb usage_cb;
const char *role;
FILE *f = arg;
int ret;
@@ -446,11 +457,20 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
break;
}
+ busy_cycles = 0;
+ total_cycles = 0;
+ usage_str[0] = '\0';
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &busy_cycles, &total_cycles) == 0) {
+ snprintf(usage_str, sizeof(usage_str), ", busy cycles %"PRIu64"/%"PRIu64,
+ busy_cycles, total_cycles);
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ ret == 0 ? "" : "...", usage_str);
+
return 0;
}
@@ -489,7 +509,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct lcore_telemetry_info *info = arg;
struct rte_config *cfg = rte_eal_get_configuration();
+ uint64_t busy_cycles, total_cycles;
struct rte_tel_data *cpuset;
+ rte_lcore_usage_cb usage_cb;
const char *role;
unsigned int cpu;
@@ -522,6 +544,13 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
rte_tel_data_add_array_int(cpuset, cpu);
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ busy_cycles = 0;
+ total_cycles = 0;
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &busy_cycles, &total_cycles) == 0) {
+ rte_tel_data_add_dict_u64(info->d, "busy_cycles", busy_cycles);
+ rte_tel_data_add_dict_u64(info->d, "total_cycles", total_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 6938c3fd7b81..0552e6f44142 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,35 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * Callback to allow applications to report CPU usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] busy_cycles
+ * The number of busy CPU cycles since the application start.
+ * @param [out] total_cycles
+ * The total number of CPU cycles since the application start.
+ * @return
+ * - 0 if both busy and total were set correctly.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(
+ unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles);
+
+/**
+ * Register a callback from an application to be called in rte_lcore_dump()
+ * and the /eal/lcore/info telemetry endpoint handler.
+ *
+ * Applications are expected to report the amount of busy and total CPU cycles
+ * since their startup.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 7ad12a7dc985..30fd216a12ea 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -440,6 +440,7 @@ EXPERIMENTAL {
rte_thread_detach;
rte_thread_equal;
rte_thread_join;
+ rte_lcore_register_usage_cb;
};
INTERNAL {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v3 3/4] testpmd: add dump_lcores command
2022-11-29 15:33 ` [PATCH v3 0/4] lcore telemetry improvements Robin Jarry
2022-11-29 15:33 ` [PATCH v3 1/4] eal: add lcore info in telemetry Robin Jarry
2022-11-29 15:33 ` [PATCH v3 2/4] eal: allow applications to report their cpu cycles usage Robin Jarry
@ 2022-11-29 15:33 ` Robin Jarry
2022-11-29 15:33 ` [PATCH v3 4/4] testpmd: report lcore usage Robin Jarry
2022-11-29 16:14 ` [PATCH v3 0/4] lcore telemetry improvements Mattias Rönnblom
4 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-29 15:33 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v2 -> v3: no change
v1 -> v2: renamed show lcores -> dump_lcores
app/test-pmd/cmdline.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b32dc8bfd445..96474d2ae458 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8345,6 +8345,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8358,6 +8360,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v3 4/4] testpmd: report lcore usage
2022-11-29 15:33 ` [PATCH v3 0/4] lcore telemetry improvements Robin Jarry
` (2 preceding siblings ...)
2022-11-29 15:33 ` [PATCH v3 3/4] testpmd: add dump_lcores command Robin Jarry
@ 2022-11-29 15:33 ` Robin Jarry
2022-11-29 16:14 ` [PATCH v3 0/4] lcore telemetry improvements Mattias Rönnblom
4 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-29 15:33 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup
Reuse the --record-core-cycles option to account for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
to accept an additional argument for the number of processed packets.
Update fwd_stream.busy_cycles when the number of packets is greater than
zero.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
field in struct fwd_lcore to identify the correct index in fwd_lcores
and return the sum of busy/total cycles of all fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v2 -> v3: no change
v1 -> v2: adjusted to new lcore_usage api
app/test-pmd/5tswap.c | 5 +++--
app/test-pmd/csumonly.c | 6 +++---
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +++---
app/test-pmd/iofwd.c | 5 +++--
app/test-pmd/macfwd.c | 5 +++--
app/test-pmd/macswap.c | 5 +++--
app/test-pmd/noisy_vnf.c | 4 ++++
app/test-pmd/rxonly.c | 5 +++--
app/test-pmd/shared_rxq_fwd.c | 5 +++--
app/test-pmd/testpmd.c | 39 ++++++++++++++++++++++++++++++++++-
app/test-pmd/testpmd.h | 14 +++++++++----
app/test-pmd/txonly.c | 7 ++++---
13 files changed, 81 insertions(+), 27 deletions(-)
diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index f041a5e1d530..03225075716c 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -116,7 +116,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -182,7 +182,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 1c2459851522..03e141221a56 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -868,7 +868,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
@@ -1200,8 +1200,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index fd6abc0f4124..7b2f0ffdf0f5 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -196,7 +196,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
RTE_PER_LCORE(_next_flow) = next_flow;
- get_end_cycles(fs, start_tsc);
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 066f2a3ab79b..2fc9f96dc95f 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -303,7 +303,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_replies = 0;
@@ -508,8 +508,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
} while (++nb_tx < nb_replies);
}
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8fafdec548ad..e5a2dbe20c69 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -59,7 +59,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
@@ -84,7 +84,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index beb220fbb462..9db623999970 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -65,7 +65,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -115,7 +115,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4f8deb338296..4db134ac1d91 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,7 +66,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -93,7 +93,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..290bdcda45f0 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -152,6 +152,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
uint64_t delta_ms;
bool needs_flush = false;
uint64_t now;
+ uint64_t start_tsc = 0;
+
+ get_start_cycles(&start_tsc);
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
@@ -219,6 +222,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+ get_end_cycles(fs, start_tsc, nb_rx + nb_tx);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index d528d4f34e60..519202339e16 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -58,13 +58,14 @@ pkt_burst_receive(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/shared_rxq_fwd.c b/app/test-pmd/shared_rxq_fwd.c
index 2e9047804b5b..395b73bfe52e 100644
--- a/app/test-pmd/shared_rxq_fwd.c
+++ b/app/test-pmd/shared_rxq_fwd.c
@@ -102,9 +102,10 @@ shared_rxq_fwd(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
forward_shared_rxq(fs, nb_rx, pkts_burst);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 134d79a55547..6ad91334d352 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2184,6 +2184,7 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
+ fs->busy_cycles = 0;
fs->core_cycles = 0;
}
}
@@ -2260,6 +2261,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
+ fc->lcore_id = rte_lcore_id();
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
do {
@@ -2288,6 +2290,37 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+ int c;
+
+ for (c = 0; c < nb_lcores; c++) {
+ fc = fwd_lcores[c];
+ if (fc->lcore_id != lcore_id)
+ continue;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ *busy_cycles = 0;
+ *total_cycles = 0;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++)
+ if (!fsm[sm_id]->disabled) {
+ *busy_cycles += fsm[sm_id]->busy_cycles;
+ *total_cycles += fsm[sm_id]->core_cycles;
+ }
+
+ return 0;
+ }
+
+ return -1;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4522,6 +4555,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..5dbf5d1c465c 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,8 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
+ uint64_t core_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +361,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ unsigned int lcore_id; /**< return value of rte_lcore_id() */
};
/*
@@ -836,10 +838,14 @@ get_start_cycles(uint64_t *start_tsc)
}
static inline void
-get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
+get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc, uint64_t nb_packets)
{
- if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ if (record_core_cycles) {
+ uint64_t cycles = rte_rdtsc() - start_tsc;
+ fs->core_cycles += cycles;
+ if (nb_packets > 0)
+ fs->busy_cycles += cycles;
+ }
}
static inline void
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952daa..ad37626ff63c 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
struct rte_ether_hdr eth_hdr;
- uint16_t nb_tx;
+ uint16_t nb_tx = 0;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
@@ -392,7 +392,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
}
if (nb_pkt == 0)
- return;
+ goto end;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -426,7 +426,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
} while (++nb_tx < nb_pkt);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v3 2/4] eal: allow applications to report their cpu cycles usage
2022-11-29 15:33 ` [PATCH v3 2/4] eal: allow applications to report their cpu cycles usage Robin Jarry
@ 2022-11-29 16:10 ` Mattias Rönnblom
2022-12-07 11:00 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Mattias Rönnblom @ 2022-11-29 16:10 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Morten Brørup
On 2022-11-29 16:33, Robin Jarry wrote:
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of CPU cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
> v2 -> v3:
>
> - Copied callback to local variable to guard against (unlikely) races.
> - Used != NULL convention to test if callback is defined.
> - Fixed typo in doc string.
> - Did not add a % value in rte_lcore_dump() as its use would be very
> limited.
>
> v1 -> v2:
>
> Changed the approach based on Morten's review: the callback is now
> expected to report the total number of cycles since application start
> and the amount of these cycles that were spent doing busy work. This
> will give more flexibility in external monitoring tools to decide the
> sample period to compute busyness ratio.
>
> lib/eal/common/eal_common_lcore.c | 35 ++++++++++++++++++++++++++++---
> lib/eal/include/rte_lcore.h | 29 +++++++++++++++++++++++++
> lib/eal/version.map | 1 +
> 3 files changed, 62 insertions(+), 3 deletions(-)
>
> diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
> index 16548977dce8..23717abf6530 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -2,6 +2,7 @@
> * Copyright(c) 2010-2014 Intel Corporation
> */
>
> +#include <inttypes.h>
> #include <stdlib.h>
> #include <string.h>
>
> @@ -422,11 +423,21 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
> return ret;
> }
>
> +static rte_lcore_usage_cb lcore_usage_cb;
> +
> +void
> +rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
> +{
> + lcore_usage_cb = cb;
> +}
> +
> static int
> lcore_dump_cb(unsigned int lcore_id, void *arg)
> {
> struct rte_config *cfg = rte_eal_get_configuration();
> - char cpuset[RTE_CPU_AFFINITY_STR_LEN];
> + char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
> + uint64_t busy_cycles, total_cycles;
> + rte_lcore_usage_cb usage_cb;
> const char *role;
> FILE *f = arg;
> int ret;
> @@ -446,11 +457,20 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
> break;
> }
>
> + busy_cycles = 0;
> + total_cycles = 0;
> + usage_str[0] = '\0';
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &busy_cycles, &total_cycles) == 0) {
> + snprintf(usage_str, sizeof(usage_str), ", busy cycles %"PRIu64"/%"PRIu64,
> + busy_cycles, total_cycles);
> + }
> ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
> sizeof(cpuset));
> - fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
> + fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
> rte_lcore_to_socket_id(lcore_id), role, cpuset,
> - ret == 0 ? "" : "...");
> + ret == 0 ? "" : "...", usage_str);
> +
> return 0;
> }
>
> @@ -489,7 +509,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> {
> struct lcore_telemetry_info *info = arg;
> struct rte_config *cfg = rte_eal_get_configuration();
> + uint64_t busy_cycles, total_cycles;
> struct rte_tel_data *cpuset;
> + rte_lcore_usage_cb usage_cb;
> const char *role;
> unsigned int cpu;
>
> @@ -522,6 +544,13 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
> rte_tel_data_add_array_int(cpuset, cpu);
> rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> + busy_cycles = 0;
> + total_cycles = 0;
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &busy_cycles, &total_cycles) == 0) {
> + rte_tel_data_add_dict_u64(info->d, "busy_cycles", busy_cycles);
> + rte_tel_data_add_dict_u64(info->d, "total_cycles", total_cycles);
> + }
>
> return 0;
> }
> diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
> index 6938c3fd7b81..0552e6f44142 100644
> --- a/lib/eal/include/rte_lcore.h
> +++ b/lib/eal/include/rte_lcore.h
> @@ -328,6 +328,35 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
> int
> rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
>
> +/**
> + * Callback to allow applications to report CPU usage.
> + *
> + * @param [in] lcore_id
> + * The lcore to consider.
> + * @param [out] busy_cycles
> + * The number of busy CPU cycles since the application start.
Wouldn't it be more appropriate to describe it as "TSC cycles", instead
of "CPU cycles"? Or technically "The amount of busy time since
application start, in TSC cycles".
Reporting actual CPU core cycles spent is not possible, in the general case.
> + * @param [out] total_cycles
> + * The total number of CPU cycles since the application start.
> + * @return
> + * - 0 if both busy and total were set correctly.
> + * - a negative value if the information is not available or if any error occurred.
> + */
> +typedef int (*rte_lcore_usage_cb)(
> + unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles);
> +
> +/**
> + * Register a callback from an application to be called in rte_lcore_dump()
> + * and the /eal/lcore/info telemetry endpoint handler.
> + *
> + * Applications are expected to report the amount of busy and total CPU cycles
> + * since their startup.
> + *
> + * @param cb
> + * The callback function.
> + */
> +__rte_experimental
> +void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
> +
> /**
> * List all lcores.
> *
> diff --git a/lib/eal/version.map b/lib/eal/version.map
> index 7ad12a7dc985..30fd216a12ea 100644
> --- a/lib/eal/version.map
> +++ b/lib/eal/version.map
> @@ -440,6 +440,7 @@ EXPERIMENTAL {
> rte_thread_detach;
> rte_thread_equal;
> rte_thread_join;
> + rte_lcore_register_usage_cb;
> };
>
> INTERNAL {
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v3 0/4] lcore telemetry improvements
2022-11-29 15:33 ` [PATCH v3 0/4] lcore telemetry improvements Robin Jarry
` (3 preceding siblings ...)
2022-11-29 15:33 ` [PATCH v3 4/4] testpmd: report lcore usage Robin Jarry
@ 2022-11-29 16:14 ` Mattias Rönnblom
4 siblings, 0 replies; 134+ messages in thread
From: Mattias Rönnblom @ 2022-11-29 16:14 UTC (permalink / raw)
To: Robin Jarry, dev
On 2022-11-29 16:33, Robin Jarry wrote:
> This is a follow up on previous work by Kevin Laatz:
>
> http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
>
> This series is aimed at allowing DPDK applications to expose their CPU
> usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
> more basic and naive approach which leaves the cpu cycles accounting
> completely up to the application.
>
This seems like a reasonable approach to me.
DPDK needs to have more OS-type constructs for a feature like this to be
implemented, without application involvement. I would love for this to
happen, but we aren't there yet.
> For reference, I have implemented a draft patch in OvS to use
> rte_lcore_register_usage_cb() and report the already available busy
> cycles information.
>
> https://github.com/rjarry/ovs/commit/327db12c751be0375fcfed5e44b6065bcfb75c82
>
> Changes since v2:
>
> - Fixed typos in docstrings.
> - Used if (xxx != NULL) instead of if (xxx) test convention.
> - Guarded against an unlikely race if rte_lcore_dump() is called by
> a thread while another one calls rte_lcore_register_usage_cb(NULL).
> - s/utilization/usage/
> - Fixed build on Windows.
>
> Changes since v1:
>
> - The cpuset field in telemetry is now a JSON list of CPU ids.
> - Applications must now report their raw CPU cycles counts. The busyness
> ratio and rate of change is left to external monitoring tools.
> - Renamed show lcores -> dump_lcores in testpmd.
>
> Robin Jarry (4):
> eal: add lcore info in telemetry
> eal: allow applications to report their cpu cycles usage
> testpmd: add dump_lcores command
> testpmd: report lcore usage
>
> app/test-pmd/5tswap.c | 5 +-
> app/test-pmd/cmdline.c | 3 +
> app/test-pmd/csumonly.c | 6 +-
> app/test-pmd/flowgen.c | 2 +-
> app/test-pmd/icmpecho.c | 6 +-
> app/test-pmd/iofwd.c | 5 +-
> app/test-pmd/macfwd.c | 5 +-
> app/test-pmd/macswap.c | 5 +-
> app/test-pmd/noisy_vnf.c | 4 +
> app/test-pmd/rxonly.c | 5 +-
> app/test-pmd/shared_rxq_fwd.c | 5 +-
> app/test-pmd/testpmd.c | 39 ++++++++-
> app/test-pmd/testpmd.h | 14 +++-
> app/test-pmd/txonly.c | 7 +-
> lib/eal/common/eal_common_lcore.c | 131 +++++++++++++++++++++++++++++-
> lib/eal/include/rte_lcore.h | 29 +++++++
> lib/eal/version.map | 1 +
> 17 files changed, 242 insertions(+), 30 deletions(-)
>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v3 2/4] eal: allow applications to report their cpu cycles usage
2022-11-29 16:10 ` Mattias Rönnblom
@ 2022-12-07 11:00 ` Robin Jarry
2022-12-07 11:21 ` Morten Brørup
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-12-07 11:00 UTC (permalink / raw)
To: Mattias Rönnblom, dev; +Cc: Morten Brørup
Hi Mattias,
Mattias Rönnblom, Nov 29, 2022 at 17:10:
> > + * @param [out] busy_cycles
> > + * The number of busy CPU cycles since the application start.
>
> Wouldn't it be more appropriate to describe it as "TSC cycles", instead
> of "CPU cycles"? Or technically "The amount of busy time since
> application start, in TSC cycles".
>
> Reporting actual CPU core cycles spent is not possible, in the general case.
Well, technically, these "cycles" could mean anything. You could report
the number of microseconds, Joules, whatever. The important information
is the ratio compared to the total number of "cycles". Perhaps Morten's
suggestion (units) would be more precise, but I fear it would also be
more confusing.
I don't mind adding the specific TSC cycles nomenclature if there is
a consensus.
Cheers
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v3 2/4] eal: allow applications to report their cpu cycles usage
2022-12-07 11:00 ` Robin Jarry
@ 2022-12-07 11:21 ` Morten Brørup
0 siblings, 0 replies; 134+ messages in thread
From: Morten Brørup @ 2022-12-07 11:21 UTC (permalink / raw)
To: Robin Jarry, Mattias Rönnblom, dev
> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Wednesday, 7 December 2022 12.01
>
> Hi Mattias,
>
> Mattias Rönnblom, Nov 29, 2022 at 17:10:
> > > + * @param [out] busy_cycles
> > > + * The number of busy CPU cycles since the application start.
> >
> > Wouldn't it be more appropriate to describe it as "TSC cycles",
> instead
> > of "CPU cycles"? Or technically "The amount of busy time since
> > application start, in TSC cycles".
> >
> > Reporting actual CPU core cycles spent is not possible, in the
> general case.
>
> Well, technically, these "cycles" could mean anything. You could report
> the number of microseconds, Joules, whatever. The important information
> is the ratio compared to the total number of "cycles". Perhaps Morten's
> suggestion (units) would be more precise, but I fear it would also be
> more confusing.
>
> I don't mind adding the specific TSC cycles nomenclature if there is
> a consensus.
Let's stick with TSC cycles. As you mention yourself, sometimes clarity is better than abstraction.
Having a fixed unit of measure also makes it easier to use in DPDK libraries and wherever, in the future.
-Morten
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH 0/4] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (5 preceding siblings ...)
2022-11-29 15:33 ` [PATCH v3 0/4] lcore telemetry improvements Robin Jarry
@ 2022-12-07 16:21 ` Robin Jarry
2022-12-07 16:21 ` [PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
` (3 more replies)
2022-12-16 10:21 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
` (10 subsequent siblings)
17 siblings, 4 replies; 134+ messages in thread
From: Robin Jarry @ 2022-12-07 16:21 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/327db12c751be0375fcfed5e44b6065bcfb75c82
Changes since v3:
- Changed nomenclature from CPU cycles to TSC cycles in the docstring of
rte_lcore_usage_cb.
Changes since v2:
- Fixed typos in docstrings.
- Used if (xxx != NULL) instead of if (xxx) test convention.
- Guarded against an unlikely race if rte_lcore_dump() is called by
a thread while another one calls rte_lcore_register_usage_cb(NULL).
- s/utilization/usage/
- Fixed build on Windows.
Changes since v1:
- The cpuset field in telemetry is now a JSON list of CPU ids.
- Applications must now report their raw CPU cycles counts. The busyness
ratio and rate of change is left to external monitoring tools.
- Renamed show lcores -> dump_lcores in testpmd.
Robin Jarry (4):
eal: add lcore info in telemetry
eal: allow applications to report their cpu usage
testpmd: add dump_lcores command
testpmd: report lcore usage
app/test-pmd/5tswap.c | 5 +-
app/test-pmd/cmdline.c | 3 +
app/test-pmd/csumonly.c | 6 +-
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +-
app/test-pmd/iofwd.c | 5 +-
app/test-pmd/macfwd.c | 5 +-
app/test-pmd/macswap.c | 5 +-
app/test-pmd/noisy_vnf.c | 4 +
app/test-pmd/rxonly.c | 5 +-
app/test-pmd/shared_rxq_fwd.c | 5 +-
app/test-pmd/testpmd.c | 39 ++++++++-
app/test-pmd/testpmd.h | 14 +++-
app/test-pmd/txonly.c | 7 +-
lib/eal/common/eal_common_lcore.c | 131 +++++++++++++++++++++++++++++-
lib/eal/include/rte_lcore.h | 29 +++++++
lib/eal/version.map | 1 +
17 files changed, 242 insertions(+), 30 deletions(-)
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH 1/4] eal: add lcore info in telemetry
2022-12-07 16:21 ` [PATCH " Robin Jarry
@ 2022-12-07 16:21 ` Robin Jarry
2022-12-07 16:21 ` [PATCH 2/4] eal: allow applications to report their cpu usage Robin Jarry
` (2 subsequent siblings)
3 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-12-07 16:21 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup
Report the same information as rte_lcore_dump() in the telemetry
API into /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v3 -> v4: No change
lib/eal/common/eal_common_lcore.c | 96 +++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..16548977dce8 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,9 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
#include "eal_private.h"
#include "eal_thread.h"
@@ -456,3 +459,96 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_info *info = arg;
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct rte_tel_data *cpuset;
+ const char *role;
+ unsigned int cpu;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ switch (cfg->lcore_role[lcore_id]) {
+ case ROLE_RTE:
+ role = "RTE";
+ break;
+ case ROLE_SERVICE:
+ role = "SERVICE";
+ break;
+ case ROLE_NON_EAL:
+ role = "NON_EAL";
+ break;
+ default:
+ role = "UNKNOWN";
+ break;
+ }
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", role);
+ cpuset = rte_tel_data_alloc();
+ if (!cpuset)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ char *endptr = NULL;
+ if (params == NULL || strlen(params) == 0)
+ return -EINVAL;
+ errno = 0;
+ info.lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (endptr == params)
+ return -EINVAL;
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH 2/4] eal: allow applications to report their cpu usage
2022-12-07 16:21 ` [PATCH " Robin Jarry
2022-12-07 16:21 ` [PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
@ 2022-12-07 16:21 ` Robin Jarry
2022-12-13 15:49 ` Robin Jarry
2022-12-07 16:21 ` [PATCH 3/4] testpmd: add dump_lcores command Robin Jarry
2022-12-07 16:21 ` [PATCH 4/4] testpmd: report lcore usage Robin Jarry
3 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-12-07 16:21 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of TSC cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v3 -> v4: Changed nomenclature: CPU cycles -> TSC cycles
lib/eal/common/eal_common_lcore.c | 35 ++++++++++++++++++++++++++++---
lib/eal/include/rte_lcore.h | 29 +++++++++++++++++++++++++
lib/eal/version.map | 1 +
3 files changed, 62 insertions(+), 3 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 16548977dce8..23717abf6530 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -422,11 +423,21 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
- char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
+ uint64_t busy_cycles, total_cycles;
+ rte_lcore_usage_cb usage_cb;
const char *role;
FILE *f = arg;
int ret;
@@ -446,11 +457,20 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
break;
}
+ busy_cycles = 0;
+ total_cycles = 0;
+ usage_str[0] = '\0';
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &busy_cycles, &total_cycles) == 0) {
+ snprintf(usage_str, sizeof(usage_str), ", busy cycles %"PRIu64"/%"PRIu64,
+ busy_cycles, total_cycles);
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ ret == 0 ? "" : "...", usage_str);
+
return 0;
}
@@ -489,7 +509,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct lcore_telemetry_info *info = arg;
struct rte_config *cfg = rte_eal_get_configuration();
+ uint64_t busy_cycles, total_cycles;
struct rte_tel_data *cpuset;
+ rte_lcore_usage_cb usage_cb;
const char *role;
unsigned int cpu;
@@ -522,6 +544,13 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
rte_tel_data_add_array_int(cpuset, cpu);
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ busy_cycles = 0;
+ total_cycles = 0;
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &busy_cycles, &total_cycles) == 0) {
+ rte_tel_data_add_dict_u64(info->d, "busy_cycles", busy_cycles);
+ rte_tel_data_add_dict_u64(info->d, "total_cycles", total_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 6938c3fd7b81..df7f0a8e07c6 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,35 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * Callback to allow applications to report CPU usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] busy_cycles
+ * The amount of busy time since application start, in TSC cycles.
+ * @param [out] total_cycles
+ * The total amount of time since application start, in TSC cycles.
+ * @return
+ * - 0 if both busy and total were set correctly.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(
+ unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles);
+
+/**
+ * Register a callback from an application to be called in rte_lcore_dump()
+ * and the /eal/lcore/info telemetry endpoint handler.
+ *
+ * Applications are expected to report the amount of busy and total TSC cycles
+ * since their startup.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 7ad12a7dc985..30fd216a12ea 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -440,6 +440,7 @@ EXPERIMENTAL {
rte_thread_detach;
rte_thread_equal;
rte_thread_join;
+ rte_lcore_register_usage_cb;
};
INTERNAL {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH 3/4] testpmd: add dump_lcores command
2022-12-07 16:21 ` [PATCH " Robin Jarry
2022-12-07 16:21 ` [PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
2022-12-07 16:21 ` [PATCH 2/4] eal: allow applications to report their cpu usage Robin Jarry
@ 2022-12-07 16:21 ` Robin Jarry
2022-12-07 16:21 ` [PATCH 4/4] testpmd: report lcore usage Robin Jarry
3 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-12-07 16:21 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v3 -> v4: no change
app/test-pmd/cmdline.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b32dc8bfd445..96474d2ae458 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8345,6 +8345,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8358,6 +8360,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH 4/4] testpmd: report lcore usage
2022-12-07 16:21 ` [PATCH " Robin Jarry
` (2 preceding siblings ...)
2022-12-07 16:21 ` [PATCH 3/4] testpmd: add dump_lcores command Robin Jarry
@ 2022-12-07 16:21 ` Robin Jarry
3 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-12-07 16:21 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup
Reuse the --record-core-cycles option to account for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
to accept an additional argument for the number of processed packets.
Update fwd_stream.busy_cycles when the number of packets is greater than
zero.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
field in struct fwd_lcore to identify the correct index in fwd_lcores
and return the sum of busy/total cycles of all fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v3 -> v4: no change
app/test-pmd/5tswap.c | 5 +++--
app/test-pmd/csumonly.c | 6 +++---
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +++---
app/test-pmd/iofwd.c | 5 +++--
app/test-pmd/macfwd.c | 5 +++--
app/test-pmd/macswap.c | 5 +++--
app/test-pmd/noisy_vnf.c | 4 ++++
app/test-pmd/rxonly.c | 5 +++--
app/test-pmd/shared_rxq_fwd.c | 5 +++--
app/test-pmd/testpmd.c | 39 ++++++++++++++++++++++++++++++++++-
app/test-pmd/testpmd.h | 14 +++++++++----
app/test-pmd/txonly.c | 7 ++++---
13 files changed, 81 insertions(+), 27 deletions(-)
diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index f041a5e1d530..03225075716c 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -116,7 +116,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -182,7 +182,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 1c2459851522..03e141221a56 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -868,7 +868,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
@@ -1200,8 +1200,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index fd6abc0f4124..7b2f0ffdf0f5 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -196,7 +196,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
RTE_PER_LCORE(_next_flow) = next_flow;
- get_end_cycles(fs, start_tsc);
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 066f2a3ab79b..2fc9f96dc95f 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -303,7 +303,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_replies = 0;
@@ -508,8 +508,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
} while (++nb_tx < nb_replies);
}
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8fafdec548ad..e5a2dbe20c69 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -59,7 +59,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
@@ -84,7 +84,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index beb220fbb462..9db623999970 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -65,7 +65,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -115,7 +115,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4f8deb338296..4db134ac1d91 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,7 +66,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -93,7 +93,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..290bdcda45f0 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -152,6 +152,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
uint64_t delta_ms;
bool needs_flush = false;
uint64_t now;
+ uint64_t start_tsc = 0;
+
+ get_start_cycles(&start_tsc);
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
@@ -219,6 +222,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+ get_end_cycles(fs, start_tsc, nb_rx + nb_tx);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index d528d4f34e60..519202339e16 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -58,13 +58,14 @@ pkt_burst_receive(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/shared_rxq_fwd.c b/app/test-pmd/shared_rxq_fwd.c
index 2e9047804b5b..395b73bfe52e 100644
--- a/app/test-pmd/shared_rxq_fwd.c
+++ b/app/test-pmd/shared_rxq_fwd.c
@@ -102,9 +102,10 @@ shared_rxq_fwd(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
forward_shared_rxq(fs, nb_rx, pkts_burst);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 134d79a55547..6ad91334d352 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2184,6 +2184,7 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
+ fs->busy_cycles = 0;
fs->core_cycles = 0;
}
}
@@ -2260,6 +2261,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
+ fc->lcore_id = rte_lcore_id();
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
do {
@@ -2288,6 +2290,37 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+ int c;
+
+ for (c = 0; c < nb_lcores; c++) {
+ fc = fwd_lcores[c];
+ if (fc->lcore_id != lcore_id)
+ continue;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ *busy_cycles = 0;
+ *total_cycles = 0;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++)
+ if (!fsm[sm_id]->disabled) {
+ *busy_cycles += fsm[sm_id]->busy_cycles;
+ *total_cycles += fsm[sm_id]->core_cycles;
+ }
+
+ return 0;
+ }
+
+ return -1;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4522,6 +4555,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..5dbf5d1c465c 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,8 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
+ uint64_t core_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +361,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ unsigned int lcore_id; /**< return value of rte_lcore_id() */
};
/*
@@ -836,10 +838,14 @@ get_start_cycles(uint64_t *start_tsc)
}
static inline void
-get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
+get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc, uint64_t nb_packets)
{
- if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ if (record_core_cycles) {
+ uint64_t cycles = rte_rdtsc() - start_tsc;
+ fs->core_cycles += cycles;
+ if (nb_packets > 0)
+ fs->busy_cycles += cycles;
+ }
}
static inline void
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952daa..ad37626ff63c 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
struct rte_ether_hdr eth_hdr;
- uint16_t nb_tx;
+ uint16_t nb_tx = 0;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
@@ -392,7 +392,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
}
if (nb_pkt == 0)
- return;
+ goto end;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -426,7 +426,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
} while (++nb_tx < nb_pkt);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH 2/4] eal: allow applications to report their cpu usage
2022-12-07 16:21 ` [PATCH 2/4] eal: allow applications to report their cpu usage Robin Jarry
@ 2022-12-13 15:49 ` Robin Jarry
2022-12-13 16:39 ` Morten Brørup
2022-12-13 17:45 ` Tyler Retzlaff
0 siblings, 2 replies; 134+ messages in thread
From: Robin Jarry @ 2022-12-13 15:49 UTC (permalink / raw)
To: dev; +Cc: Morten Brørup, Kevin Laatz
Robin Jarry, Dec 07, 2022 at 17:21:
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of TSC cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
> v3 -> v4: Changed nomenclature: CPU cycles -> TSC cycles
As you may have noticed, I forgot to add -v4 for that iteration...
> diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
> index 6938c3fd7b81..df7f0a8e07c6 100644
> --- a/lib/eal/include/rte_lcore.h
> +++ b/lib/eal/include/rte_lcore.h
> @@ -328,6 +328,35 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
> int
> rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
>
> +/**
> + * Callback to allow applications to report CPU usage.
> + *
> + * @param [in] lcore_id
> + * The lcore to consider.
> + * @param [out] busy_cycles
> + * The amount of busy time since application start, in TSC cycles.
> + * @param [out] total_cycles
> + * The total amount of time since application start, in TSC cycles.
> + * @return
> + * - 0 if both busy and total were set correctly.
> + * - a negative value if the information is not available or if any error occurred.
> + */
> +typedef int (*rte_lcore_usage_cb)(
> + unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles);
Instead of two uint64_t pointers, I was thinking a better approach would
be to pass a pointer to a struct containing these two fields. That way
it leaves room for adding more counters if need be. And do so without
breaking the ABI.
Thoughts?
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH 2/4] eal: allow applications to report their cpu usage
2022-12-13 15:49 ` Robin Jarry
@ 2022-12-13 16:39 ` Morten Brørup
2022-12-13 17:45 ` Tyler Retzlaff
1 sibling, 0 replies; 134+ messages in thread
From: Morten Brørup @ 2022-12-13 16:39 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Kevin Laatz, mattias.ronnblom
> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Tuesday, 13 December 2022 16.50
>
> Robin Jarry, Dec 07, 2022 at 17:21:
> > Allow applications to register a callback that will be invoked in
> > rte_lcore_dump() and when requesting lcore info in the telemetry API.
> >
> > The callback is expected to return the number of TSC cycles that have
> > passed since application start and the number of these cycles that
> were
> > spent doing busy work.
> >
> > Signed-off-by: Robin Jarry <rjarry@redhat.com>
> > Acked-by: Morten Brørup <mb@smartsharesystems.com>
> > ---
> > v3 -> v4: Changed nomenclature: CPU cycles -> TSC cycles
>
> As you may have noticed, I forgot to add -v4 for that iteration...
>
> > diff --git a/lib/eal/include/rte_lcore.h
> b/lib/eal/include/rte_lcore.h
> > index 6938c3fd7b81..df7f0a8e07c6 100644
> > --- a/lib/eal/include/rte_lcore.h
> > +++ b/lib/eal/include/rte_lcore.h
> > @@ -328,6 +328,35 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int
> lcore_id, void *arg);
> > int
> > rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
> >
> > +/**
> > + * Callback to allow applications to report CPU usage.
> > + *
> > + * @param [in] lcore_id
> > + * The lcore to consider.
> > + * @param [out] busy_cycles
> > + * The amount of busy time since application start, in TSC cycles.
> > + * @param [out] total_cycles
> > + * The total amount of time since application start, in TSC
> cycles.
> > + * @return
> > + * - 0 if both busy and total were set correctly.
> > + * - a negative value if the information is not available or if
> any error occurred.
> > + */
> > +typedef int (*rte_lcore_usage_cb)(
> > + unsigned int lcore_id, uint64_t *busy_cycles, uint64_t
> *total_cycles);
>
> Instead of two uint64_t pointers, I was thinking a better approach
> would
> be to pass a pointer to a struct containing these two fields. That way
> it leaves room for adding more counters if need be. And do so without
> breaking the ABI.
>
> Thoughts?
I like the idea.
For compatibility between newer DPDK libraries (with more fields) and older applications, the callback should return an indication of how much of the structure it has filled, so DPDK knows that some fields are unfilled.
The simplest method would be that the callback returns the number of bytes of the structure filled instead of 0 on success. However, that would not allow for holes in the returned structure.
Alternatively, a bitfield can be the first field in the structure, each bit representing a data field in the structure. That would allow flexibility to fill any of up to 64 fields. So with total_cycles and busy_cycles as data fields, the returned structure would contain e.g. {3, 1000, 900}. (As a personal preference, I would put total_cycles before busy_cycles in such a structure.)
And I'm not saying that fields must be uint64_t; they can be any size.
On the other hand, I might be suggesting too much flexibility with the bitfield proposal. Perhaps the simple method suffices. And perhaps only uint64_t fields suffice.
-Morten
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH 2/4] eal: allow applications to report their cpu usage
2022-12-13 15:49 ` Robin Jarry
2022-12-13 16:39 ` Morten Brørup
@ 2022-12-13 17:45 ` Tyler Retzlaff
1 sibling, 0 replies; 134+ messages in thread
From: Tyler Retzlaff @ 2022-12-13 17:45 UTC (permalink / raw)
To: Robin Jarry; +Cc: dev, Morten Brørup, Kevin Laatz
On Tue, Dec 13, 2022 at 04:49:31PM +0100, Robin Jarry wrote:
> Robin Jarry, Dec 07, 2022 at 17:21:
> > Allow applications to register a callback that will be invoked in
> > rte_lcore_dump() and when requesting lcore info in the telemetry API.
> >
> > The callback is expected to return the number of TSC cycles that have
> > passed since application start and the number of these cycles that were
> > spent doing busy work.
> >
> > Signed-off-by: Robin Jarry <rjarry@redhat.com>
> > Acked-by: Morten Brørup <mb@smartsharesystems.com>
> > ---
> > v3 -> v4: Changed nomenclature: CPU cycles -> TSC cycles
>
> As you may have noticed, I forgot to add -v4 for that iteration...
>
> > diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
> > index 6938c3fd7b81..df7f0a8e07c6 100644
> > --- a/lib/eal/include/rte_lcore.h
> > +++ b/lib/eal/include/rte_lcore.h
> > @@ -328,6 +328,35 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
> > int
> > rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
> >
> > +/**
> > + * Callback to allow applications to report CPU usage.
> > + *
> > + * @param [in] lcore_id
> > + * The lcore to consider.
> > + * @param [out] busy_cycles
> > + * The amount of busy time since application start, in TSC cycles.
> > + * @param [out] total_cycles
> > + * The total amount of time since application start, in TSC cycles.
> > + * @return
> > + * - 0 if both busy and total were set correctly.
> > + * - a negative value if the information is not available or if any error occurred.
> > + */
> > +typedef int (*rte_lcore_usage_cb)(
> > + unsigned int lcore_id, uint64_t *busy_cycles, uint64_t *total_cycles);
>
> Instead of two uint64_t pointers, I was thinking a better approach would
> be to pass a pointer to a struct containing these two fields. That way
> it leaves room for adding more counters if need be. And do so without
> breaking the ABI.
>
> Thoughts?
yes, please.
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v5 0/4] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (6 preceding siblings ...)
2022-12-07 16:21 ` [PATCH " Robin Jarry
@ 2022-12-16 10:21 ` Robin Jarry
2022-12-16 10:21 ` [PATCH v5 1/4] eal: add lcore info in telemetry Robin Jarry
` (4 more replies)
2023-01-19 15:06 ` [PATCH v6 0/5] " Robin Jarry
` (9 subsequent siblings)
17 siblings, 5 replies; 134+ messages in thread
From: Robin Jarry @ 2022-12-16 10:21 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
Changes since v4:
- rte_lcore_usage_cb now takes a pointer to a rte_lcore_usage structure.
I chose not to include any API version tracking mechanism since the
unsupported/unused fields can simply be left to zero. This is only
telemetry after all.
Changes since v3:
- Changed nomenclature from CPU cycles to TSC cycles in the docstring of
rte_lcore_usage_cb.
Changes since v2:
- Fixed typos in docstrings.
- Used if (xxx != NULL) instead of if (xxx) test convention.
- Guarded against an unlikely race if rte_lcore_dump() is called by
a thread while another one calls rte_lcore_register_usage_cb(NULL).
- s/utilization/usage/
- Fixed build on Windows.
Changes since v1:
- The cpuset field in telemetry is now a JSON list of CPU ids.
- Applications must now report their raw CPU cycles counts. The busyness
ratio and rate of change is left to external monitoring tools.
- Renamed show lcores -> dump_lcores in testpmd.
Robin Jarry (4):
eal: add lcore info in telemetry
eal: allow applications to report their cpu usage
testpmd: add dump_lcores command
testpmd: report lcore usage
app/test-pmd/5tswap.c | 5 +-
app/test-pmd/cmdline.c | 3 +
app/test-pmd/csumonly.c | 6 +-
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +-
app/test-pmd/iofwd.c | 5 +-
app/test-pmd/macfwd.c | 5 +-
app/test-pmd/macswap.c | 5 +-
app/test-pmd/noisy_vnf.c | 4 +
app/test-pmd/rxonly.c | 5 +-
app/test-pmd/shared_rxq_fwd.c | 5 +-
app/test-pmd/testpmd.c | 39 ++++++++-
app/test-pmd/testpmd.h | 14 +++-
app/test-pmd/txonly.c | 7 +-
lib/eal/common/eal_common_lcore.c | 129 +++++++++++++++++++++++++++++-
lib/eal/include/rte_lcore.h | 35 ++++++++
lib/eal/version.map | 1 +
17 files changed, 246 insertions(+), 30 deletions(-)
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v5 1/4] eal: add lcore info in telemetry
2022-12-16 10:21 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
@ 2022-12-16 10:21 ` Robin Jarry
2023-01-18 9:42 ` Kevin Laatz
2022-12-16 10:21 ` [PATCH v5 2/4] eal: allow applications to report their cpu usage Robin Jarry
` (3 subsequent siblings)
4 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-12-16 10:21 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup
Report the same information as rte_lcore_dump() in the telemetry
API into /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v4 -> v5: No change
lib/eal/common/eal_common_lcore.c | 96 +++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..16548977dce8 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,9 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
#include "eal_private.h"
#include "eal_thread.h"
@@ -456,3 +459,96 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_info *info = arg;
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct rte_tel_data *cpuset;
+ const char *role;
+ unsigned int cpu;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ switch (cfg->lcore_role[lcore_id]) {
+ case ROLE_RTE:
+ role = "RTE";
+ break;
+ case ROLE_SERVICE:
+ role = "SERVICE";
+ break;
+ case ROLE_NON_EAL:
+ role = "NON_EAL";
+ break;
+ default:
+ role = "UNKNOWN";
+ break;
+ }
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", role);
+ cpuset = rte_tel_data_alloc();
+ if (!cpuset)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ char *endptr = NULL;
+ if (params == NULL || strlen(params) == 0)
+ return -EINVAL;
+ errno = 0;
+ info.lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (endptr == params)
+ return -EINVAL;
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v5 2/4] eal: allow applications to report their cpu usage
2022-12-16 10:21 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
2022-12-16 10:21 ` [PATCH v5 1/4] eal: add lcore info in telemetry Robin Jarry
@ 2022-12-16 10:21 ` Robin Jarry
2022-12-16 10:47 ` Morten Brørup
` (2 more replies)
2022-12-16 10:21 ` [PATCH v5 3/4] testpmd: add dump_lcores command Robin Jarry
` (2 subsequent siblings)
4 siblings, 3 replies; 134+ messages in thread
From: Robin Jarry @ 2022-12-16 10:21 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of TSC cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v4 -> v5:
The callback now takes a pointer to a rte_lcore_usage structure.
I chose not to include any API version tracking mechanism since the
unsupported/unused fields can simply be left to zero. This is only
telemetry after all.
lib/eal/common/eal_common_lcore.c | 33 ++++++++++++++++++++++++++---
lib/eal/include/rte_lcore.h | 35 +++++++++++++++++++++++++++++++
lib/eal/version.map | 1 +
3 files changed, 66 insertions(+), 3 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 16548977dce8..210636d21d6b 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -422,11 +423,21 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
- char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
const char *role;
FILE *f = arg;
int ret;
@@ -446,11 +457,19 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
break;
}
+ memset(&usage, 0, sizeof(usage));
+ usage_str[0] = '\0';
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ snprintf(usage_str, sizeof(usage_str), ", busy cycles %"PRIu64"/%"PRIu64,
+ usage.busy_cycles, usage.total_cycles);
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ ret == 0 ? "" : "...", usage_str);
+
return 0;
}
@@ -489,7 +508,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct lcore_telemetry_info *info = arg;
struct rte_config *cfg = rte_eal_get_configuration();
+ struct rte_lcore_usage usage;
struct rte_tel_data *cpuset;
+ rte_lcore_usage_cb usage_cb;
const char *role;
unsigned int cpu;
@@ -522,6 +543,12 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
rte_tel_data_add_array_int(cpuset, cpu);
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ memset(&usage, 0, sizeof(usage));
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_dict_u64(info->d, "busy_cycles", usage.busy_cycles);
+ rte_tel_data_add_dict_u64(info->d, "total_cycles", usage.total_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 6938c3fd7b81..a92313577355 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,41 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * CPU usage statistics.
+ */
+struct rte_lcore_usage {
+ uint64_t total_cycles;
+ /**< The total amount of time since application start, in TSC cycles. */
+ uint64_t busy_cycles;
+ /**< The amount of busy time since application start, in TSC cycles. */
+};
+
+/**
+ * Callback to allow applications to report CPU usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] usage
+ * Counters representing this lcore usage. This can never be NULL.
+ * @return
+ * - 0 if fields in usage were updated successfully. The fields that the
+ * application does not support should be left to their default value.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
+
+/**
+ * Register a callback from an application to be called in rte_lcore_dump() and
+ * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
+ * report CPU usage statistics via this callback.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 7ad12a7dc985..30fd216a12ea 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -440,6 +440,7 @@ EXPERIMENTAL {
rte_thread_detach;
rte_thread_equal;
rte_thread_join;
+ rte_lcore_register_usage_cb;
};
INTERNAL {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v5 3/4] testpmd: add dump_lcores command
2022-12-16 10:21 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
2022-12-16 10:21 ` [PATCH v5 1/4] eal: add lcore info in telemetry Robin Jarry
2022-12-16 10:21 ` [PATCH v5 2/4] eal: allow applications to report their cpu usage Robin Jarry
@ 2022-12-16 10:21 ` Robin Jarry
2022-12-22 12:43 ` Konstantin Ananyev
2022-12-16 10:21 ` [PATCH v5 4/4] testpmd: report lcore usage Robin Jarry
2023-01-18 9:13 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
4 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-12-16 10:21 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v4 -> v5: no change
app/test-pmd/cmdline.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b32dc8bfd445..96474d2ae458 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8345,6 +8345,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8358,6 +8360,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v5 4/4] testpmd: report lcore usage
2022-12-16 10:21 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
` (2 preceding siblings ...)
2022-12-16 10:21 ` [PATCH v5 3/4] testpmd: add dump_lcores command Robin Jarry
@ 2022-12-16 10:21 ` Robin Jarry
2022-12-22 12:44 ` Konstantin Ananyev
2023-01-18 9:13 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
4 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-12-16 10:21 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup
Reuse the --record-core-cycles option to account for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
to accept an additional argument for the number of processed packets.
Update fwd_stream.busy_cycles when the number of packets is greater than
zero.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
field in struct fwd_lcore to identify the correct index in fwd_lcores
and return the sum of busy/total cycles of all fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
v4 -> v5: Updated to use rte_lcore_usage struct.
app/test-pmd/5tswap.c | 5 +++--
app/test-pmd/csumonly.c | 6 +++---
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +++---
app/test-pmd/iofwd.c | 5 +++--
app/test-pmd/macfwd.c | 5 +++--
app/test-pmd/macswap.c | 5 +++--
app/test-pmd/noisy_vnf.c | 4 ++++
app/test-pmd/rxonly.c | 5 +++--
app/test-pmd/shared_rxq_fwd.c | 5 +++--
app/test-pmd/testpmd.c | 39 ++++++++++++++++++++++++++++++++++-
app/test-pmd/testpmd.h | 14 +++++++++----
app/test-pmd/txonly.c | 7 ++++---
13 files changed, 81 insertions(+), 27 deletions(-)
diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index f041a5e1d530..03225075716c 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -116,7 +116,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -182,7 +182,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 1c2459851522..03e141221a56 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -868,7 +868,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
@@ -1200,8 +1200,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index fd6abc0f4124..7b2f0ffdf0f5 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -196,7 +196,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
RTE_PER_LCORE(_next_flow) = next_flow;
- get_end_cycles(fs, start_tsc);
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 066f2a3ab79b..2fc9f96dc95f 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -303,7 +303,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_replies = 0;
@@ -508,8 +508,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
} while (++nb_tx < nb_replies);
}
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8fafdec548ad..e5a2dbe20c69 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -59,7 +59,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
@@ -84,7 +84,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index beb220fbb462..9db623999970 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -65,7 +65,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -115,7 +115,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4f8deb338296..4db134ac1d91 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,7 +66,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -93,7 +93,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..290bdcda45f0 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -152,6 +152,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
uint64_t delta_ms;
bool needs_flush = false;
uint64_t now;
+ uint64_t start_tsc = 0;
+
+ get_start_cycles(&start_tsc);
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
@@ -219,6 +222,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+ get_end_cycles(fs, start_tsc, nb_rx + nb_tx);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index d528d4f34e60..519202339e16 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -58,13 +58,14 @@ pkt_burst_receive(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/shared_rxq_fwd.c b/app/test-pmd/shared_rxq_fwd.c
index 2e9047804b5b..395b73bfe52e 100644
--- a/app/test-pmd/shared_rxq_fwd.c
+++ b/app/test-pmd/shared_rxq_fwd.c
@@ -102,9 +102,10 @@ shared_rxq_fwd(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
forward_shared_rxq(fs, nb_rx, pkts_burst);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 134d79a55547..d80867b91b3d 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2184,6 +2184,7 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
+ fs->busy_cycles = 0;
fs->core_cycles = 0;
}
}
@@ -2260,6 +2261,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
+ fc->lcore_id = rte_lcore_id();
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
do {
@@ -2288,6 +2290,37 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+ int c;
+
+ for (c = 0; c < nb_lcores; c++) {
+ fc = fwd_lcores[c];
+ if (fc->lcore_id != lcore_id)
+ continue;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ usage->busy_cycles = 0;
+ usage->total_cycles = 0;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++)
+ if (!fsm[sm_id]->disabled) {
+ usage->busy_cycles += fsm[sm_id]->busy_cycles;
+ usage->total_cycles += fsm[sm_id]->core_cycles;
+ }
+
+ return 0;
+ }
+
+ return -1;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4522,6 +4555,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..5dbf5d1c465c 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,8 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
+ uint64_t core_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +361,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ unsigned int lcore_id; /**< return value of rte_lcore_id() */
};
/*
@@ -836,10 +838,14 @@ get_start_cycles(uint64_t *start_tsc)
}
static inline void
-get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
+get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc, uint64_t nb_packets)
{
- if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ if (record_core_cycles) {
+ uint64_t cycles = rte_rdtsc() - start_tsc;
+ fs->core_cycles += cycles;
+ if (nb_packets > 0)
+ fs->busy_cycles += cycles;
+ }
}
static inline void
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952daa..ad37626ff63c 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
struct rte_ether_hdr eth_hdr;
- uint16_t nb_tx;
+ uint16_t nb_tx = 0;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
@@ -392,7 +392,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
}
if (nb_pkt == 0)
- return;
+ goto end;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -426,7 +426,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
} while (++nb_tx < nb_pkt);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2022-12-16 10:21 ` [PATCH v5 2/4] eal: allow applications to report their cpu usage Robin Jarry
@ 2022-12-16 10:47 ` Morten Brørup
2023-01-04 10:13 ` Robin Jarry
2022-12-22 12:41 ` Konstantin Ananyev
2023-01-04 10:15 ` Robin Jarry
2 siblings, 1 reply; 134+ messages in thread
From: Morten Brørup @ 2022-12-16 10:47 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz
> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Friday, 16 December 2022 11.21
>
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of TSC cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
> v4 -> v5:
>
> The callback now takes a pointer to a rte_lcore_usage structure.
> I chose not to include any API version tracking mechanism since the
> unsupported/unused fields can simply be left to zero. This is only
> telemetry after all.
ACK to this decision, with a minor clarification to avoid any misinterpretation:
The callback should not modify (i.e. zero out) unsupported/unused fields.
The caller needs to clear the structure before calling the callback - because the callback might not use the updated size of the structure, if the application was written for an older DPDK version with a smaller structure. I can see you already do this. Consider adding a comment about it in the code.
[...]
> static int
> lcore_dump_cb(unsigned int lcore_id, void *arg)
> {
> struct rte_config *cfg = rte_eal_get_configuration();
> - char cpuset[RTE_CPU_AFFINITY_STR_LEN];
> + char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
> + struct rte_lcore_usage usage;
> + rte_lcore_usage_cb usage_cb;
> const char *role;
> FILE *f = arg;
> int ret;
> @@ -446,11 +457,19 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
> break;
> }
>
> + memset(&usage, 0, sizeof(usage));
I would move this memset() inside the below if-block.
> + usage_str[0] = '\0';
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
Move memset() inside here, and add comment:
+ /* The application's callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
> + snprintf(usage_str, sizeof(usage_str), ", busy cycles
> %"PRIu64"/%"PRIu64,
> + usage.busy_cycles, usage.total_cycles);
> + }
[...]
> @@ -522,6 +543,12 @@ lcore_telemetry_info_cb(unsigned int lcore_id,
> void *arg)
> if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
> rte_tel_data_add_array_int(cpuset, cpu);
> rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> + memset(&usage, 0, sizeof(usage));
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
Same comment as above: Move memset() inside here, and add a comment about why the structure is cleared here.
> + rte_tel_data_add_dict_u64(info->d, "busy_cycles",
> usage.busy_cycles);
> + rte_tel_data_add_dict_u64(info->d, "total_cycles",
> usage.total_cycles);
> + }
>
> return 0;
> }
> diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
> index 6938c3fd7b81..a92313577355 100644
> --- a/lib/eal/include/rte_lcore.h
> +++ b/lib/eal/include/rte_lcore.h
> @@ -328,6 +328,41 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int
> lcore_id, void *arg);
> int
> rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
>
> +/**
> + * CPU usage statistics.
> + */
> +struct rte_lcore_usage {
> + uint64_t total_cycles;
> + /**< The total amount of time since application start, in TSC
> cycles. */
> + uint64_t busy_cycles;
> + /**< The amount of busy time since application start, in TSC
> cycles. */
> +};
> +
> +/**
> + * Callback to allow applications to report CPU usage.
> + *
> + * @param [in] lcore_id
> + * The lcore to consider.
> + * @param [out] usage
> + * Counters representing this lcore usage. This can never be NULL.
> + * @return
> + * - 0 if fields in usage were updated successfully. The fields that
> the
> + * application does not support should be left to their default
> value.
"should be left to their default value." -> "must not be modified."
> + * - a negative value if the information is not available or if any
> error occurred.
> + */
> +typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct
> rte_lcore_usage *usage);
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2022-12-16 10:21 ` [PATCH v5 2/4] eal: allow applications to report their cpu usage Robin Jarry
2022-12-16 10:47 ` Morten Brørup
@ 2022-12-22 12:41 ` Konstantin Ananyev
2023-01-04 10:10 ` Robin Jarry
2023-01-04 10:15 ` Robin Jarry
2 siblings, 1 reply; 134+ messages in thread
From: Konstantin Ananyev @ 2022-12-22 12:41 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
>
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of TSC cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
> v4 -> v5:
>
> The callback now takes a pointer to a rte_lcore_usage structure.
> I chose not to include any API version tracking mechanism since the
> unsupported/unused fields can simply be left to zero. This is only
> telemetry after all.
>
> lib/eal/common/eal_common_lcore.c | 33 ++++++++++++++++++++++++++---
> lib/eal/include/rte_lcore.h | 35 +++++++++++++++++++++++++++++++
> lib/eal/version.map | 1 +
> 3 files changed, 66 insertions(+), 3 deletions(-)
>
> diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
> index 16548977dce8..210636d21d6b 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -2,6 +2,7 @@
> * Copyright(c) 2010-2014 Intel Corporation
> */
>
> +#include <inttypes.h>
> #include <stdlib.h>
> #include <string.h>
>
> @@ -422,11 +423,21 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
> return ret;
> }
>
> +static rte_lcore_usage_cb lcore_usage_cb;
> +
> +void
> +rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
> +{
> + lcore_usage_cb = cb;
> +}
> +
LGTM in general.
One question: I assume this function is supposed to be called just once at app init stage,
so we probably don't need to worry about possible sync issues, right?
If so, then it is probably worth mentioning in the function's formal comments below.
> static int
> lcore_dump_cb(unsigned int lcore_id, void *arg)
> {
> struct rte_config *cfg = rte_eal_get_configuration();
> - char cpuset[RTE_CPU_AFFINITY_STR_LEN];
> + char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
> + struct rte_lcore_usage usage;
> + rte_lcore_usage_cb usage_cb;
> const char *role;
> FILE *f = arg;
> int ret;
> @@ -446,11 +457,19 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
> break;
> }
>
> + memset(&usage, 0, sizeof(usage));
> + usage_str[0] = '\0';
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
> + snprintf(usage_str, sizeof(usage_str), ", busy cycles %"PRIu64"/%"PRIu64,
> + usage.busy_cycles, usage.total_cycles);
> + }
> ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
> sizeof(cpuset));
> - fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
> + fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
> rte_lcore_to_socket_id(lcore_id), role, cpuset,
> - ret == 0 ? "" : "...");
> + ret == 0 ? "" : "...", usage_str);
> +
> return 0;
> }
>
> @@ -489,7 +508,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> {
> struct lcore_telemetry_info *info = arg;
> struct rte_config *cfg = rte_eal_get_configuration();
> + struct rte_lcore_usage usage;
> struct rte_tel_data *cpuset;
> + rte_lcore_usage_cb usage_cb;
> const char *role;
> unsigned int cpu;
>
> @@ -522,6 +543,12 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
> rte_tel_data_add_array_int(cpuset, cpu);
> rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> + memset(&usage, 0, sizeof(usage));
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
> + rte_tel_data_add_dict_u64(info->d, "busy_cycles", usage.busy_cycles);
> + rte_tel_data_add_dict_u64(info->d, "total_cycles", usage.total_cycles);
> + }
>
> return 0;
> }
> diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
> index 6938c3fd7b81..a92313577355 100644
> --- a/lib/eal/include/rte_lcore.h
> +++ b/lib/eal/include/rte_lcore.h
> @@ -328,6 +328,41 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
> int
> rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
>
> +/**
> + * CPU usage statistics.
> + */
> +struct rte_lcore_usage {
> + uint64_t total_cycles;
> + /**< The total amount of time since application start, in TSC cycles. */
> + uint64_t busy_cycles;
> + /**< The amount of busy time since application start, in TSC cycles. */
> +};
> +
> +/**
> + * Callback to allow applications to report CPU usage.
> + *
> + * @param [in] lcore_id
> + * The lcore to consider.
> + * @param [out] usage
> + * Counters representing this lcore usage. This can never be NULL.
> + * @return
> + * - 0 if fields in usage were updated successfully. The fields that the
> + * application does not support should be left to their default value.
> + * - a negative value if the information is not available or if any error occurred.
> + */
> +typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
> +
> +/**
> + * Register a callback from an application to be called in rte_lcore_dump() and
> + * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
> + * report CPU usage statistics via this callback.
> + *
> + * @param cb
> + * The callback function.
> + */
> +__rte_experimental
> +void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
> +
> /**
> * List all lcores.
> *
> diff --git a/lib/eal/version.map b/lib/eal/version.map
> index 7ad12a7dc985..30fd216a12ea 100644
> --- a/lib/eal/version.map
> +++ b/lib/eal/version.map
> @@ -440,6 +440,7 @@ EXPERIMENTAL {
> rte_thread_detach;
> rte_thread_equal;
> rte_thread_join;
> + rte_lcore_register_usage_cb;
> };
>
> INTERNAL {
> --
> 2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 3/4] testpmd: add dump_lcores command
2022-12-16 10:21 ` [PATCH v5 3/4] testpmd: add dump_lcores command Robin Jarry
@ 2022-12-22 12:43 ` Konstantin Ananyev
0 siblings, 0 replies; 134+ messages in thread
From: Konstantin Ananyev @ 2022-12-22 12:43 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
>
> Add a simple command that calls rte_lcore_dump().
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
> v4 -> v5: no change
>
> app/test-pmd/cmdline.c | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
> index b32dc8bfd445..96474d2ae458 100644
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -8345,6 +8345,8 @@ static void cmd_dump_parsed(void *parsed_result,
> rte_mempool_list_dump(stdout);
> else if (!strcmp(res->dump, "dump_devargs"))
> rte_devargs_dump(stdout);
> + else if (!strcmp(res->dump, "dump_lcores"))
> + rte_lcore_dump(stdout);
> else if (!strcmp(res->dump, "dump_log_types"))
> rte_log_dump(stdout);
> }
> @@ -8358,6 +8360,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
> "dump_ring#"
> "dump_mempool#"
> "dump_devargs#"
> + "dump_lcores#"
> "dump_log_types");
>
> static cmdline_parse_inst_t cmd_dump = {
> --
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> 2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 4/4] testpmd: report lcore usage
2022-12-16 10:21 ` [PATCH v5 4/4] testpmd: report lcore usage Robin Jarry
@ 2022-12-22 12:44 ` Konstantin Ananyev
0 siblings, 0 replies; 134+ messages in thread
From: Konstantin Ananyev @ 2022-12-22 12:44 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
> Reuse the --record-core-cycles option to account for busy cycles. One
> turn of packet_fwd_t is considered "busy" if there was at least one
> received or transmitted packet.
>
> Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
> to accept an additional argument for the number of processed packets.
> Update fwd_stream.busy_cycles when the number of packets is greater than
> zero.
>
> When --record-core-cycles is specified, register a callback with
> rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
> field in struct fwd_lcore to identify the correct index in fwd_lcores
> and return the sum of busy/total cycles of all fwd_streams.
>
> This makes the cycles counters available in rte_lcore_dump() and the
> lcore telemetry API:
>
> testpmd> dump_lcores
> lcore 3, socket 0, role RTE, cpuset 3
> lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
> lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
>
> --> /eal/lcore/info,4
> {
> "/eal/lcore/info": {
> "lcore_id": 4,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 4
> ],
> "busy_cycles": 10623340318,
> "total_cycles": 55331167354
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> 2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2022-12-22 12:41 ` Konstantin Ananyev
@ 2023-01-04 10:10 ` Robin Jarry
2023-01-04 10:53 ` Konstantin Ananyev
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-01-04 10:10 UTC (permalink / raw)
To: Konstantin Ananyev, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
Konstantin Ananyev, Dec 22, 2022 at 13:41:
> > +static rte_lcore_usage_cb lcore_usage_cb;
> > +
> > +void
> > +rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
> > +{
> > + lcore_usage_cb = cb;
> > +}
> > +
>
> LGTM in general.
> One question: I assume this function is supposed to be called just
> once at app init stage, so we probably don't need to worry about
> possible sync issues, right? If so, then it is probably worth mentioning
> in the function's formal comments below.
Yes, this is correct. I'll add a mention in the function docstring to
explain that this should be called once at init. Also I'll add a comment
in the function body to indicate that there is no risk of sync issues.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2022-12-16 10:47 ` Morten Brørup
@ 2023-01-04 10:13 ` Robin Jarry
2023-01-04 10:28 ` Morten Brørup
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-01-04 10:13 UTC (permalink / raw)
To: Morten Brørup, dev; +Cc: Tyler Retzlaff, Kevin Laatz
Morten Brørup, Dec 16, 2022 at 11:47:
> > + usage_str[0] = '\0';
> > + usage_cb = lcore_usage_cb;
> > + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
>
> Move memset() inside here, and add comment:
>
> + /* The application's callback may not set all the fields in the structure, so clear it here. */
> + memset(&usage, 0, sizeof(usage));
This may make the code more complex than it needs to be (two nested ifs)
for very little performance benefit. I'm not sure it is worth it. I can
add the comment, though.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2022-12-16 10:21 ` [PATCH v5 2/4] eal: allow applications to report their cpu usage Robin Jarry
2022-12-16 10:47 ` Morten Brørup
2022-12-22 12:41 ` Konstantin Ananyev
@ 2023-01-04 10:15 ` Robin Jarry
2 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-01-04 10:15 UTC (permalink / raw)
To: dev, Kevin Laatz, Bruce Richardson; +Cc: Tyler Retzlaff, Morten Brørup
Robin Jarry, Dec 16, 2022 at 11:21:
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of TSC cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
> v4 -> v5:
>
> The callback now takes a pointer to a rte_lcore_usage structure.
> I chose not to include any API version tracking mechanism since the
> unsupported/unused fields can simply be left to zero. This is only
> telemetry after all.
Hi Kevin, Bruce,
did you have a chance to go over this series? Do you have any comments?
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-01-04 10:13 ` Robin Jarry
@ 2023-01-04 10:28 ` Morten Brørup
0 siblings, 0 replies; 134+ messages in thread
From: Morten Brørup @ 2023-01-04 10:28 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz
> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Wednesday, 4 January 2023 11.14
>
> Morten Brørup, Dec 16, 2022 at 11:47:
> > > + usage_str[0] = '\0';
> > > + usage_cb = lcore_usage_cb;
> > > + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
> >
> > Move memset() inside here, and add comment:
> >
> > + /* The application's callback may not set all the fields in the
> structure, so clear it here. */
> > + memset(&usage, 0, sizeof(usage));
>
> This may make the code more complex than it needs to be (two nested
> ifs)
> for very little performance benefit. I'm not sure it is worth it. I can
> add the comment, though.
You are right - I missed that. Just adding the comment is fine.
Please also note my comment regarding the rte_lcore_usage_cb() function description:
"should be left to their default value." -> "must not be modified."
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-01-04 10:10 ` Robin Jarry
@ 2023-01-04 10:53 ` Konstantin Ananyev
2023-01-18 16:46 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Konstantin Ananyev @ 2023-01-04 10:53 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
>
> Konstantin Ananyev, Dec 22, 2022 at 13:41:
> > > +static rte_lcore_usage_cb lcore_usage_cb;
> > > +
> > > +void
> > > +rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
> > > +{
> > > + lcore_usage_cb = cb;
> > > +}
> > > +
> >
> > LGTM in general.
> > One question: I assume this function is supposed to be called just
> > once at app init stage, so we probably don't need to worry about
> > possible sync issues, right? If so, then it is probably worth mentioning
> > in the function's formal comments below.
>
> Yes, this is correct. I'll add a mention in the function docstring to
> explain that this should be called once at init. Also I'll add a comment
> in the function body to indicate that there is no risk of sync issues.
Sounds good to me.
Probably we can even print a warning if someone tries to overwrite
it once again.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 0/4] lcore telemetry improvements
2022-12-16 10:21 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
` (3 preceding siblings ...)
2022-12-16 10:21 ` [PATCH v5 4/4] testpmd: report lcore usage Robin Jarry
@ 2023-01-18 9:13 ` Robin Jarry
4 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-01-18 9:13 UTC (permalink / raw)
To: dev, Thomas Monjalon, David Marchand
Cc: Tyler Retzlaff, Kevin Laatz, Konstantin Ananyev, Mattias Rönnblom
Robin Jarry, Dec 16, 2022 at 11:21:
> This is a follow up on previous work by Kevin Laatz:
>
> http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
>
> This series is aimed at allowing DPDK applications to expose their CPU
> usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
> more basic and naive approach which leaves the cpu cycles accounting
> completely up to the application.
>
> For reference, I have implemented a draft patch in OvS to use
> rte_lcore_register_usage_cb() and report the already available busy
> cycles information.
>
> https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
Since there was no negative feedback, can this be applied for -rc1 once
I fixed the reported cosmetic issues?
Thanks.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 1/4] eal: add lcore info in telemetry
2022-12-16 10:21 ` [PATCH v5 1/4] eal: add lcore info in telemetry Robin Jarry
@ 2023-01-18 9:42 ` Kevin Laatz
2023-01-18 10:21 ` Morten Brørup
2023-01-18 14:45 ` Robin Jarry
0 siblings, 2 replies; 134+ messages in thread
From: Kevin Laatz @ 2023-01-18 9:42 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Morten Brørup
On 16/12/2022 10:21, Robin Jarry wrote:
> Report the same information as rte_lcore_dump() in the telemetry
> API into /eal/lcore/list and /eal/lcore/info,ID.
>
> Example:
>
> --> /eal/lcore/info,3
> {
> "/eal/lcore/info": {
> "lcore_id": 3,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 3
> ]
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
> v4 -> v5: No change
>
> lib/eal/common/eal_common_lcore.c | 96 +++++++++++++++++++++++++++++++
> 1 file changed, 96 insertions(+)
>
> diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
> index 06c594b0224f..16548977dce8 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -10,6 +10,9 @@
> #include <rte_errno.h>
> #include <rte_lcore.h>
> #include <rte_log.h>
> +#ifndef RTE_EXEC_ENV_WINDOWS
> +#include <rte_telemetry.h>
> +#endif
>
> #include "eal_private.h"
> #include "eal_thread.h"
> @@ -456,3 +459,96 @@ rte_lcore_dump(FILE *f)
> {
> rte_lcore_iterate(lcore_dump_cb, f);
> }
> +
> +#ifndef RTE_EXEC_ENV_WINDOWS
> +static int
> +lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
> +{
> + struct rte_tel_data *d = arg;
> + return rte_tel_data_add_array_int(d, lcore_id);
> +}
> +
> +static int
> +handle_lcore_list(const char *cmd __rte_unused,
> + const char *params __rte_unused,
> + struct rte_tel_data *d)
> +{
> + int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
> + if (ret)
> + return ret;
> + return rte_lcore_iterate(lcore_telemetry_id_cb, d);
> +}
> +
> +struct lcore_telemetry_info {
> + unsigned int lcore_id;
> + struct rte_tel_data *d;
> +};
> +
> +static int
> +lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> +{
> + struct lcore_telemetry_info *info = arg;
> + struct rte_config *cfg = rte_eal_get_configuration();
> + struct rte_tel_data *cpuset;
> + const char *role;
> + unsigned int cpu;
> +
> + if (info->lcore_id != lcore_id)
> + return 0;
> +
> + switch (cfg->lcore_role[lcore_id]) {
> + case ROLE_RTE:
> + role = "RTE";
> + break;
> + case ROLE_SERVICE:
> + role = "SERVICE";
> + break;
> + case ROLE_NON_EAL:
> + role = "NON_EAL";
> + break;
> + default:
> + role = "UNKNOWN";
> + break;
> + }
> + rte_tel_data_start_dict(info->d);
> + rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
> + rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
> + rte_tel_data_add_dict_string(info->d, "role", role);
> + cpuset = rte_tel_data_alloc();
> + if (!cpuset)
> + return -ENOMEM;
> + rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
> + for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
> + if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
> + rte_tel_data_add_array_int(cpuset, cpu);
> + rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> +
> + return 0;
> +}
> +
> +static int
> +handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
> +{
> + struct lcore_telemetry_info info = { .d = d };
> + char *endptr = NULL;
> + if (params == NULL || strlen(params) == 0)
> + return -EINVAL;
> + errno = 0;
> + info.lcore_id = strtoul(params, &endptr, 10);
> + if (errno)
> + return -errno;
> + if (endptr == params)
> + return -EINVAL;
> + return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
> +}
> +
> +RTE_INIT(lcore_telemetry)
> +{
> + rte_telemetry_register_cmd(
> + "/eal/lcore/list", handle_lcore_list,
> + "List of lcore ids. Takes no parameters");
> + rte_telemetry_register_cmd(
> + "/eal/lcore/info", handle_lcore_info,
> + "Returns lcore info. Parameters: int lcore_id");
> +}
> +#endif /* !RTE_EXEC_ENV_WINDOWS */
Hi Robin,
Thanks for taking the time to work on this. It is a good implementation
for debug use-cases.
I have 2 suggestions which would improve the usability of the data:
1. Could we make the lcore_id parameter on /eal/lcore/info optional?
This would allow users to read info for all lcores in the application at
once.
2. Could we add 2 additional telemetry endpoints? One which returns an
array of busy_cycles values and the other returns an array of
total_cycles values. These arrays could be used in conjunction with the
/eal/lcore/list endpoint to quickly read the usage related metrics. I've
included an example diff below [1].
We have a use-case beyond debugging in which we read telemetry every few
milliseconds. From a performance point of view, adding the 2 additional
endpoints would be very beneficial.
Thanks,
Kevin
[1]
diff --git a/lib/eal/common/eal_common_lcore.c
b/lib/eal/common/eal_common_lcore.c
index 210636d21d..94ddb276c5 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -569,6 +569,32 @@ handle_lcore_info(const char *cmd __rte_unused,
const char *params, struct rte_t
return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
}
+static int
+lcore_telemetry_busy_cycles_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+ unsigned long cycles = 0;
+
+ memset(&usage, 0, sizeof(usage));
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0)
+ cycles = usage.busy_cycles;
+
+ return rte_tel_data_add_array_u64(d, cycles);
+}
+
+static int
+handle_lcore_busy_cycles(const char *cmd __rte_unused,
+ const char *params __rte_unused, struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_busy_cycles_cb, d);
+}
+
RTE_INIT(lcore_telemetry)
{
rte_telemetry_register_cmd(
@@ -577,5 +603,8 @@ RTE_INIT(lcore_telemetry)
rte_telemetry_register_cmd(
"/eal/lcore/info", handle_lcore_info,
"Returns lcore info. Parameters: int lcore_id");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/busy_cycles", handle_lcore_busy_cycles,
+ "List of busy cycle values. Takes no parameters");
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 1/4] eal: add lcore info in telemetry
2023-01-18 9:42 ` Kevin Laatz
@ 2023-01-18 10:21 ` Morten Brørup
2023-01-18 11:03 ` Kevin Laatz
2023-01-18 14:45 ` Robin Jarry
1 sibling, 1 reply; 134+ messages in thread
From: Morten Brørup @ 2023-01-18 10:21 UTC (permalink / raw)
To: Kevin Laatz, Robin Jarry, dev; +Cc: Tyler Retzlaff, Ciara Power
> From: Kevin Laatz [mailto:kevin.laatz@intel.com]
> Sent: Wednesday, 18 January 2023 10.42
> To: Robin Jarry; dev@dpdk.org
> Cc: Tyler Retzlaff; Morten Brørup
> Subject: Re: [PATCH v5 1/4] eal: add lcore info in telemetry
>
> On 16/12/2022 10:21, Robin Jarry wrote:
> > Report the same information as rte_lcore_dump() in the telemetry
> > API into /eal/lcore/list and /eal/lcore/info,ID.
> >
> > Example:
> >
> > --> /eal/lcore/info,3
> > {
> > "/eal/lcore/info": {
> > "lcore_id": 3,
> > "socket": 0,
> > "role": "RTE",
> > "cpuset": [
> > 3
> > ]
> > }
> > }
> >
> > Signed-off-by: Robin Jarry <rjarry@redhat.com>
> > Acked-by: Morten Brørup <mb@smartsharesystems.com>
> > ---
> Hi Robin,
>
> Thanks for taking the time to work on this. It is a good implementation
> for debug use-cases.
>
> I have 2 suggestions which would improve the usability of the data:
> 1. Could we make the lcore_id parameter on /eal/lcore/info optional?
> This would allow users to read info for all lcores in the application
> at
> once.
+1 to this suggestion.
> 2. Could we add 2 additional telemetry endpoints? One which returns an
> array of busy_cycles values and the other returns an array of
> total_cycles values. These arrays could be used in conjunction with the
> /eal/lcore/list endpoint to quickly read the usage related metrics.
> I've
> included an example diff below [1].
I prefer this done in a more generic way, see below.
>
> We have a use-case beyond debugging in which we read telemetry every
> few
> milliseconds. From a performance point of view, adding the 2 additional
> endpoints would be very beneficial.
>
> Thanks,
> Kevin
>
> [1]
>
> diff --git a/lib/eal/common/eal_common_lcore.c
> b/lib/eal/common/eal_common_lcore.c
> index 210636d21d..94ddb276c5 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -569,6 +569,32 @@ handle_lcore_info(const char *cmd __rte_unused,
> const char *params, struct rte_t
> return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
> }
>
> +static int
> +lcore_telemetry_busy_cycles_cb(unsigned int lcore_id, void *arg)
> +{
> + struct rte_tel_data *d = arg;
> + struct rte_lcore_usage usage;
> + rte_lcore_usage_cb usage_cb;
> + unsigned long cycles = 0;
> +
> + memset(&usage, 0, sizeof(usage));
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0)
> + cycles = usage.busy_cycles;
> +
> + return rte_tel_data_add_array_u64(d, cycles);
> +}
> +
> +static int
> +handle_lcore_busy_cycles(const char *cmd __rte_unused,
> + const char *params __rte_unused, struct rte_tel_data
> *d)
> +{
> + int ret = rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
> + if (ret)
> + return ret;
> + return rte_lcore_iterate(lcore_telemetry_busy_cycles_cb, d);
> +}
> +
> RTE_INIT(lcore_telemetry)
> {
> rte_telemetry_register_cmd(
> @@ -577,5 +603,8 @@ RTE_INIT(lcore_telemetry)
> rte_telemetry_register_cmd(
> "/eal/lcore/info", handle_lcore_info,
> "Returns lcore info. Parameters: int
> lcore_id");
> + rte_telemetry_register_cmd(
> + "/eal/lcore/busy_cycles",
> handle_lcore_busy_cycles,
> + "List of busy cycle values. Takes no
> parameters");
> }
> #endif /* !RTE_EXEC_ENV_WINDOWS */
This should be generalized to support any named field in the rte_lcore_usage structure.
The general path could be: /eal/lcore/usage
With optional parameter lcore_id. This should return one object (or an array of such objects, if lcore_id is not given) with all usage fields and their values, e.g.:
{
"lcore_id": 7,
"total_cycles": 1234,
"usage_cycles": 567
}
The paths to support the array-optimized feature you are requesting could be: /eal/lcores/usage/total_cycles and /eal/lcores/usage/usage_cycles.
These paths should return the arrays as suggested. I only request that you change "/lcore" to plural "/lcores" and add "/usage" to the path before the field name in the usage table.
Alternatively, you could add a path /eal/lcores/usage_array, taking the field names as parameters and outputting multiple arrays like this:
/eal/lcores/usage_array,total_cycles,usage_cycles
{
"total_cycles": [1234, 1234, 1234],
"usage_cycles": [567, 678, 789]
}
But I don't know if this breaks with DPDK's standard REST interface. It would be easier if we had decided on something like OData, instead of inventing our own.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 1/4] eal: add lcore info in telemetry
2023-01-18 10:21 ` Morten Brørup
@ 2023-01-18 11:03 ` Kevin Laatz
2023-01-18 11:35 ` Morten Brørup
0 siblings, 1 reply; 134+ messages in thread
From: Kevin Laatz @ 2023-01-18 11:03 UTC (permalink / raw)
To: Morten Brørup, Robin Jarry, dev; +Cc: Tyler Retzlaff, Ciara Power
On 18/01/2023 10:21, Morten Brørup wrote:
>> From: Kevin Laatz [mailto:kevin.laatz@intel.com]
>> Sent: Wednesday, 18 January 2023 10.42
>> To: Robin Jarry; dev@dpdk.org
>> Cc: Tyler Retzlaff; Morten Brørup
>> Subject: Re: [PATCH v5 1/4] eal: add lcore info in telemetry
>>
>> On 16/12/2022 10:21, Robin Jarry wrote:
>>> Report the same information as rte_lcore_dump() in the telemetry
>>> API into /eal/lcore/list and /eal/lcore/info,ID.
>>>
>>> Example:
>>>
>>> --> /eal/lcore/info,3
>>> {
>>> "/eal/lcore/info": {
>>> "lcore_id": 3,
>>> "socket": 0,
>>> "role": "RTE",
>>> "cpuset": [
>>> 3
>>> ]
>>> }
>>> }
>>>
>>> Signed-off-by: Robin Jarry <rjarry@redhat.com>
>>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>>> ---
>
>> Hi Robin,
>>
>> Thanks for taking the time to work on this. It is a good implementation
>> for debug use-cases.
>>
>> I have 2 suggestions which would improve the usability of the data:
>> 1. Could we make the lcore_id parameter on /eal/lcore/info optional?
>> This would allow users to read info for all lcores in the application
>> at
>> once.
> +1 to this suggestion.
>
>> 2. Could we add 2 additional telemetry endpoints? One which returns an
>> array of busy_cycles values and the other returns an array of
>> total_cycles values. These arrays could be used in conjunction with the
>> /eal/lcore/list endpoint to quickly read the usage related metrics.
>> I've
>> included an example diff below [1].
> I prefer this done in a more generic way, see below.
>
>> We have a use-case beyond debugging in which we read telemetry every
>> few
>> milliseconds. From a performance point of view, adding the 2 additional
>> endpoints would be very beneficial.
>>
>> Thanks,
>> Kevin
>>
>> [1]
>>
>> diff --git a/lib/eal/common/eal_common_lcore.c
>> b/lib/eal/common/eal_common_lcore.c
>> index 210636d21d..94ddb276c5 100644
>> --- a/lib/eal/common/eal_common_lcore.c
>> +++ b/lib/eal/common/eal_common_lcore.c
>> @@ -569,6 +569,32 @@ handle_lcore_info(const char *cmd __rte_unused,
>> const char *params, struct rte_t
>> return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
>> }
>>
>> +static int
>> +lcore_telemetry_busy_cycles_cb(unsigned int lcore_id, void *arg)
>> +{
>> + struct rte_tel_data *d = arg;
>> + struct rte_lcore_usage usage;
>> + rte_lcore_usage_cb usage_cb;
>> + unsigned long cycles = 0;
>> +
>> + memset(&usage, 0, sizeof(usage));
>> + usage_cb = lcore_usage_cb;
>> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0)
>> + cycles = usage.busy_cycles;
>> +
>> + return rte_tel_data_add_array_u64(d, cycles);
>> +}
>> +
>> +static int
>> +handle_lcore_busy_cycles(const char *cmd __rte_unused,
>> + const char *params __rte_unused, struct rte_tel_data
>> *d)
>> +{
>> + int ret = rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
>> + if (ret)
>> + return ret;
>> + return rte_lcore_iterate(lcore_telemetry_busy_cycles_cb, d);
>> +}
>> +
>> RTE_INIT(lcore_telemetry)
>> {
>> rte_telemetry_register_cmd(
>> @@ -577,5 +603,8 @@ RTE_INIT(lcore_telemetry)
>> rte_telemetry_register_cmd(
>> "/eal/lcore/info", handle_lcore_info,
>> "Returns lcore info. Parameters: int
>> lcore_id");
>> + rte_telemetry_register_cmd(
>> + "/eal/lcore/busy_cycles",
>> handle_lcore_busy_cycles,
>> + "List of busy cycle values. Takes no
>> parameters");
>> }
>> #endif /* !RTE_EXEC_ENV_WINDOWS */
> This should be generalized to support any named field in the rte_lcore_usage structure.
>
> The general path could be: /eal/lcore/usage
>
> With optional parameter lcore_id. This should return one object (or an array of such objects, if lcore_id is not given) with all usage fields and their values, e.g.:
>
> {
> "lcore_id": 7,
> "total_cycles": 1234,
> "usage_cycles": 567
> }
>
>
> The paths to support the array-optimized feature you are requesting could be: /eal/lcores/usage/total_cycles and /eal/lcores/usage/usage_cycles.
>
> These paths should return the arrays as suggested. I only request that you change "/lcore" to plural "/lcores" and add "/usage" to the path before the field name in the usage table.
>
> Alternatively, you could add a path /eal/lcores/usage_array, taking the field names as parameters and outputting multiple arrays like this:
>
> /eal/lcores/usage_array,total_cycles,usage_cycles
>
> {
> "total_cycles": [1234, 1234, 1234],
> "usage_cycles": [567, 678, 789]
> }
+1, this would also work nicely and allows for extension in future
without flooding with endpoints.
>
> But I don't know if this breaks with DPDK's standard REST interface. It would be easier if we had decided on something like OData, instead of inventing our own.
>
>
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 1/4] eal: add lcore info in telemetry
2023-01-18 11:03 ` Kevin Laatz
@ 2023-01-18 11:35 ` Morten Brørup
0 siblings, 0 replies; 134+ messages in thread
From: Morten Brørup @ 2023-01-18 11:35 UTC (permalink / raw)
To: Kevin Laatz, Robin Jarry, dev; +Cc: Tyler Retzlaff, Ciara Power
> From: Kevin Laatz [mailto:kevin.laatz@intel.com]
> Sent: Wednesday, 18 January 2023 12.03
>
> On 18/01/2023 10:21, Morten Brørup wrote:
> >> From: Kevin Laatz [mailto:kevin.laatz@intel.com]
> >>
> >> On 16/12/2022 10:21, Robin Jarry wrote:
> >>> Report the same information as rte_lcore_dump()
> >>> API into /eal/lcore/list and /eal/lcore/info,ID.
> >>>
> >>> Example:
> >>>
> >>> --> /eal/lcore/info,3
> >>> {
> >>> "/eal/lcore/info": {
> >>> "lcore_id": 3,
> >>> "socket": 0,
> >>> "role": "RTE",
> >>> "cpuset": [
> >>> 3
> >>> ]
> >>> }
> >>> }
> >>>
> >>> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> >>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> >>> ---
> >
> >> Hi Robin,
> >>
> >> Thanks for taking the time to work on this. It is a good
> implementation
> >> for debug use-cases.
> >>
> >> I have 2 suggestions which would improve the usability of the data:
> >> 1. Could we make the lcore_id parameter on /eal/lcore/info optional?
> >> This would allow users to read info for all lcores in the
> application
> >> at
> >> once.
> > +1 to this suggestion.
> >
> >> 2. Could we add 2 additional telemetry endpoints? One which returns
> an
> >> array of busy_cycles values and the other returns an array of
> >> total_cycles values. These arrays could be used in conjunction with
> the
> >> /eal/lcore/list endpoint to quickly read the usage related metrics.
> >> I've
> >> included an example diff below [1].
> > I prefer this done in a more generic way, see below.
> >
> >> We have a use-case beyond debugging in which we read telemetry every
> >> few
> >> milliseconds. From a performance point of view, adding the 2
> additional
> >> endpoints would be very beneficial.
> >>
> >> Thanks,
> >> Kevin
> >>
> >> [1]
> >>
> >> diff --git a/lib/eal/common/eal_common_lcore.c
> >> b/lib/eal/common/eal_common_lcore.c
> >> index 210636d21d..94ddb276c5 100644
> >> --- a/lib/eal/common/eal_common_lcore.c
> >> +++ b/lib/eal/common/eal_common_lcore.c
> >> @@ -569,6 +569,32 @@ handle_lcore_info(const char *cmd __rte_unused,
> >> const char *params, struct rte_t
> >> return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
> >> }
> >>
> >> +static int
> >> +lcore_telemetry_busy_cycles_cb(unsigned int lcore_id, void *arg)
> >> +{
> >> + struct rte_tel_data *d = arg;
> >> + struct rte_lcore_usage usage;
> >> + rte_lcore_usage_cb usage_cb;
> >> + unsigned long cycles = 0;
> >> +
> >> + memset(&usage, 0, sizeof(usage));
> >> + usage_cb = lcore_usage_cb;
> >> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0)
> >> + cycles = usage.busy_cycles;
> >> +
> >> + return rte_tel_data_add_array_u64(d, cycles);
> >> +}
> >> +
> >> +static int
> >> +handle_lcore_busy_cycles(const char *cmd __rte_unused,
> >> + const char *params __rte_unused, struct rte_tel_data
> >> *d)
> >> +{
> >> + int ret = rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
> >> + if (ret)
> >> + return ret;
> >> + return rte_lcore_iterate(lcore_telemetry_busy_cycles_cb, d);
> >> +}
> >> +
> >> RTE_INIT(lcore_telemetry)
> >> {
> >> rte_telemetry_register_cmd(
> >> @@ -577,5 +603,8 @@ RTE_INIT(lcore_telemetry)
> >> rte_telemetry_register_cmd(
> >> "/eal/lcore/info", handle_lcore_info,
> >> "Returns lcore info. Parameters: int
> >> lcore_id");
> >> + rte_telemetry_register_cmd(
> >> + "/eal/lcore/busy_cycles",
> >> handle_lcore_busy_cycles,
> >> + "List of busy cycle values. Takes no
> >> parameters");
> >> }
> >> #endif /* !RTE_EXEC_ENV_WINDOWS */
> > This should be generalized to support any named field in the
> rte_lcore_usage structure.
> >
> > The general path could be: /eal/lcore/usage
> >
> > With optional parameter lcore_id. This should return one object (or
> an array of such objects, if lcore_id is not given) with all usage
> fields and their values, e.g.:
> >
> > {
> > "lcore_id": 7,
> > "total_cycles": 1234,
> > "usage_cycles": 567
> > }
> >
> >
> > The paths to support the array-optimized feature you are requesting
> could be: /eal/lcores/usage/total_cycles and
> /eal/lcores/usage/usage_cycles.
> >
> > These paths should return the arrays as suggested. I only request
> that you change "/lcore" to plural "/lcores" and add "/usage" to the
> path before the field name in the usage table.
> >
> > Alternatively, you could add a path /eal/lcores/usage_array, taking
> the field names as parameters and outputting multiple arrays like this:
> >
> > /eal/lcores/usage_array,total_cycles,usage_cycles
> >
> > {
I forgot the index array:
"lcore_id": [1,6,7],
> > "total_cycles": [1234, 1234, 1234],
> > "usage_cycles": [567, 678, 789]
> > }
>
> +1, this would also work nicely and allows for extension in future
> without flooding with endpoints.
Our applications do something somewhat similar, i.e. use JSON arrays for performance (and bandwidth conservation) reasons - so I can confirm that the concept works in production.
But come to think of it, your array suggestion seems more like an additional feature, which belongs in another patch. We should not hold back Robin's patch series due to feature creep. :-)
>
>
> >
> > But I don't know if this breaks with DPDK's standard REST interface.
> It would be easier if we had decided on something like OData, instead
> of inventing our own.
> >
> >
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 1/4] eal: add lcore info in telemetry
2023-01-18 9:42 ` Kevin Laatz
2023-01-18 10:21 ` Morten Brørup
@ 2023-01-18 14:45 ` Robin Jarry
2023-01-18 16:01 ` Kevin Laatz
1 sibling, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-01-18 14:45 UTC (permalink / raw)
To: Kevin Laatz, dev; +Cc: Tyler Retzlaff, Morten Brørup
Kevin Laatz, Jan 18, 2023 at 10:42:
> Hi Robin,
>
> Thanks for taking the time to work on this. It is a good implementation
> for debug use-cases.
>
> I have 2 suggestions which would improve the usability of the data:
> 1. Could we make the lcore_id parameter on /eal/lcore/info optional?
> This would allow users to read info for all lcores in the application at
> once.
I don't think it would be a good thing since it would require returning
a different data format depending on whether the parameter is specified or not.
Probably adding another endpoint /eal/lcore/info_all that returns a list
of /eal/lcore/info (one for every lcore) would be better.
> 2. Could we add 2 additional telemetry endpoints? One which returns an
> array of busy_cycles values and the other returns an array of
> total_cycles values. These arrays could be used in conjunction with the
> /eal/lcore/list endpoint to quickly read the usage related metrics. I've
> included an example diff below [1].
>
> We have a use-case beyond debugging in which we read telemetry every few
> milliseconds. From a performance point of view, adding the 2 additional
> endpoints would be very beneficial.
If we add /eal/lcore/info_all you would have all this without two
additional endpoints. I don't think that calling it every few
milliseconds and extracting the {busy,total}_cycles values would be
a problem.
I can add another patch in the series but I would prefer not changing
the format at the last minute.
Would that be ok?
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 1/4] eal: add lcore info in telemetry
2023-01-18 14:45 ` Robin Jarry
@ 2023-01-18 16:01 ` Kevin Laatz
2023-01-18 16:17 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Kevin Laatz @ 2023-01-18 16:01 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Morten Brørup
On 18/01/2023 14:45, Robin Jarry wrote:
> Kevin Laatz, Jan 18, 2023 at 10:42:
>> Hi Robin,
>>
>> Thanks for taking the time to work on this. It is a good implementation
>> for debug use-cases.
>>
>> I have 2 suggestions which would improve the usability of the data:
>> 1. Could we make the lcore_id parameter on /eal/lcore/info optional?
>> This would allow users to read info for all lcores in the application at
>> once.
> I don't think it would be a good thing since it would require returning
> a different data format depending on whether the parameter is specified or not.
>
> Probably adding another endpoint /eal/lcore/info_all that returns a list
> of /eal/lcore/info (one for every lcore) would be better.
Either option seems ok, I don't have a strong preference, the main thing
here is to get the info for all cores in our telemetry read.
>> 2. Could we add 2 additional telemetry endpoints? One which returns an
>> array of busy_cycles values and the other returns an array of
>> total_cycles values. These arrays could be used in conjunction with the
>> /eal/lcore/list endpoint to quickly read the usage related metrics. I've
>> included an example diff below [1].
>>
>> We have a use-case beyond debugging in which we read telemetry every few
>> milliseconds. From a performance point of view, adding the 2 additional
>> endpoints would be very beneficial.
> If we add /eal/lcore/info_all you would have all this without two
> additional endpoints. I don't think that calling it every few
> milliseconds and extracting the {busy,total}_cycles values would be
> a problem.
>
> I can add another patch in the series but I would prefer not changing
> the format at the last minute.
While all of the information would be available, there are performance
benefits to reducing the size of data returned and by flattening the
arrays, in addition to a reduction in the JSON parsing required to
extract the needed metrics.
The additional endpoint(s) (I like Morten's idea of a single additional
endpoint where you can specify the metrics to include via parameters)
shouldn't affect the format of other parts of this patchset, but we
would gain the benefits of the additional metric format.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 1/4] eal: add lcore info in telemetry
2023-01-18 16:01 ` Kevin Laatz
@ 2023-01-18 16:17 ` Robin Jarry
0 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-01-18 16:17 UTC (permalink / raw)
To: Kevin Laatz, dev; +Cc: Tyler Retzlaff, Morten Brørup
Kevin Laatz, Jan 18, 2023 at 17:01:
> The additional endpoint(s) (I like Morten's idea of a single additional
> endpoint where you can specify the metrics to include via parameters)
> shouldn't affect the format of other parts of this patchset, but we
> would gain the benefits of the additional metric format.
Understood. This could probably be added in another patch. If that is
not too much work I can look into it.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-01-04 10:53 ` Konstantin Ananyev
@ 2023-01-18 16:46 ` Robin Jarry
2023-02-06 20:07 ` Konstantin Ananyev
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-01-18 16:46 UTC (permalink / raw)
To: Konstantin Ananyev, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
Konstantin Ananyev, Jan 04, 2023 at 11:53:
> Probably we can even print a warning or so if someone tries to overwrite
> it once again.
I'm not sure that is necessary. If an application wants to reset the
callback to NULL at any point in time, I don't see why DPDK should tell
them it is a bad thing.
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v6 0/5] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (7 preceding siblings ...)
2022-12-16 10:21 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
@ 2023-01-19 15:06 ` Robin Jarry
2023-01-19 15:06 ` [PATCH v6 1/5] eal: add lcore info in telemetry Robin Jarry
` (4 more replies)
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
` (8 subsequent siblings)
17 siblings, 5 replies; 134+ messages in thread
From: Robin Jarry @ 2023-01-19 15:06 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
Changes since v5:
- Added/rephrased some inline comments to address reviews.
- Added a new commit that adds the /eal/lcore/usage endpoint as
suggested by Kevin and Morten.
Changes since v4:
- rte_lcore_usage_cb now takes a pointer to a rte_lcore_usage structure.
I chose not to include any API version tracking mechanism since the
unsupported/unused fields can simply be left to zero. This is only
telemetry after all.
Changes since v3:
- Changed nomenclature from CPU cycles to TSC cycles in the docstring of
rte_lcore_usage_cb.
Changes since v2:
- Fixed typos in docstrings.
- Used if (xxx != NULL) instead of if (xxx) test convention.
- Guarded against an unlikely race if rte_lcore_dump() is called by
a thread while another one calls rte_lcore_register_usage_cb(NULL).
- s/utilization/usage/
- Fixed build on Windows.
Changes since v1:
- The cpuset field in telemetry is now a JSON list of CPU ids.
- Applications must now report their raw CPU cycles counts. The busyness
ratio and rate of change is left to external monitoring tools.
- Renamed show lcores -> dump_lcores in testpmd.
Robin Jarry (5):
eal: add lcore info in telemetry
eal: allow applications to report their cpu usage
testpmd: add dump_lcores command
testpmd: report lcore usage
telemetry: add /eal/lcore/usage endpoint
app/test-pmd/5tswap.c | 5 +-
app/test-pmd/cmdline.c | 3 +
app/test-pmd/csumonly.c | 6 +-
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +-
app/test-pmd/iofwd.c | 5 +-
app/test-pmd/macfwd.c | 5 +-
app/test-pmd/macswap.c | 5 +-
app/test-pmd/noisy_vnf.c | 4 +
app/test-pmd/rxonly.c | 5 +-
app/test-pmd/shared_rxq_fwd.c | 5 +-
app/test-pmd/testpmd.c | 39 +++++-
app/test-pmd/testpmd.h | 14 +-
app/test-pmd/txonly.c | 7 +-
lib/eal/common/eal_common_lcore.c | 205 +++++++++++++++++++++++++++++-
lib/eal/include/rte_lcore.h | 35 +++++
lib/eal/version.map | 1 +
17 files changed, 322 insertions(+), 30 deletions(-)
--
2.39.0
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v6 1/5] eal: add lcore info in telemetry
2023-01-19 15:06 ` [PATCH v6 0/5] " Robin Jarry
@ 2023-01-19 15:06 ` Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:19 ` David Marchand
2023-01-19 15:06 ` [PATCH v6 2/5] eal: allow applications to report their cpu usage Robin Jarry
` (3 subsequent siblings)
4 siblings, 2 replies; 134+ messages in thread
From: Robin Jarry @ 2023-01-19 15:06 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup
Report the same information as rte_lcore_dump() in the telemetry
API into /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
Notes:
v5 -> v6: No change
lib/eal/common/eal_common_lcore.c | 96 +++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..16548977dce8 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,9 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
#include "eal_private.h"
#include "eal_thread.h"
@@ -456,3 +459,96 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_info *info = arg;
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct rte_tel_data *cpuset;
+ const char *role;
+ unsigned int cpu;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ switch (cfg->lcore_role[lcore_id]) {
+ case ROLE_RTE:
+ role = "RTE";
+ break;
+ case ROLE_SERVICE:
+ role = "SERVICE";
+ break;
+ case ROLE_NON_EAL:
+ role = "NON_EAL";
+ break;
+ default:
+ role = "UNKNOWN";
+ break;
+ }
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", role);
+ cpuset = rte_tel_data_alloc();
+ if (!cpuset)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ char *endptr = NULL;
+ if (params == NULL || strlen(params) == 0)
+ return -EINVAL;
+ errno = 0;
+ info.lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (endptr == params)
+ return -EINVAL;
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.0
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v6 2/5] eal: allow applications to report their cpu usage
2023-01-19 15:06 ` [PATCH v6 0/5] " Robin Jarry
2023-01-19 15:06 ` [PATCH v6 1/5] eal: add lcore info in telemetry Robin Jarry
@ 2023-01-19 15:06 ` Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:22 ` David Marchand
2023-01-19 15:06 ` [PATCH v6 3/5] testpmd: add dump_lcores command Robin Jarry
` (2 subsequent siblings)
4 siblings, 2 replies; 134+ messages in thread
From: Robin Jarry @ 2023-01-19 15:06 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of TSC cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
Notes:
v5 -> v6: Added/rephrased some inline comments.
lib/eal/common/eal_common_lcore.c | 45 ++++++++++++++++++++++++++++---
lib/eal/include/rte_lcore.h | 35 ++++++++++++++++++++++++
lib/eal/version.map | 1 +
3 files changed, 78 insertions(+), 3 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 16548977dce8..80513cfe3725 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -422,11 +423,21 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
- char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
const char *role;
FILE *f = arg;
int ret;
@@ -446,11 +457,25 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
break;
}
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ usage_str[0] = '\0';
+ /*
+ * Guard against concurrent modification of lcore_usage_cb.
+ * rte_lcore_register_usage_cb() should only be called once at application init
+ * but nothing prevents an application from resetting the callback to NULL.
+ */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ snprintf(usage_str, sizeof(usage_str), ", busy cycles %"PRIu64"/%"PRIu64,
+ usage.busy_cycles, usage.total_cycles);
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ ret == 0 ? "" : "...", usage_str);
+
return 0;
}
@@ -489,7 +514,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct lcore_telemetry_info *info = arg;
struct rte_config *cfg = rte_eal_get_configuration();
+ struct rte_lcore_usage usage;
struct rte_tel_data *cpuset;
+ rte_lcore_usage_cb usage_cb;
const char *role;
unsigned int cpu;
@@ -522,6 +549,18 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
rte_tel_data_add_array_int(cpuset, cpu);
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /*
+ * Guard against concurrent modification of lcore_usage_cb.
+ * rte_lcore_register_usage_cb() should only be called once at application init
+ * but nothing prevents an application from resetting the callback to NULL.
+ */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_dict_u64(info->d, "total_cycles", usage.total_cycles);
+ rte_tel_data_add_dict_u64(info->d, "busy_cycles", usage.busy_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 6938c3fd7b81..52468e7120dd 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,41 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * CPU usage statistics.
+ */
+struct rte_lcore_usage {
+ uint64_t total_cycles;
+ /**< The total amount of time since application start, in TSC cycles. */
+ uint64_t busy_cycles;
+ /**< The amount of busy time since application start, in TSC cycles. */
+};
+
+/**
+ * Callback to allow applications to report CPU usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] usage
+ * Counters representing this lcore usage. This can never be NULL.
+ * @return
+ * - 0 if fields in usage were updated successfully. The fields that the
+ * application does not support must not be modified.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
+
+/**
+ * Register a callback from an application to be called in rte_lcore_dump() and
+ * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
+ * report CPU usage statistics via this callback.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 7ad12a7dc985..30fd216a12ea 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -440,6 +440,7 @@ EXPERIMENTAL {
rte_thread_detach;
rte_thread_equal;
rte_thread_join;
+ rte_lcore_register_usage_cb;
};
INTERNAL {
--
2.39.0
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v6 3/5] testpmd: add dump_lcores command
2023-01-19 15:06 ` [PATCH v6 0/5] " Robin Jarry
2023-01-19 15:06 ` [PATCH v6 1/5] eal: add lcore info in telemetry Robin Jarry
2023-01-19 15:06 ` [PATCH v6 2/5] eal: allow applications to report their cpu usage Robin Jarry
@ 2023-01-19 15:06 ` Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:22 ` David Marchand
2023-01-19 15:06 ` [PATCH v6 4/5] testpmd: report lcore usage Robin Jarry
2023-01-19 15:06 ` [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint Robin Jarry
4 siblings, 2 replies; 134+ messages in thread
From: Robin Jarry @ 2023-01-19 15:06 UTC (permalink / raw)
To: dev
Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup,
Konstantin Ananyev
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
---
Notes:
v5 -> v6: No change
app/test-pmd/cmdline.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b32dc8bfd445..96474d2ae458 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8345,6 +8345,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8358,6 +8360,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
--
2.39.0
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v6 4/5] testpmd: report lcore usage
2023-01-19 15:06 ` [PATCH v6 0/5] " Robin Jarry
` (2 preceding siblings ...)
2023-01-19 15:06 ` [PATCH v6 3/5] testpmd: add dump_lcores command Robin Jarry
@ 2023-01-19 15:06 ` Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
2023-01-19 15:06 ` [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint Robin Jarry
4 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-01-19 15:06 UTC (permalink / raw)
To: dev
Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup,
Konstantin Ananyev
Reuse the --record-core-cycles option to account for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
to accept an additional argument for the number of processed packets.
Update fwd_stream.busy_cycles when the number of packets is greater than
zero.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
field in struct fwd_lcore to identify the correct index in fwd_lcores
and return the sum of busy/total cycles of all fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
---
Notes:
v5 -> v6: No change.
app/test-pmd/5tswap.c | 5 +++--
app/test-pmd/csumonly.c | 6 +++---
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +++---
app/test-pmd/iofwd.c | 5 +++--
app/test-pmd/macfwd.c | 5 +++--
app/test-pmd/macswap.c | 5 +++--
app/test-pmd/noisy_vnf.c | 4 ++++
app/test-pmd/rxonly.c | 5 +++--
app/test-pmd/shared_rxq_fwd.c | 5 +++--
app/test-pmd/testpmd.c | 39 ++++++++++++++++++++++++++++++++++-
app/test-pmd/testpmd.h | 14 +++++++++----
app/test-pmd/txonly.c | 7 ++++---
13 files changed, 81 insertions(+), 27 deletions(-)
diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index f041a5e1d530..03225075716c 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -116,7 +116,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -182,7 +182,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 1c2459851522..03e141221a56 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -868,7 +868,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
@@ -1200,8 +1200,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index fd6abc0f4124..7b2f0ffdf0f5 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -196,7 +196,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
RTE_PER_LCORE(_next_flow) = next_flow;
- get_end_cycles(fs, start_tsc);
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 066f2a3ab79b..2fc9f96dc95f 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -303,7 +303,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_replies = 0;
@@ -508,8 +508,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
} while (++nb_tx < nb_replies);
}
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8fafdec548ad..e5a2dbe20c69 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -59,7 +59,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
@@ -84,7 +84,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index beb220fbb462..9db623999970 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -65,7 +65,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -115,7 +115,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4f8deb338296..4db134ac1d91 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,7 +66,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -93,7 +93,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..290bdcda45f0 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -152,6 +152,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
uint64_t delta_ms;
bool needs_flush = false;
uint64_t now;
+ uint64_t start_tsc = 0;
+
+ get_start_cycles(&start_tsc);
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
@@ -219,6 +222,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+ get_end_cycles(fs, start_tsc, nb_rx + nb_tx);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index d528d4f34e60..519202339e16 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -58,13 +58,14 @@ pkt_burst_receive(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/shared_rxq_fwd.c b/app/test-pmd/shared_rxq_fwd.c
index 2e9047804b5b..395b73bfe52e 100644
--- a/app/test-pmd/shared_rxq_fwd.c
+++ b/app/test-pmd/shared_rxq_fwd.c
@@ -102,9 +102,10 @@ shared_rxq_fwd(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
forward_shared_rxq(fs, nb_rx, pkts_burst);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 134d79a55547..d80867b91b3d 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2184,6 +2184,7 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
+ fs->busy_cycles = 0;
fs->core_cycles = 0;
}
}
@@ -2260,6 +2261,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
+ fc->lcore_id = rte_lcore_id();
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
do {
@@ -2288,6 +2290,37 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+ int c;
+
+ for (c = 0; c < nb_lcores; c++) {
+ fc = fwd_lcores[c];
+ if (fc->lcore_id != lcore_id)
+ continue;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ usage->busy_cycles = 0;
+ usage->total_cycles = 0;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++)
+ if (!fsm[sm_id]->disabled) {
+ usage->busy_cycles += fsm[sm_id]->busy_cycles;
+ usage->total_cycles += fsm[sm_id]->core_cycles;
+ }
+
+ return 0;
+ }
+
+ return -1;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4522,6 +4555,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..5dbf5d1c465c 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,8 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
+ uint64_t core_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +361,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ unsigned int lcore_id; /**< return value of rte_lcore_id() */
};
/*
@@ -836,10 +838,14 @@ get_start_cycles(uint64_t *start_tsc)
}
static inline void
-get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
+get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc, uint64_t nb_packets)
{
- if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ if (record_core_cycles) {
+ uint64_t cycles = rte_rdtsc() - start_tsc;
+ fs->core_cycles += cycles;
+ if (nb_packets > 0)
+ fs->busy_cycles += cycles;
+ }
}
static inline void
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952daa..ad37626ff63c 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
struct rte_ether_hdr eth_hdr;
- uint16_t nb_tx;
+ uint16_t nb_tx = 0;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
@@ -392,7 +392,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
}
if (nb_pkt == 0)
- return;
+ goto end;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -426,7 +426,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
} while (++nb_tx < nb_pkt);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
--
2.39.0
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint
2023-01-19 15:06 ` [PATCH v6 0/5] " Robin Jarry
` (3 preceding siblings ...)
2023-01-19 15:06 ` [PATCH v6 4/5] testpmd: report lcore usage Robin Jarry
@ 2023-01-19 15:06 ` Robin Jarry
2023-01-19 16:21 ` Morten Brørup
2023-01-19 19:42 ` Kevin Laatz
4 siblings, 2 replies; 134+ messages in thread
From: Robin Jarry @ 2023-01-19 15:06 UTC (permalink / raw)
To: dev; +Cc: Tyler Retzlaff, Kevin Laatz, Robin Jarry, Morten Brørup
Allow fetching CPU cycles usage for all lcores with a single request.
This endpoint is intended for repeated and frequent invocations by
external monitoring systems and therefore returns condensed data.
It consists of a single dictionary with three keys: "lcore_ids",
"total_cycles" and "busy_cycles" that are mapped to three arrays of
integer values. Each array has the same number of values, one per lcore,
in the same order.
Example:
--> /eal/lcore/usage
{
"/eal/lcore/usage": {
"lcore_ids": [
4,
5
],
"total_cycles": [
23846845590,
23900558914
],
"busy_cycles": [
21043446682,
21448837316
]
}
}
Cc: Kevin Laatz <kevin.laatz@intel.com>
Cc: Morten Brørup <mb@smartsharesystems.com>
Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
Notes:
v6: new patch
lib/eal/common/eal_common_lcore.c | 64 +++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 80513cfe3725..2f310ad19672 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -581,6 +581,67 @@ handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_t
return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
}
+struct lcore_telemetry_usage {
+ struct rte_tel_data *lcore_ids;
+ struct rte_tel_data *total_cycles;
+ struct rte_tel_data *busy_cycles;
+};
+
+static int
+lcore_telemetry_usage_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_usage *u = arg;
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /*
+ * Guard against concurrent modification of lcore_usage_cb.
+ * rte_lcore_register_usage_cb() should only be called once at application init
+ * but nothing prevents an application from resetting the callback to NULL.
+ */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_array_int(u->lcore_ids, lcore_id);
+ rte_tel_data_add_array_u64(u->total_cycles, usage.total_cycles);
+ rte_tel_data_add_array_u64(u->busy_cycles, usage.busy_cycles);
+ }
+
+ return 0;
+}
+
+static int
+handle_lcore_usage(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ struct lcore_telemetry_usage usage;
+ struct rte_tel_data *lcore_ids = rte_tel_data_alloc();
+ struct rte_tel_data *total_cycles = rte_tel_data_alloc();
+ struct rte_tel_data *busy_cycles = rte_tel_data_alloc();
+
+ if (!lcore_ids || !total_cycles || !busy_cycles) {
+ rte_tel_data_free(lcore_ids);
+ rte_tel_data_free(total_cycles);
+ rte_tel_data_free(busy_cycles);
+ return -ENOMEM;
+ }
+
+ rte_tel_data_start_dict(d);
+ rte_tel_data_start_array(lcore_ids, RTE_TEL_INT_VAL);
+ rte_tel_data_start_array(total_cycles, RTE_TEL_U64_VAL);
+ rte_tel_data_start_array(busy_cycles, RTE_TEL_U64_VAL);
+ rte_tel_data_add_dict_container(d, "lcore_ids", lcore_ids, 0);
+ rte_tel_data_add_dict_container(d, "total_cycles", total_cycles, 0);
+ rte_tel_data_add_dict_container(d, "busy_cycles", busy_cycles, 0);
+ usage.lcore_ids = lcore_ids;
+ usage.total_cycles = total_cycles;
+ usage.busy_cycles = busy_cycles;
+
+ return rte_lcore_iterate(lcore_telemetry_usage_cb, &usage);
+}
+
RTE_INIT(lcore_telemetry)
{
rte_telemetry_register_cmd(
@@ -589,5 +650,8 @@ RTE_INIT(lcore_telemetry)
rte_telemetry_register_cmd(
"/eal/lcore/info", handle_lcore_info,
"Returns lcore info. Parameters: int lcore_id");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/usage", handle_lcore_usage,
+ "Returns lcore cycles usage. Takes no parameters");
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.0
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint
2023-01-19 15:06 ` [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint Robin Jarry
@ 2023-01-19 16:21 ` Morten Brørup
2023-01-19 16:34 ` Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
1 sibling, 1 reply; 134+ messages in thread
From: Morten Brørup @ 2023-01-19 16:21 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz
> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Thursday, 19 January 2023 16.07
>
> Allow fetching CPU cycles usage for all lcores with a single request.
> This endpoint is intended for repeated and frequent invocations by
> external monitoring systems and therefore returns condensed data.
>
> It consists of a single dictionary with three keys: "lcore_ids",
> "total_cycles" and "busy_cycles" that are mapped to three arrays of
> integer values. Each array has the same number of values, one per
> lcore,
> in the same order.
[...]
> + rte_telemetry_register_cmd(
> + "/eal/lcore/usage", handle_lcore_usage,
> + "Returns lcore cycles usage. Takes no parameters");
In the future, the rte_lcore_usage structure may contain more fields, and some may not be related to the TSC. So consider removing "cycles" from the description of the telemetry path.
Don't waste time changing it unless you are providing a new patch version anyway!
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint
2023-01-19 16:21 ` Morten Brørup
@ 2023-01-19 16:34 ` Robin Jarry
2023-01-19 16:45 ` Morten Brørup
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-01-19 16:34 UTC (permalink / raw)
To: Morten Brørup, dev; +Cc: Tyler Retzlaff, Kevin Laatz
Morten Brørup, Jan 19, 2023 at 17:21:
> In the future, the rte_lcore_usage structure may contain more fields,
> and some may not be related to the TSC. So consider removing "cycles"
> from the description of the telemetry path.
>
> Don't waste time changing it unless you are providing a new patch
> version anyway!
Ok, I assume the "cycles" word can be removed later if people add more
fields that are not related to cycles in the rte_lcore_usage structure.
For now, we only are talking about cycles :)
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint
2023-01-19 16:34 ` Robin Jarry
@ 2023-01-19 16:45 ` Morten Brørup
0 siblings, 0 replies; 134+ messages in thread
From: Morten Brørup @ 2023-01-19 16:45 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz
> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Thursday, 19 January 2023 17.35
>
> Morten Brørup, Jan 19, 2023 at 17:21:
> > In the future, the rte_lcore_usage structure may contain more fields,
> > and some may not be related to the TSC. So consider removing "cycles"
> > from the description of the telemetry path.
> >
> > Don't waste time changing it unless you are providing a new patch
> > version anyway!
>
> Ok, I assume the "cycles" word can be removed later if people add more
> fields that are not related to cycles in the rte_lcore_usage structure.
> For now, we only are talking about cycles :)
Agreed!
This part of the patch is new, and I forgot to ACK it in a previous response, so here goes...
Acked-by: Morten Brørup <mb@smartsharesystems.com>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 1/5] eal: add lcore info in telemetry
2023-01-19 15:06 ` [PATCH v6 1/5] eal: add lcore info in telemetry Robin Jarry
@ 2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:19 ` David Marchand
1 sibling, 0 replies; 134+ messages in thread
From: Kevin Laatz @ 2023-01-19 19:42 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Morten Brørup
On 19/01/2023 15:06, Robin Jarry wrote:
> Report the same information than rte_lcore_dump() in the telemetry
> API into /eal/lcore/list and /eal/lcore/info,ID.
>
> Example:
>
> --> /eal/lcore/info,3
> {
> "/eal/lcore/info": {
> "lcore_id": 3,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 3
> ]
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
>
> Notes:
> v5 -> v6: No change
>
> lib/eal/common/eal_common_lcore.c | 96 +++++++++++++++++++++++++++++++
> 1 file changed, 96 insertions(+)
>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 2/5] eal: allow applications to report their cpu usage
2023-01-19 15:06 ` [PATCH v6 2/5] eal: allow applications to report their cpu usage Robin Jarry
@ 2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:22 ` David Marchand
1 sibling, 0 replies; 134+ messages in thread
From: Kevin Laatz @ 2023-01-19 19:42 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Morten Brørup
On 19/01/2023 15:06, Robin Jarry wrote:
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of TSC cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
>
> Notes:
> v5 -> v6: Added/rephrased some inline comments.
>
> lib/eal/common/eal_common_lcore.c | 45 ++++++++++++++++++++++++++++---
> lib/eal/include/rte_lcore.h | 35 ++++++++++++++++++++++++
> lib/eal/version.map | 1 +
> 3 files changed, 78 insertions(+), 3 deletions(-)
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 3/5] testpmd: add dump_lcores command
2023-01-19 15:06 ` [PATCH v6 3/5] testpmd: add dump_lcores command Robin Jarry
@ 2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:22 ` David Marchand
1 sibling, 0 replies; 134+ messages in thread
From: Kevin Laatz @ 2023-01-19 19:42 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Morten Brørup, Konstantin Ananyev
On 19/01/2023 15:06, Robin Jarry wrote:
> Add a simple command that calls rte_lcore_dump().
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> ---
>
> Notes:
> v5 -> v6: No change
>
> app/test-pmd/cmdline.c | 3 +++
> 1 file changed, 3 insertions(+)
>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 4/5] testpmd: report lcore usage
2023-01-19 15:06 ` [PATCH v6 4/5] testpmd: report lcore usage Robin Jarry
@ 2023-01-19 19:42 ` Kevin Laatz
0 siblings, 0 replies; 134+ messages in thread
From: Kevin Laatz @ 2023-01-19 19:42 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Morten Brørup, Konstantin Ananyev
On 19/01/2023 15:06, Robin Jarry wrote:
> Reuse the --record-core-cycles option to account for busy cycles. One
> turn of packet_fwd_t is considered "busy" if there was at least one
> received or transmitted packet.
>
> Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
> to accept an additional argument for the number of processed packets.
> Update fwd_stream.busy_cycles when the number of packets is greater than
> zero.
>
> When --record-core-cycles is specified, register a callback with
> rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
> field in struct fwd_lcore to identify the correct index in fwd_lcores
> and return the sum of busy/total cycles of all fwd_streams.
>
> This makes the cycles counters available in rte_lcore_dump() and the
> lcore telemetry API:
>
> testpmd> dump_lcores
> lcore 3, socket 0, role RTE, cpuset 3
> lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
> lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
>
> --> /eal/lcore/info,4
> {
> "/eal/lcore/info": {
> "lcore_id": 4,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 4
> ],
> "busy_cycles": 10623340318,
> "total_cycles": 55331167354
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> ---
>
> Notes:
> v5 -> v6: No change.
>
> app/test-pmd/5tswap.c | 5 +++--
> app/test-pmd/csumonly.c | 6 +++---
> app/test-pmd/flowgen.c | 2 +-
> app/test-pmd/icmpecho.c | 6 +++---
> app/test-pmd/iofwd.c | 5 +++--
> app/test-pmd/macfwd.c | 5 +++--
> app/test-pmd/macswap.c | 5 +++--
> app/test-pmd/noisy_vnf.c | 4 ++++
> app/test-pmd/rxonly.c | 5 +++--
> app/test-pmd/shared_rxq_fwd.c | 5 +++--
> app/test-pmd/testpmd.c | 39 ++++++++++++++++++++++++++++++++++-
> app/test-pmd/testpmd.h | 14 +++++++++----
> app/test-pmd/txonly.c | 7 ++++---
> 13 files changed, 81 insertions(+), 27 deletions(-)
>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint
2023-01-19 15:06 ` [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint Robin Jarry
2023-01-19 16:21 ` Morten Brørup
@ 2023-01-19 19:42 ` Kevin Laatz
1 sibling, 0 replies; 134+ messages in thread
From: Kevin Laatz @ 2023-01-19 19:42 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Morten Brørup
On 19/01/2023 15:06, Robin Jarry wrote:
> Allow fetching CPU cycles usage for all lcores with a single request.
> This endpoint is intended for repeated and frequent invocations by
> external monitoring systems and therefore returns condensed data.
>
> It consists of a single dictionary with three keys: "lcore_ids",
> "total_cycles" and "busy_cycles" that are mapped to three arrays of
> integer values. Each array has the same number of values, one per lcore,
> in the same order.
>
> Example:
>
> --> /eal/lcore/usage
> {
> "/eal/lcore/usage": {
> "lcore_ids": [
> 4,
> 5
> ],
> "total_cycles": [
> 23846845590,
> 23900558914
> ],
> "busy_cycles": [
> 21043446682,
> 21448837316
> ]
> }
> }
>
> Cc: Kevin Laatz <kevin.laatz@intel.com>
> Cc: Morten Brørup <mb@smartsharesystems.com>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> ---
>
> Notes:
> v6: new patch
>
> lib/eal/common/eal_common_lcore.c | 64 +++++++++++++++++++++++++++++++
> 1 file changed, 64 insertions(+)
>
Thanks for adding this!
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 1/5] eal: add lcore info in telemetry
2023-01-19 15:06 ` [PATCH v6 1/5] eal: add lcore info in telemetry Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
@ 2023-01-26 11:19 ` David Marchand
1 sibling, 0 replies; 134+ messages in thread
From: David Marchand @ 2023-01-26 11:19 UTC (permalink / raw)
To: Robin Jarry; +Cc: dev, Tyler Retzlaff, Kevin Laatz, Morten Brørup
On Thu, Jan 19, 2023 at 4:08 PM Robin Jarry <rjarry@redhat.com> wrote:
>
> Report the same information than rte_lcore_dump() in the telemetry
> API into /eal/lcore/list and /eal/lcore/info,ID.
>
> Example:
>
> --> /eal/lcore/info,3
> {
> "/eal/lcore/info": {
> "lcore_id": 3,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 3
> ]
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
>
> Notes:
> v5 -> v6: No change
>
> lib/eal/common/eal_common_lcore.c | 96 +++++++++++++++++++++++++++++++
> 1 file changed, 96 insertions(+)
>
> diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
> index 06c594b0224f..16548977dce8 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -10,6 +10,9 @@
> #include <rte_errno.h>
> #include <rte_lcore.h>
> #include <rte_log.h>
> +#ifndef RTE_EXEC_ENV_WINDOWS
> +#include <rte_telemetry.h>
> +#endif
>
> #include "eal_private.h"
> #include "eal_thread.h"
> @@ -456,3 +459,96 @@ rte_lcore_dump(FILE *f)
> {
> rte_lcore_iterate(lcore_dump_cb, f);
> }
> +
> +#ifndef RTE_EXEC_ENV_WINDOWS
> +static int
> +lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
> +{
> + struct rte_tel_data *d = arg;
> + return rte_tel_data_add_array_int(d, lcore_id);
> +}
> +
> +static int
> +handle_lcore_list(const char *cmd __rte_unused,
> + const char *params __rte_unused,
> + struct rte_tel_data *d)
Indent should be single tab.
> +{
> + int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
> + if (ret)
> + return ret;
> + return rte_lcore_iterate(lcore_telemetry_id_cb, d);
> +}
> +
> +struct lcore_telemetry_info {
> + unsigned int lcore_id;
> + struct rte_tel_data *d;
> +};
> +
> +static int
> +lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> +{
> + struct lcore_telemetry_info *info = arg;
> + struct rte_config *cfg = rte_eal_get_configuration();
> + struct rte_tel_data *cpuset;
> + const char *role;
> + unsigned int cpu;
When possible, reverse xmas tree please.
> +
> + if (info->lcore_id != lcore_id)
> + return 0;
> +
> + switch (cfg->lcore_role[lcore_id]) {
> + case ROLE_RTE:
> + role = "RTE";
> + break;
> + case ROLE_SERVICE:
> + role = "SERVICE";
> + break;
> + case ROLE_NON_EAL:
> + role = "NON_EAL";
> + break;
> + default:
> + role = "UNKNOWN";
> + break;
> + }
Please put this translation block in a helper to avoid duplication
with lcore_dump_cb.
> + rte_tel_data_start_dict(info->d);
> + rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
> + rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
> + rte_tel_data_add_dict_string(info->d, "role", role);
> + cpuset = rte_tel_data_alloc();
> + if (!cpuset)
> + return -ENOMEM;
> + rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
> + for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
> + if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
> + rte_tel_data_add_array_int(cpuset, cpu);
> + rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> +
> + return 0;
> +}
> +
> +static int
> +handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
> +{
> + struct lcore_telemetry_info info = { .d = d };
> + char *endptr = NULL;
Missing a newline.
> + if (params == NULL || strlen(params) == 0)
> + return -EINVAL;
> + errno = 0;
> + info.lcore_id = strtoul(params, &endptr, 10);
> + if (errno)
> + return -errno;
> + if (endptr == params)
> + return -EINVAL;
> + return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
> +}
> +
> +RTE_INIT(lcore_telemetry)
> +{
> + rte_telemetry_register_cmd(
> + "/eal/lcore/list", handle_lcore_list,
> + "List of lcore ids. Takes no parameters");
Please fix indent.
> + rte_telemetry_register_cmd(
> + "/eal/lcore/info", handle_lcore_info,
> + "Returns lcore info. Parameters: int lcore_id");
> +}
> +#endif /* !RTE_EXEC_ENV_WINDOWS */
> --
> 2.39.0
>
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 2/5] eal: allow applications to report their cpu usage
2023-01-19 15:06 ` [PATCH v6 2/5] eal: allow applications to report their cpu usage Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
@ 2023-01-26 11:22 ` David Marchand
1 sibling, 0 replies; 134+ messages in thread
From: David Marchand @ 2023-01-26 11:22 UTC (permalink / raw)
To: Robin Jarry; +Cc: dev, Tyler Retzlaff, Kevin Laatz, Morten Brørup
On Thu, Jan 19, 2023 at 4:08 PM Robin Jarry <rjarry@redhat.com> wrote:
>
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of TSC cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
>
> Notes:
> v5 -> v6: Added/rephrased some inline comments.
>
> lib/eal/common/eal_common_lcore.c | 45 ++++++++++++++++++++++++++++---
> lib/eal/include/rte_lcore.h | 35 ++++++++++++++++++++++++
> lib/eal/version.map | 1 +
> 3 files changed, 78 insertions(+), 3 deletions(-)
>
> diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
> index 16548977dce8..80513cfe3725 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -2,6 +2,7 @@
> * Copyright(c) 2010-2014 Intel Corporation
> */
>
> +#include <inttypes.h>
> #include <stdlib.h>
> #include <string.h>
>
> @@ -422,11 +423,21 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
> return ret;
> }
>
> +static rte_lcore_usage_cb lcore_usage_cb;
> +
> +void
> +rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
> +{
> + lcore_usage_cb = cb;
> +}
> +
> static int
> lcore_dump_cb(unsigned int lcore_id, void *arg)
> {
> struct rte_config *cfg = rte_eal_get_configuration();
> - char cpuset[RTE_CPU_AFFINITY_STR_LEN];
> + char cpuset[RTE_CPU_AFFINITY_STR_LEN], usage_str[256];
This is a debug/non-performance-sensitive helper.
Please remove this "big enough for now" buffer and use a dynamic allocation.
> + struct rte_lcore_usage usage;
> + rte_lcore_usage_cb usage_cb;
> const char *role;
> FILE *f = arg;
> int ret;
> @@ -446,11 +457,25 @@ lcore_dump_cb(unsigned int lcore_id, void *arg)
> break;
> }
>
> + /* The callback may not set all the fields in the structure, so clear it here. */
> + memset(&usage, 0, sizeof(usage));
> + usage_str[0] = '\0';
> + /*
> + * Guard against concurrent modification of lcore_usage_cb.
> + * rte_lcore_register_usage_cb() should only be called once at application init
> + * but nothing prevents and application to reset the callback to NULL.
> + */
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
> + snprintf(usage_str, sizeof(usage_str), ", busy cycles %"PRIu64"/%"PRIu64,
> + usage.busy_cycles, usage.total_cycles);
> + }
> ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
> sizeof(cpuset));
> - fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
> + fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
> rte_lcore_to_socket_id(lcore_id), role, cpuset,
> - ret == 0 ? "" : "...");
> + ret == 0 ? "" : "...", usage_str);
> +
> return 0;
> }
>
> @@ -489,7 +514,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> {
> struct lcore_telemetry_info *info = arg;
> struct rte_config *cfg = rte_eal_get_configuration();
> + struct rte_lcore_usage usage;
> struct rte_tel_data *cpuset;
> + rte_lcore_usage_cb usage_cb;
> const char *role;
> unsigned int cpu;
Reverse xmas tree please.
>
> @@ -522,6 +549,18 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
> rte_tel_data_add_array_int(cpuset, cpu);
> rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> + /* The callback may not set all the fields in the structure, so clear it here. */
> + memset(&usage, 0, sizeof(usage));
> + /*
> + * Guard against concurrent modification of lcore_usage_cb.
> + * rte_lcore_register_usage_cb() should only be called once at application init
> + * but nothing prevents and application to reset the callback to NULL.
> + */
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
> + rte_tel_data_add_dict_u64(info->d, "total_cycles", usage.total_cycles);
> + rte_tel_data_add_dict_u64(info->d, "busy_cycles", usage.busy_cycles);
> + }
>
> return 0;
> }
> diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
> index 6938c3fd7b81..52468e7120dd 100644
> --- a/lib/eal/include/rte_lcore.h
> +++ b/lib/eal/include/rte_lcore.h
> @@ -328,6 +328,41 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
> int
> rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
>
> +/**
> + * CPU usage statistics.
Let's be consistent and use lcore.
> + */
> +struct rte_lcore_usage {
> + uint64_t total_cycles;
> + /**< The total amount of time since application start, in TSC cycles. */
> + uint64_t busy_cycles;
> + /**< The amount of busy time since application start, in TSC cycles. */
This is confusing to have the comments after.
Please put those comments before the associated fields (using /** ).
> +};
> +
> +/**
> + * Callback to allow applications to report CPU usage.
lcore*
> + *
> + * @param [in] lcore_id
> + * The lcore to consider.
> + * @param [out] usage
> + * Counters representing this lcore usage. This can never be NULL.
> + * @return
> + * - 0 if fields in usage were updated successfully. The fields that the
> + * application does not support must not be modified.
> + * - a negative value if the information is not available or if any error occurred.
> + */
> +typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
> +
> +/**
> + * Register a callback from an application to be called in rte_lcore_dump() and
> + * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
> + * report CPU usage statistics via this callback.
lcore*
> + *
> + * @param cb
> + * The callback function.
> + */
> +__rte_experimental
> +void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
> +
> /**
> * List all lcores.
> *
> diff --git a/lib/eal/version.map b/lib/eal/version.map
> index 7ad12a7dc985..30fd216a12ea 100644
> --- a/lib/eal/version.map
> +++ b/lib/eal/version.map
> @@ -440,6 +440,7 @@ EXPERIMENTAL {
> rte_thread_detach;
> rte_thread_equal;
> rte_thread_join;
> + rte_lcore_register_usage_cb;
Please start a new block for 23.03 symbols.
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v6 3/5] testpmd: add dump_lcores command
2023-01-19 15:06 ` [PATCH v6 3/5] testpmd: add dump_lcores command Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
@ 2023-01-26 11:22 ` David Marchand
1 sibling, 0 replies; 134+ messages in thread
From: David Marchand @ 2023-01-26 11:22 UTC (permalink / raw)
To: Robin Jarry
Cc: dev, Tyler Retzlaff, Kevin Laatz, Morten Brørup, Konstantin Ananyev
On Thu, Jan 19, 2023 at 4:08 PM Robin Jarry <rjarry@redhat.com> wrote:
>
> Add a simple command that calls rte_lcore_dump().
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
We maintain consistent prefixes for commit titles in git.
Historically, testpmd patches titles are prefixed with app/testpmd.
All dump commands are described in testpmd documentation.
Please update it.
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v8 0/5] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (8 preceding siblings ...)
2023-01-19 15:06 ` [PATCH v6 0/5] " Robin Jarry
@ 2023-02-02 13:43 ` Robin Jarry
2023-02-02 13:43 ` [PATCH v8 1/5] eal: add lcore info in telemetry Robin Jarry
` (5 more replies)
2023-02-07 19:37 ` [PATCH v9 " Robin Jarry
` (7 subsequent siblings)
17 siblings, 6 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-02 13:43 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
Changes since v7:
- Made /eal/lcore/info lcore_id argument parsing more robust.
Changes since v6:
- Added release notes entry
- Moved lcore role enum to name conversion in a function for reuse
- Moved rte_lcore_register_usage_cb in a 23.03 block of eal/version.map
- Style & indentation fixes
- Use asprintf to format busy/total cycles in lcore_dump_cb
Robin Jarry (5):
eal: add lcore info in telemetry
eal: report applications lcore usage
app/testpmd: add dump command for lcores
app/testpmd: report lcore usage
eal: add lcore usage telemetry endpoint
app/test-pmd/5tswap.c | 5 +-
app/test-pmd/cmdline.c | 3 +
app/test-pmd/csumonly.c | 6 +-
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +-
app/test-pmd/iofwd.c | 5 +-
app/test-pmd/macfwd.c | 5 +-
app/test-pmd/macswap.c | 5 +-
app/test-pmd/noisy_vnf.c | 4 +
app/test-pmd/rxonly.c | 5 +-
app/test-pmd/shared_rxq_fwd.c | 5 +-
app/test-pmd/testpmd.c | 40 +++-
app/test-pmd/testpmd.h | 14 +-
app/test-pmd/txonly.c | 7 +-
doc/guides/rel_notes/release_23_03.rst | 8 +
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +
lib/eal/common/eal_common_lcore.c | 231 ++++++++++++++++++--
lib/eal/include/rte_lcore.h | 35 +++
lib/eal/version.map | 1 +
19 files changed, 349 insertions(+), 45 deletions(-)
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v8 1/5] eal: add lcore info in telemetry
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
@ 2023-02-02 13:43 ` Robin Jarry
2023-02-06 3:50 ` fengchengwen
2023-02-02 13:43 ` [PATCH v8 2/5] eal: report applications lcore usage Robin Jarry
` (4 subsequent siblings)
5 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-02 13:43 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup, Kevin Laatz
Report the same information as rte_lcore_dump() in the telemetry
API into /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v7 -> v8: made lcore_id integer parsing more robust
lib/eal/common/eal_common_lcore.c | 123 +++++++++++++++++++++++++-----
1 file changed, 105 insertions(+), 18 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..f53fc17b4d04 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,9 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
#include "eal_private.h"
#include "eal_thread.h"
@@ -419,35 +422,35 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static const char *
+lcore_role_str(enum rte_lcore_role_t role)
+{
+ switch (role) {
+ case ROLE_RTE:
+ return "RTE";
+ case ROLE_SERVICE:
+ return "SERVICE";
+ case ROLE_NON_EAL:
+ return "NON_EAL";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
- const char *role;
FILE *f = arg;
int ret;
- switch (cfg->lcore_role[lcore_id]) {
- case ROLE_RTE:
- role = "RTE";
- break;
- case ROLE_SERVICE:
- role = "SERVICE";
- break;
- case ROLE_NON_EAL:
- role = "NON_EAL";
- break;
- default:
- role = "UNKNOWN";
- break;
- }
-
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
- rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ rte_lcore_to_socket_id(lcore_id),
+ lcore_role_str(cfg->lcore_role[lcore_id]),
+ cpuset, ret == 0 ? "" : "...");
return 0;
}
@@ -456,3 +459,87 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct lcore_telemetry_info *info = arg;
+ struct rte_tel_data *cpuset;
+ unsigned int cpu;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", lcore_role_str(cfg->lcore_role[lcore_id]));
+ cpuset = rte_tel_data_alloc();
+ if (cpuset == NULL)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ }
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ unsigned long lcore_id;
+ char *endptr;
+
+ if (params == NULL)
+ return -EINVAL;
+ errno = 0;
+ lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (*params == '\0' || *endptr != '\0' || lcore_id >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ info.lcore_id = lcore_id;
+
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v8 2/5] eal: report applications lcore usage
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
2023-02-02 13:43 ` [PATCH v8 1/5] eal: add lcore info in telemetry Robin Jarry
@ 2023-02-02 13:43 ` Robin Jarry
2023-02-06 4:00 ` fengchengwen
2023-02-06 8:48 ` David Marchand
2023-02-02 13:43 ` [PATCH v8 3/5] app/testpmd: add dump command for lcores Robin Jarry
` (3 subsequent siblings)
5 siblings, 2 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-02 13:43 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup, Kevin Laatz
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of TSC cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v7 -> v8: no change
doc/guides/rel_notes/release_23_03.rst | 7 ++++
lib/eal/common/eal_common_lcore.c | 48 ++++++++++++++++++++++++--
lib/eal/include/rte_lcore.h | 35 +++++++++++++++++++
lib/eal/version.map | 1 +
4 files changed, 89 insertions(+), 2 deletions(-)
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index 73f5d94e143d..f407dc3df7a8 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -78,6 +78,13 @@ New Features
``rte_event_dev_config::nb_single_link_event_port_queues`` parameter
required for eth_rx, eth_tx, crypto and timer eventdev adapters.
+* **Added support for reporting lcore usage in applications.**
+
+ * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
+ been added to provide information similar to ``rte_lcore_dump()``.
+ * Applications can register a callback at startup via
+ ``rte_lcore_register_usage_cb()`` to provide lcore usage information.
+
Removed Items
-------------
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index f53fc17b4d04..bbb734098b42 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -437,20 +438,49 @@ lcore_role_str(enum rte_lcore_role_t role)
}
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+ char *usage_str = NULL;
FILE *f = arg;
int ret;
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /*
+ * Guard against concurrent modification of lcore_usage_cb.
+ * rte_lcore_register_usage_cb() should only be called once at application init
+ * but nothing prevents an application from resetting the callback to NULL.
+ */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ if (asprintf(&usage_str, ", busy cycles %"PRIu64"/%"PRIu64,
+ usage.busy_cycles, usage.total_cycles) < 0) {
+ return -ENOMEM;
+ }
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id),
lcore_role_str(cfg->lcore_role[lcore_id]),
- cpuset, ret == 0 ? "" : "...");
+ cpuset, ret == 0 ? "" : "...",
+ usage_str ? usage_str : "");
+
+ free(usage_str);
+
return 0;
}
@@ -489,7 +519,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
struct lcore_telemetry_info *info = arg;
+ struct rte_lcore_usage usage;
struct rte_tel_data *cpuset;
+ rte_lcore_usage_cb usage_cb;
unsigned int cpu;
if (info->lcore_id != lcore_id)
@@ -508,6 +540,18 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
rte_tel_data_add_array_int(cpuset, cpu);
}
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /*
+ * Guard against concurrent modification of lcore_usage_cb.
+ * rte_lcore_register_usage_cb() should only be called once at application init
+ * but nothing prevents an application from resetting the callback to NULL.
+ */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_dict_u64(info->d, "total_cycles", usage.total_cycles);
+ rte_tel_data_add_dict_u64(info->d, "busy_cycles", usage.busy_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 9c7865052100..b1c8afb05d28 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,41 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * lcore usage statistics.
+ */
+struct rte_lcore_usage {
+ /** The total amount of time since application start, in TSC cycles. */
+ uint64_t total_cycles;
+ /** The amount of busy time since application start, in TSC cycles. */
+ uint64_t busy_cycles;
+};
+
+/**
+ * Callback to allow applications to report lcore usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] usage
+ * Counters representing this lcore usage. This can never be NULL.
+ * @return
+ * - 0 if fields in usage were updated successfully. The fields that the
+ * application does not support must not be modified.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
+
+/**
+ * Register a callback from an application to be called in rte_lcore_dump() and
+ * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
+ * report lcore usage statistics via this callback.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 6523102157e2..1f70caac7b9c 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -442,6 +442,7 @@ EXPERIMENTAL {
# added in 23.03
rte_thread_set_name;
+ rte_lcore_register_usage_cb;
};
INTERNAL {
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v8 3/5] app/testpmd: add dump command for lcores
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
2023-02-02 13:43 ` [PATCH v8 1/5] eal: add lcore info in telemetry Robin Jarry
2023-02-02 13:43 ` [PATCH v8 2/5] eal: report applications lcore usage Robin Jarry
@ 2023-02-02 13:43 ` Robin Jarry
2023-02-06 3:34 ` fengchengwen
2023-02-02 13:43 ` [PATCH v8 4/5] app/testpmd: report lcore usage Robin Jarry
` (2 subsequent siblings)
5 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-02 13:43 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v7 -> v8: no change
app/test-pmd/cmdline.c | 3 +++
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +++++++
2 files changed, 10 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index cb8c174020b0..bb7ff2b44989 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8357,6 +8357,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8370,6 +8372,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 79a1fa9cb73d..9ceb21dfbbdf 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -591,6 +591,13 @@ Dumps the user device list::
testpmd> dump_devargs
+dump lcores
+~~~~~~~~~~~
+
+Dumps the logical cores list::
+
+ testpmd> dump_lcores
+
dump log types
~~~~~~~~~~~~~~
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v8 4/5] app/testpmd: report lcore usage
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
` (2 preceding siblings ...)
2023-02-02 13:43 ` [PATCH v8 3/5] app/testpmd: add dump command for lcores Robin Jarry
@ 2023-02-02 13:43 ` Robin Jarry
2023-02-06 3:31 ` fengchengwen
2023-02-06 8:58 ` David Marchand
2023-02-02 13:43 ` [PATCH v8 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
2023-02-05 23:11 ` [PATCH v8 0/5] lcore telemetry improvements Thomas Monjalon
5 siblings, 2 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-02 13:43 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
Reuse the --record-core-cycles option to account for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
to accept an additional argument for the number of processed packets.
Update fwd_stream.busy_cycles when the number of packets is greater than
zero.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
field in struct fwd_lcore to identify the correct index in fwd_lcores
and return the sum of busy/total cycles of all fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v7 -> v8: no change
app/test-pmd/5tswap.c | 5 +++--
app/test-pmd/csumonly.c | 6 +++---
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +++---
app/test-pmd/iofwd.c | 5 +++--
app/test-pmd/macfwd.c | 5 +++--
app/test-pmd/macswap.c | 5 +++--
app/test-pmd/noisy_vnf.c | 4 ++++
app/test-pmd/rxonly.c | 5 +++--
app/test-pmd/shared_rxq_fwd.c | 5 +++--
app/test-pmd/testpmd.c | 40 ++++++++++++++++++++++++++++++++++-
app/test-pmd/testpmd.h | 14 ++++++++----
app/test-pmd/txonly.c | 7 +++---
13 files changed, 82 insertions(+), 27 deletions(-)
diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index f041a5e1d530..03225075716c 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -116,7 +116,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -182,7 +182,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 1c2459851522..03e141221a56 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -868,7 +868,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
@@ -1200,8 +1200,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index fd6abc0f4124..7b2f0ffdf0f5 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -196,7 +196,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
RTE_PER_LCORE(_next_flow) = next_flow;
- get_end_cycles(fs, start_tsc);
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 066f2a3ab79b..2fc9f96dc95f 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -303,7 +303,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_replies = 0;
@@ -508,8 +508,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
} while (++nb_tx < nb_replies);
}
}
-
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8fafdec548ad..e5a2dbe20c69 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -59,7 +59,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
@@ -84,7 +84,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index beb220fbb462..9db623999970 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -65,7 +65,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -115,7 +115,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4f8deb338296..4db134ac1d91 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,7 +66,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
@@ -93,7 +93,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
rte_pktmbuf_free(pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..290bdcda45f0 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -152,6 +152,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
uint64_t delta_ms;
bool needs_flush = false;
uint64_t now;
+ uint64_t start_tsc = 0;
+
+ get_start_cycles(&start_tsc);
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
@@ -219,6 +222,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+ get_end_cycles(fs, start_tsc, nb_rx + nb_tx);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index d528d4f34e60..519202339e16 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -58,13 +58,14 @@ pkt_burst_receive(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/shared_rxq_fwd.c b/app/test-pmd/shared_rxq_fwd.c
index 2e9047804b5b..395b73bfe52e 100644
--- a/app/test-pmd/shared_rxq_fwd.c
+++ b/app/test-pmd/shared_rxq_fwd.c
@@ -102,9 +102,10 @@ shared_rxq_fwd(struct fwd_stream *fs)
nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
- return;
+ goto end;
forward_shared_rxq(fs, nb_rx, pkts_burst);
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_rx);
}
static void
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index e366f81a0f46..105f75ad5f35 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2184,6 +2184,7 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
+ fs->busy_cycles = 0;
fs->core_cycles = 0;
}
}
@@ -2260,6 +2261,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
+ fc->lcore_id = rte_lcore_id();
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
do {
@@ -2288,6 +2290,38 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+ int c;
+
+ for (c = 0; c < nb_lcores; c++) {
+ fc = fwd_lcores[c];
+ if (fc->lcore_id != lcore_id)
+ continue;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ usage->busy_cycles = 0;
+ usage->total_cycles = 0;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++) {
+ if (!fsm[sm_id]->disabled) {
+ usage->busy_cycles += fsm[sm_id]->busy_cycles;
+ usage->total_cycles += fsm[sm_id]->core_cycles;
+ }
+ }
+
+ return 0;
+ }
+
+ return -1;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4527,6 +4561,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..5dbf5d1c465c 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,8 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
+ uint64_t core_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +361,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ unsigned int lcore_id; /**< return value of rte_lcore_id() */
};
/*
@@ -836,10 +838,14 @@ get_start_cycles(uint64_t *start_tsc)
}
static inline void
-get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
+get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc, uint64_t nb_packets)
{
- if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ if (record_core_cycles) {
+ uint64_t cycles = rte_rdtsc() - start_tsc;
+ fs->core_cycles += cycles;
+ if (nb_packets > 0)
+ fs->busy_cycles += cycles;
+ }
}
static inline void
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952daa..ad37626ff63c 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
struct rte_ether_hdr eth_hdr;
- uint16_t nb_tx;
+ uint16_t nb_tx = 0;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
@@ -392,7 +392,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
}
if (nb_pkt == 0)
- return;
+ goto end;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -426,7 +426,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
} while (++nb_tx < nb_pkt);
}
- get_end_cycles(fs, start_tsc);
+end:
+ get_end_cycles(fs, start_tsc, nb_tx);
}
static int
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v8 5/5] eal: add lcore usage telemetry endpoint
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
` (3 preceding siblings ...)
2023-02-02 13:43 ` [PATCH v8 4/5] app/testpmd: report lcore usage Robin Jarry
@ 2023-02-02 13:43 ` Robin Jarry
2023-02-02 14:00 ` Morten Brørup
2023-02-06 3:27 ` fengchengwen
2023-02-05 23:11 ` [PATCH v8 0/5] lcore telemetry improvements Thomas Monjalon
5 siblings, 2 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-02 13:43 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Kevin Laatz
Allow fetching CPU cycles usage for all lcores with a single request.
This endpoint is intended for repeated and frequent invocations by
external monitoring systems and therefore returns condensed data.
It consists of a single dictionary with three keys: "lcore_ids",
"total_cycles" and "busy_cycles" that are mapped to three arrays of
integer values. Each array has the same number of values, one per lcore,
in the same order.
Example:
--> /eal/lcore/usage
{
"/eal/lcore/usage": {
"lcore_ids": [
4,
5
],
"total_cycles": [
23846845590,
23900558914
],
"busy_cycles": [
21043446682,
21448837316
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v7 -> v8: no change
doc/guides/rel_notes/release_23_03.rst | 5 +-
lib/eal/common/eal_common_lcore.c | 64 ++++++++++++++++++++++++++
2 files changed, 67 insertions(+), 2 deletions(-)
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index f407dc3df7a8..31c282bbb489 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -80,8 +80,9 @@ New Features
* **Added support for reporting lcore usage in applications.**
- * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
- been added to provide information similar to ``rte_lcore_dump()``.
+ * The ``/eal/lcore/list``, ``/eal/lcore/usage`` and ``/eal/lcore/info``
+ telemetry endpoints have been added to provide information similar to
+ ``rte_lcore_dump()``.
* Applications can register a callback at startup via
``rte_lcore_register_usage_cb()`` to provide lcore usage information.
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index bbb734098b42..c28d4e194c30 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -577,6 +577,67 @@ handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_t
return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
}
+struct lcore_telemetry_usage {
+ struct rte_tel_data *lcore_ids;
+ struct rte_tel_data *total_cycles;
+ struct rte_tel_data *busy_cycles;
+};
+
+static int
+lcore_telemetry_usage_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_usage *u = arg;
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /*
+ * Guard against concurrent modification of lcore_usage_cb.
+ * rte_lcore_register_usage_cb() should only be called once at application init
+ * but nothing prevents and application to reset the callback to NULL.
+ */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_array_int(u->lcore_ids, lcore_id);
+ rte_tel_data_add_array_u64(u->total_cycles, usage.total_cycles);
+ rte_tel_data_add_array_u64(u->busy_cycles, usage.busy_cycles);
+ }
+
+ return 0;
+}
+
+static int
+handle_lcore_usage(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ struct lcore_telemetry_usage usage;
+ struct rte_tel_data *lcore_ids = rte_tel_data_alloc();
+ struct rte_tel_data *total_cycles = rte_tel_data_alloc();
+ struct rte_tel_data *busy_cycles = rte_tel_data_alloc();
+
+ if (!lcore_ids || !total_cycles || !busy_cycles) {
+ rte_tel_data_free(lcore_ids);
+ rte_tel_data_free(total_cycles);
+ rte_tel_data_free(busy_cycles);
+ return -ENOMEM;
+ }
+
+ rte_tel_data_start_dict(d);
+ rte_tel_data_start_array(lcore_ids, RTE_TEL_INT_VAL);
+ rte_tel_data_start_array(total_cycles, RTE_TEL_U64_VAL);
+ rte_tel_data_start_array(busy_cycles, RTE_TEL_U64_VAL);
+ rte_tel_data_add_dict_container(d, "lcore_ids", lcore_ids, 0);
+ rte_tel_data_add_dict_container(d, "total_cycles", total_cycles, 0);
+ rte_tel_data_add_dict_container(d, "busy_cycles", busy_cycles, 0);
+ usage.lcore_ids = lcore_ids;
+ usage.total_cycles = total_cycles;
+ usage.busy_cycles = busy_cycles;
+
+ return rte_lcore_iterate(lcore_telemetry_usage_cb, &usage);
+}
+
RTE_INIT(lcore_telemetry)
{
rte_telemetry_register_cmd(
@@ -585,5 +646,8 @@ RTE_INIT(lcore_telemetry)
rte_telemetry_register_cmd(
"/eal/lcore/info", handle_lcore_info,
"Returns lcore info. Parameters: int lcore_id");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/usage", handle_lcore_usage,
+ "Returns lcore cycles usage. Takes no parameters");
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v8 5/5] eal: add lcore usage telemetry endpoint
2023-02-02 13:43 ` [PATCH v8 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
@ 2023-02-02 14:00 ` Morten Brørup
2023-02-06 3:27 ` fengchengwen
1 sibling, 0 replies; 134+ messages in thread
From: Morten Brørup @ 2023-02-02 14:00 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Kevin Laatz
> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Thursday, 2 February 2023 14.43
>
> Allow fetching CPU cycles usage for all lcores with a single request.
> This endpoint is intended for repeated and frequent invocations by
> external monitoring systems and therefore returns condensed data.
>
> It consists of a single dictionary with three keys: "lcore_ids",
> "total_cycles" and "busy_cycles" that are mapped to three arrays of
> integer values. Each array has the same number of values, one per
> lcore,
> in the same order.
>
> Example:
>
> --> /eal/lcore/usage
> {
> "/eal/lcore/usage": {
> "lcore_ids": [
> 4,
> 5
> ],
> "total_cycles": [
> 23846845590,
> 23900558914
> ],
> "busy_cycles": [
> 21043446682,
> 21448837316
> ]
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> ---
Acked-by: Morten Brørup <mb@smartsharesystems.com>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 0/5] lcore telemetry improvements
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
` (4 preceding siblings ...)
2023-02-02 13:43 ` [PATCH v8 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
@ 2023-02-05 23:11 ` Thomas Monjalon
5 siblings, 0 replies; 134+ messages in thread
From: Thomas Monjalon @ 2023-02-05 23:11 UTC (permalink / raw)
To: Robin Jarry; +Cc: dev, david.marchand, bruce.richardson, ciara.power
> Robin Jarry (5):
> eal: add lcore info in telemetry
> eal: report applications lcore usage
> app/testpmd: add dump command for lcores
> app/testpmd: report lcore usage
> eal: add lcore usage telemetry endpoint
It needs to be rebased on top of the series renaming u64 functions to uint.
It has been just merged in main.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 5/5] eal: add lcore usage telemetry endpoint
2023-02-02 13:43 ` [PATCH v8 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
2023-02-02 14:00 ` Morten Brørup
@ 2023-02-06 3:27 ` fengchengwen
2023-02-06 8:24 ` Robin Jarry
1 sibling, 1 reply; 134+ messages in thread
From: fengchengwen @ 2023-02-06 3:27 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Kevin Laatz
Hi Robin,
On 2023/2/2 21:43, Robin Jarry wrote:
> Allow fetching CPU cycles usage for all lcores with a single request.
> This endpoint is intended for repeated and frequent invocations by
> external monitoring systems and therefore returns condensed data.
>
> It consists of a single dictionary with three keys: "lcore_ids",
> "total_cycles" and "busy_cycles" that are mapped to three arrays of
> integer values. Each array has the same number of values, one per lcore,
> in the same order.
>
> Example:
>
> --> /eal/lcore/usage
> {
> "/eal/lcore/usage": {
> "lcore_ids": [
> 4,
> 5
> ],
> "total_cycles": [
> 23846845590,
> 23900558914
> ],
> "busy_cycles": [
> 21043446682,
> 21448837316
> ]
> }
The telemetry should be human-readable also.
so why not "/eal/lcore/usage": {
"lcore_4" : {
"total_cycles" : xxx
"busy_cycles" : xxx
"busy/total ratio" : "xx%"
},
"lcore_5" : {
"total_cycles" : yyy
"busy_cycles" : yyy
"busy/total ratio" : "yy%"
},
}
> }
>
...
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 4/5] app/testpmd: report lcore usage
2023-02-02 13:43 ` [PATCH v8 4/5] app/testpmd: report lcore usage Robin Jarry
@ 2023-02-06 3:31 ` fengchengwen
2023-02-06 8:58 ` David Marchand
1 sibling, 0 replies; 134+ messages in thread
From: fengchengwen @ 2023-02-06 3:31 UTC (permalink / raw)
To: dev
Suggest add field "busy ratio" to reduce hand-computing and improve readability.
With above add, Acked-by: Chengwen Feng <fengchengwen@huawei.com>
On 2023/2/2 21:43, Robin Jarry wrote:
> Reuse the --record-core-cycles option to account for busy cycles. One
> turn of packet_fwd_t is considered "busy" if there was at least one
> received or transmitted packet.
>
> Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
> to accept an additional argument for the number of processed packets.
> Update fwd_stream.busy_cycles when the number of packets is greater than
> zero.
>
> When --record-core-cycles is specified, register a callback with
> rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
> field in struct fwd_lcore to identify the correct index in fwd_lcores
> and return the sum of busy/total cycles of all fwd_streams.
>
> This makes the cycles counters available in rte_lcore_dump() and the
> lcore telemetry API:
>
> testpmd> dump_lcores
> lcore 3, socket 0, role RTE, cpuset 3
> lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
> lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
>
> --> /eal/lcore/info,4
> {
> "/eal/lcore/info": {
> "lcore_id": 4,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 4
> ],
> "busy_cycles": 10623340318,
> "total_cycles": 55331167354
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> ---
>
...
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 3/5] app/testpmd: add dump command for lcores
2023-02-02 13:43 ` [PATCH v8 3/5] app/testpmd: add dump command for lcores Robin Jarry
@ 2023-02-06 3:34 ` fengchengwen
0 siblings, 0 replies; 134+ messages in thread
From: fengchengwen @ 2023-02-06 3:34 UTC (permalink / raw)
To: dev
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
On 2023/2/2 21:43, Robin Jarry wrote:
> Add a simple command that calls rte_lcore_dump().
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> ---
>
...
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 1/5] eal: add lcore info in telemetry
2023-02-02 13:43 ` [PATCH v8 1/5] eal: add lcore info in telemetry Robin Jarry
@ 2023-02-06 3:50 ` fengchengwen
2023-02-06 8:22 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: fengchengwen @ 2023-02-06 3:50 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Morten Brørup, Kevin Laatz
Hi Robin,
On 2023/2/2 21:43, Robin Jarry wrote:
> Report the same information than rte_lcore_dump() in the telemetry
> API into /eal/lcore/list and /eal/lcore/info,ID.
>
> Example:
>
> --> /eal/lcore/info,3
> {
> "/eal/lcore/info": {
> "lcore_id": 3,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 3
> ]
> }
> }
>
...
> +
> +static int
> +handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
> +{
> + struct lcore_telemetry_info info = { .d = d };
> + unsigned long lcore_id;
> + char *endptr;
> +
> + if (params == NULL)
> + return -EINVAL;
> + errno = 0;
> + lcore_id = strtoul(params, &endptr, 10);
> + if (errno)
> + return -errno;
> + if (*params == '\0' || *endptr != '\0' || lcore_id >= RTE_MAX_LCORE)
> + return -EINVAL;
> +
> + info.lcore_id = lcore_id;
> +
> + return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
lcore_iterate will iterate and find the lcore.
How about add one new API e.g. rte_lcore_cb(xxx) ?
> +}
> +
> +RTE_INIT(lcore_telemetry)
> +{
> + rte_telemetry_register_cmd(
> + "/eal/lcore/list", handle_lcore_list,
> + "List of lcore ids. Takes no parameters");
> + rte_telemetry_register_cmd(
> + "/eal/lcore/info", handle_lcore_info,
> + "Returns lcore info. Parameters: int lcore_id");
> +}
> +#endif /* !RTE_EXEC_ENV_WINDOWS */
>
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 2/5] eal: report applications lcore usage
2023-02-02 13:43 ` [PATCH v8 2/5] eal: report applications lcore usage Robin Jarry
@ 2023-02-06 4:00 ` fengchengwen
2023-02-06 7:36 ` Morten Brørup
2023-02-06 8:48 ` David Marchand
1 sibling, 1 reply; 134+ messages in thread
From: fengchengwen @ 2023-02-06 4:00 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Morten Brørup, Kevin Laatz
Hi Robin,
On 2023/2/2 21:43, Robin Jarry wrote:
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of TSC cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
The 'record_burst_stats' also important for performance tune.
Is it possible to incorporate it into this framework?
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v8 2/5] eal: report applications lcore usage
2023-02-06 4:00 ` fengchengwen
@ 2023-02-06 7:36 ` Morten Brørup
2023-02-06 8:21 ` Robin Jarry
2023-02-06 11:18 ` fengchengwen
0 siblings, 2 replies; 134+ messages in thread
From: Morten Brørup @ 2023-02-06 7:36 UTC (permalink / raw)
To: fengchengwen, Robin Jarry, dev; +Cc: Kevin Laatz
> From: fengchengwen [mailto:fengchengwen@huawei.com]
> Sent: Monday, 6 February 2023 05.00
>
> Hi Robin,
>
> On 2023/2/2 21:43, Robin Jarry wrote:
> > Allow applications to register a callback that will be invoked in
> > rte_lcore_dump() and when requesting lcore info in the telemetry API.
> >
> > The callback is expected to return the number of TSC cycles that have
> > passed since application start and the number of these cycles that
> were
> > spent doing busy work.
> >
>
> The 'record_burst_stats' also important for performance tune.
While I agree that burst size spread is important for comparing CPU usage and capacity, the 'record_burst_stats' is a test_pmd specific feature. Applications may implement something different.
>
> Is it possible to incorporate it into this framework?
We all agreed on CPU usage metrics for this patch, so let's limit ourselves this time.
If we want more performance metrics, such as RX and TX burst size spread, they can be proposed and discussed in a separate RFC. It might trigger a lot of discussion, like the original lcore busyness patch that inspired this patch series.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 2/5] eal: report applications lcore usage
2023-02-06 7:36 ` Morten Brørup
@ 2023-02-06 8:21 ` Robin Jarry
2023-02-06 11:18 ` fengchengwen
1 sibling, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 8:21 UTC (permalink / raw)
To: Morten Brørup, fengchengwen, dev; +Cc: Kevin Laatz
Morten Brørup, Feb 06, 2023 at 08:36:
> > The 'record_burst_stats' also important for performance tune.
>
> While I agree that burst size spread is important for comparing CPU
> usage and capacity, the 'record_burst_stats' is a test_pmd specific
> feature. Applications may implement something different.
>
> > Is it possible to incorporate it into this framework?
>
> We all agreed on CPU usage metrics for this patch, so let's limit
> ourselves this time.
>
> If we want more performance metrics, such as RX and TX burst size
> spread, they can be proposed and discussed in a separate RFC. It might
> trigger a lot of discussion, like the original lcore busyness patch
> that inspired this patch series.
I agree with Morten here. This framework should only allow reporting
statistics that are not specific to any DPDK application.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 1/5] eal: add lcore info in telemetry
2023-02-06 3:50 ` fengchengwen
@ 2023-02-06 8:22 ` Robin Jarry
2023-02-06 11:22 ` fengchengwen
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 8:22 UTC (permalink / raw)
To: fengchengwen, dev; +Cc: Morten Brørup, Kevin Laatz
fengchengwen, Feb 06, 2023 at 04:50:
> > + return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
>
> lcore_iterate will iterate and find the lcore.
>
> How about add one new API e.g. rte_lcore_cb(xxx) ?
Hi fengchengwen,
what would that new API do?
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 5/5] eal: add lcore usage telemetry endpoint
2023-02-06 3:27 ` fengchengwen
@ 2023-02-06 8:24 ` Robin Jarry
2023-02-06 11:32 ` fengchengwen
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 8:24 UTC (permalink / raw)
To: fengchengwen, dev; +Cc: Kevin Laatz
fengchengwen, Feb 06, 2023 at 04:27:
> The telemetry should be human-readable also.
>
> so why not "/eal/lcore/usage": {
> "lcore_4" : {
> "total_cycles" : xxx
> "busy_cycles" : xxx
> "busy/total ratio" : "xx%"
> },
> "lcore_5" : {
> "total_cycles" : yyy
> "busy_cycles" : yyy
> "busy/total ratio" : "yy%"
> },
> }
The raw data is exposed and can be rendered any way you like. This
should be left to external monitoring tools, such as grafana & al.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 2/5] eal: report applications lcore usage
2023-02-02 13:43 ` [PATCH v8 2/5] eal: report applications lcore usage Robin Jarry
2023-02-06 4:00 ` fengchengwen
@ 2023-02-06 8:48 ` David Marchand
2023-02-06 9:03 ` Robin Jarry
1 sibling, 1 reply; 134+ messages in thread
From: David Marchand @ 2023-02-06 8:48 UTC (permalink / raw)
To: Robin Jarry, Morten Brørup, Kevin Laatz; +Cc: dev, Chengwen Feng
Hello Robin,
On Thu, Feb 2, 2023 at 2:44 PM Robin Jarry <rjarry@redhat.com> wrote:
>
> Allow applications to register a callback that will be invoked in
> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>
> The callback is expected to return the number of TSC cycles that have
> passed since application start and the number of these cycles that were
> spent doing busy work.
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> ---
>
> Notes:
> v7 -> v8: no change
>
> doc/guides/rel_notes/release_23_03.rst | 7 ++++
> lib/eal/common/eal_common_lcore.c | 48 ++++++++++++++++++++++++--
> lib/eal/include/rte_lcore.h | 35 +++++++++++++++++++
> lib/eal/version.map | 1 +
> 4 files changed, 89 insertions(+), 2 deletions(-)
>
> diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
> index 73f5d94e143d..f407dc3df7a8 100644
> --- a/doc/guides/rel_notes/release_23_03.rst
> +++ b/doc/guides/rel_notes/release_23_03.rst
> @@ -78,6 +78,13 @@ New Features
> ``rte_event_dev_config::nb_single_link_event_port_queues`` parameter
> required for eth_rx, eth_tx, crypto and timer eventdev adapters.
>
> +* **Added support for reporting lcore usage in applications.**
> +
> + * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
> + been added to provide information similar to ``rte_lcore_dump()``.
> + * Applications can register a callback at startup via
> + ``rte_lcore_register_usage_cb()`` to provide lcore usage information.
> +
EAL updates come first in RN sections.
>
> Removed Items
> -------------
> diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
> index f53fc17b4d04..bbb734098b42 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -2,6 +2,7 @@
> * Copyright(c) 2010-2014 Intel Corporation
> */
>
> +#include <inttypes.h>
> #include <stdlib.h>
> #include <string.h>
>
> @@ -437,20 +438,49 @@ lcore_role_str(enum rte_lcore_role_t role)
> }
> }
>
> +static rte_lcore_usage_cb lcore_usage_cb;
> +
> +void
> +rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
> +{
> + lcore_usage_cb = cb;
> +}
> +
> static int
> lcore_dump_cb(unsigned int lcore_id, void *arg)
> {
> struct rte_config *cfg = rte_eal_get_configuration();
> char cpuset[RTE_CPU_AFFINITY_STR_LEN];
> + struct rte_lcore_usage usage;
> + rte_lcore_usage_cb usage_cb;
> + char *usage_str = NULL;
> FILE *f = arg;
> int ret;
>
> + /* The callback may not set all the fields in the structure, so clear it here. */
> + memset(&usage, 0, sizeof(usage));
> + /*
> + * Guard against concurrent modification of lcore_usage_cb.
> + * rte_lcore_register_usage_cb() should only be called once at application init
> + * but nothing prevents and application to reset the callback to NULL.
This is copy/paste a few times, only commenting here:
"prevents an* application from* resetting* the callback to NULL"
> + */
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
> + if (asprintf(&usage_str, ", busy cycles %"PRIu64"/%"PRIu64,
> + usage.busy_cycles, usage.total_cycles) < 0) {
> + return -ENOMEM;
> + }
> + }
> ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
> sizeof(cpuset));
> - fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
> + fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
> rte_lcore_to_socket_id(lcore_id),
> lcore_role_str(cfg->lcore_role[lcore_id]),
> - cpuset, ret == 0 ? "" : "...");
> + cpuset, ret == 0 ? "" : "...",
> + usage_str ? usage_str : "");
usage_str != NULL
> +
> + free(usage_str);
> +
> return 0;
> }
>
> @@ -489,7 +519,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> {
> struct rte_config *cfg = rte_eal_get_configuration();
> struct lcore_telemetry_info *info = arg;
> + struct rte_lcore_usage usage;
> struct rte_tel_data *cpuset;
> + rte_lcore_usage_cb usage_cb;
> unsigned int cpu;
>
> if (info->lcore_id != lcore_id)
> @@ -508,6 +540,18 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> rte_tel_data_add_array_int(cpuset, cpu);
> }
> rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> + /* The callback may not set all the fields in the structure, so clear it here. */
> + memset(&usage, 0, sizeof(usage));
> + /*
> + * Guard against concurrent modification of lcore_usage_cb.
> + * rte_lcore_register_usage_cb() should only be called once at application init
> + * but nothing prevents and application to reset the callback to NULL.
> + */
> + usage_cb = lcore_usage_cb;
> + if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
> + rte_tel_data_add_dict_u64(info->d, "total_cycles", usage.total_cycles);
> + rte_tel_data_add_dict_u64(info->d, "busy_cycles", usage.busy_cycles);
> + }
>
> return 0;
> }
> diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
> index 9c7865052100..b1c8afb05d28 100644
> --- a/lib/eal/include/rte_lcore.h
> +++ b/lib/eal/include/rte_lcore.h
> @@ -328,6 +328,41 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
> int
> rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
>
> +/**
> + * lcore usage statistics.
> + */
> +struct rte_lcore_usage {
> + /** The total amount of time since application start, in TSC cycles. */
> + uint64_t total_cycles;
This code comment needs some clarification.
What is this "total amount of time"?
"application start" is ambiguous.
EAL lcore threads are not created/started by the application itself,
so the application has no idea of the time the lcore/threads were
created.
I would describe as:
/** The total amount of time that the application has been running on
this lcore, in TSC cycles. */
Is it acceptable to you?
> + /** The amount of busy time since application start, in TSC cycles. */
> + uint64_t busy_cycles;
And here:
/** The amount of time the application was busy, handling some
workload on this lcore, in TSC cycles. */
> +};
> +
> +/**
> + * Callback to allow applications to report lcore usage.
> + *
> + * @param [in] lcore_id
> + * The lcore to consider.
> + * @param [out] usage
> + * Counters representing this lcore usage. This can never be NULL.
> + * @return
> + * - 0 if fields in usage were updated successfully. The fields that the
> + * application does not support must not be modified.
> + * - a negative value if the information is not available or if any error occurred.
> + */
> +typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
> +
> +/**
Missing a experimental banner:
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
> + * Register a callback from an application to be called in rte_lcore_dump() and
> + * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
> + * report lcore usage statistics via this callback.
> + *
> + * @param cb
> + * The callback function.
> + */
> +__rte_experimental
> +void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
> +
> /**
> * List all lcores.
> *
> diff --git a/lib/eal/version.map b/lib/eal/version.map
> index 6523102157e2..1f70caac7b9c 100644
> --- a/lib/eal/version.map
> +++ b/lib/eal/version.map
> @@ -442,6 +442,7 @@ EXPERIMENTAL {
>
> # added in 23.03
> rte_thread_set_name;
> + rte_lcore_register_usage_cb;
Alphabetical order.
> };
>
> INTERNAL {
> --
> 2.39.1
>
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 4/5] app/testpmd: report lcore usage
2023-02-02 13:43 ` [PATCH v8 4/5] app/testpmd: report lcore usage Robin Jarry
2023-02-06 3:31 ` fengchengwen
@ 2023-02-06 8:58 ` David Marchand
2023-02-06 9:08 ` Robin Jarry
1 sibling, 1 reply; 134+ messages in thread
From: David Marchand @ 2023-02-06 8:58 UTC (permalink / raw)
To: Robin Jarry
Cc: dev, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
Salut Robin,
On Thu, Feb 2, 2023 at 2:44 PM Robin Jarry <rjarry@redhat.com> wrote:
>
> Reuse the --record-core-cycles option to account for busy cycles. One
> turn of packet_fwd_t is considered "busy" if there was at least one
> received or transmitted packet.
>
> Add a new busy_cycles field in struct fwd_stream. Update get_end_cycles
> to accept an additional argument for the number of processed packets.
> Update fwd_stream.busy_cycles when the number of packets is greater than
> zero.
>
> When --record-core-cycles is specified, register a callback with
> rte_lcore_register_usage_cb(). In the callback, use the new lcore_id
> field in struct fwd_lcore to identify the correct index in fwd_lcores
> and return the sum of busy/total cycles of all fwd_streams.
>
> This makes the cycles counters available in rte_lcore_dump() and the
> lcore telemetry API:
>
> testpmd> dump_lcores
> lcore 3, socket 0, role RTE, cpuset 3
> lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
> lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
I have been playing a bit with this series with two lcores, each one
polling a net/null port.
At first it looked good, but then I started to have one idle lcore, by
asking net/null not to receive anything.
$ build-clang/app/dpdk-testpmd -c 7 --no-huge -m 40 -a 0:0.0 --vdev
net_null1,no-rx=1 --vdev net_null2 -- --no-mlockall
--total-num-mbufs=2048 -ia --record-core-cycles --nb-cores=2
One thing that struck me is that an idle lcore was always showing less
"total_cycles" than a busy one.
The more time testpmd was running, the bigger the divergence between
lcores would be.
Re-reading the API, it is unclear to me (which is the reason for my
comments on patch 2).
Let's first sort out my patch 2 comments and we may revisit this patch
4 implementation afterwards (as I think we are not accounting some
mainloop cycles with current implementation).
For now, I have some comments on the existing data structures, see below.
[snip]
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index e366f81a0f46..105f75ad5f35 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -2053,7 +2053,7 @@ fwd_stats_display(void)
> fs->rx_bad_outer_ip_csum;
>
> if (record_core_cycles)
> - fwd_cycles += fs->core_cycles;
> + fwd_cycles += fs->busy_cycles;
> }
> for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
> pt_id = fwd_ports_ids[i];
> @@ -2184,6 +2184,7 @@ fwd_stats_reset(void)
>
> memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
> memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
> + fs->busy_cycles = 0;
> fs->core_cycles = 0;
> }
> }
> @@ -2260,6 +2261,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
> tics_datum = rte_rdtsc();
> tics_per_1sec = rte_get_timer_hz();
> #endif
> + fc->lcore_id = rte_lcore_id();
A fwd_lcore object is bound to a single lcore, so this lcore_id is unneeded.
> fsm = &fwd_streams[fc->stream_idx];
> nb_fs = fc->stream_nb;
> do {
> @@ -2288,6 +2290,38 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
> } while (! fc->stopped);
> }
>
> +static int
> +lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
> +{
> + struct fwd_stream **fsm;
> + struct fwd_lcore *fc;
> + streamid_t nb_fs;
> + streamid_t sm_id;
> + int c;
> +
> + for (c = 0; c < nb_lcores; c++) {
> + fc = fwd_lcores[c];
> + if (fc->lcore_id != lcore_id)
> + continue;
You can find which fwd_lcore is mapped to a lcore using existing structures.
This requires updating some helper, something like:
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970..e5297ee7fb 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -785,25 +785,31 @@ is_proc_primary(void)
return rte_eal_process_type() == RTE_PROC_PRIMARY;
}
-static inline unsigned int
-lcore_num(void)
+void
+parse_fwd_portlist(const char *port);
+
+static inline struct fwd_lcore *
+lcore_to_fwd_lcore(uint16_t lcore_id)
{
unsigned int i;
- for (i = 0; i < RTE_MAX_LCORE; ++i)
- if (fwd_lcores_cpuids[i] == rte_lcore_id())
- return i;
+ for (i = 0; i < cur_fwd_config.nb_fwd_lcores; ++i) {
+ if (fwd_lcores_cpuids[i] == lcore_id)
+ return fwd_lcores[i];
+ }
- rte_panic("lcore_id of current thread not found in
fwd_lcores_cpuids\n");
+ return NULL;
}
-void
-parse_fwd_portlist(const char *port);
-
static inline struct fwd_lcore *
current_fwd_lcore(void)
{
- return fwd_lcores[lcore_num()];
+ struct fwd_lcore *fc = lcore_to_fwd_lcore(rte_lcore_id());
+
+ if (fc == NULL)
+ rte_panic("lcore_id of current thread not found in
fwd_lcores_cpuids\n");
+
+ return fc;
}
/* Mbuf Pools */
And then by using this new helper, lcore_usage_callback becomes simpler:
+static int
+lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+
+ fc = lcore_to_fwd_lcore(lcore_id);
+ if (fc == NULL)
+ return -1;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ usage->busy_cycles = 0;
+ usage->total_cycles = 0;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++) {
+ if (fsm[sm_id]->disabled)
+ continue;
+
+ usage->busy_cycles += fsm[sm_id]->busy_cycles;
+ usage->total_cycles += fsm[sm_id]->core_cycles;
+ }
+
+ return 0;
+}
+
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 2/5] eal: report applications lcore usage
2023-02-06 8:48 ` David Marchand
@ 2023-02-06 9:03 ` Robin Jarry
0 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 9:03 UTC (permalink / raw)
To: David Marchand, Morten Brørup, Kevin Laatz; +Cc: dev, Chengwen Feng
David Marchand, Feb 06, 2023 at 09:48:
> > +struct rte_lcore_usage {
> > + /** The total amount of time since application start, in TSC cycles. */
> > + uint64_t total_cycles;
>
> This code comment needs some clarification.
>
> What is this "total amount of time"?
> "application start" is ambiguous.
> EAL lcore threads are not created/started by the application itself,
> so the application has no idea of the time the lcore/threads were
> created.
>
> I would describe as:
> /** The total amount of time that the application has been running on
> this lcore, in TSC cycles. */
>
> Is it acceptable to you?
Yes, this leaves less room for interpretation.
> > + /** The amount of busy time since application start, in TSC cycles. */
> > + uint64_t busy_cycles;
>
> And here:
> /** The amount of time the application was busy, handling some
> workload on this lcore, in TSC cycles. */
This is in line with the total. Looks good to me.
I will address that and your other comments for v9.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 4/5] app/testpmd: report lcore usage
2023-02-06 8:58 ` David Marchand
@ 2023-02-06 9:08 ` Robin Jarry
2023-02-06 15:06 ` David Marchand
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 9:08 UTC (permalink / raw)
To: David Marchand
Cc: dev, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
David Marchand, Feb 06, 2023 at 09:58:
> I have been playing a bit with this series with two lcores, each one
> polling a net/null port.
> At first it looked good, but then I started to have one idle lcore, by
> asking net/null not to receive anything.
>
> $ build-clang/app/dpdk-testpmd -c 7 --no-huge -m 40 -a 0:0.0 --vdev
> net_null1,no-rx=1 --vdev net_null2 -- --no-mlockall
> --total-num-mbufs=2048 -ia --record-core-cycles --nb-cores=2
>
> One thing that struck me is that an idle lcore was always showing less
> "total_cycles" than a busy one.
> The more time testpmd was running, the bigger the divergence between
> lcores would be.
>
> Re-reading the API, it is unclear to me (which is the reason for my
> comments on patch 2).
> Let's first sort out my patch 2 comments and we may revisit this patch
> 4 implementation afterwards (as I think we are not accounting some
> mainloop cycles with current implementation).
Indeed, we are not accounting for all cycles. Only the cycles spent in
the packet_fwd_t functions. This was already the case before my series
I only added the busy cycles accounting.
However, I agree that this should be updated to take all cycles into
account (as much as it is possible with the current code base). Maybe
this could be done as a separate patch or do you want to include it in
this series?
> A fwd_lcore object is bound to a single lcore, so this lcore_id is
> unneeded.
[snip]
> You can find which fwd_lcore is mapped to a lcore using existing
> structures. This requires updating some helper, something like:
I had missed that. Indeed, no need for a new field. I'll address that in
v9.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 2/5] eal: report applications lcore usage
2023-02-06 7:36 ` Morten Brørup
2023-02-06 8:21 ` Robin Jarry
@ 2023-02-06 11:18 ` fengchengwen
1 sibling, 0 replies; 134+ messages in thread
From: fengchengwen @ 2023-02-06 11:18 UTC (permalink / raw)
To: Morten Brørup, Robin Jarry, dev; +Cc: Kevin Laatz
On 2023/2/6 15:36, Morten Brørup wrote:
>> From: fengchengwen [mailto:fengchengwen@huawei.com]
>> Sent: Monday, 6 February 2023 05.00
>>
>> Hi Robin,
>>
>> On 2023/2/2 21:43, Robin Jarry wrote:
>>> Allow applications to register a callback that will be invoked in
>>> rte_lcore_dump() and when requesting lcore info in the telemetry API.
>>>
>>> The callback is expected to return the number of TSC cycles that have
>>> passed since application start and the number of these cycles that
>> were
>>> spent doing busy work.
>>>
>>
>> The 'record_burst_stats' also important for performance tune.
>
> While I agree that burst size spread is important for comparing CPU usage and capacity, the 'record_burst_stats' is a test_pmd specific feature. Applications may implement something different.
>
>>
>> Is it possible to incorporate it into this framework?
>
> We all agreed on CPU usage metrics for this patch, so let's limit ourselves this time.
>
> If we want more performance metrics, such as RX and TX burst size spread, they can be proposed and discussed in a separate RFC. It might trigger a lot of discussion, like the original lcore busyness patch that inspired this patch series.
>
It's okay for a separate RFC.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 1/5] eal: add lcore info in telemetry
2023-02-06 8:22 ` Robin Jarry
@ 2023-02-06 11:22 ` fengchengwen
2023-02-06 11:46 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: fengchengwen @ 2023-02-06 11:22 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Morten Brørup, Kevin Laatz
On 2023/2/6 16:22, Robin Jarry wrote:
> fengchengwen, Feb 06, 2023 at 04:50:
>> > + return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
>>
>> lcore_iterate will iterate and find the lcore.
>>
>> How about add one new API e.g. rte_lcore_cb(xxx) ?
>
> Hi fengchengwen,
>
> what would that new API do?
Just invoke callback on one specific lcore.
In this patch, the lcore_telemetry_info_cb() is only valid on a specific lcore, but it was implemented with rte_lcore_iterate which will iterate all lcores.
>
>
> .
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 5/5] eal: add lcore usage telemetry endpoint
2023-02-06 8:24 ` Robin Jarry
@ 2023-02-06 11:32 ` fengchengwen
0 siblings, 0 replies; 134+ messages in thread
From: fengchengwen @ 2023-02-06 11:32 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Kevin Laatz
On 2023/2/6 16:24, Robin Jarry wrote:
> fengchengwen, Feb 06, 2023 at 04:27:
>> The telemetry should be human-readable also.
>>
>> so why not "/eal/lcore/usage": {
>> "lcore_4" : {
>> "total_cycles" : xxx
>> "busy_cycles" : xxx
>> "busy/total ratio" : "xx%"
>> },
>> "lcore_5" : {
>> "total_cycles" : yyy
>> "busy_cycles" : yyy
>> "busy/total ratio" : "yy%"
>> },
>> }
>
> The raw data is exposed and can be rendered any way you like. This should be left to external monitoring tools, such as grafana & al.
It's a small step in programming, but it's more user friendly.
Once done, users who use telemetry could benefit from it.
And it can still be rendered by monitoring tools because there's no data loss.
>
>
> .
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 1/5] eal: add lcore info in telemetry
2023-02-06 11:22 ` fengchengwen
@ 2023-02-06 11:46 ` Robin Jarry
2023-02-06 12:08 ` fengchengwen
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 11:46 UTC (permalink / raw)
To: fengchengwen, dev; +Cc: Morten Brørup, Kevin Laatz
fengchengwen, Feb 06, 2023 at 12:22:
> Just invoke callback on one specific lcore.
>
> In this patch, the lcore_telemetry_info_cb() only valid on specific
> lcore, but it was implements by rte_lcore_iterate which will iterate
> all lcores.
Ok I see. I don't think this would be worth the effort. There will never
be more than a few hundred cores. The performance gain would be
negligible. What do you think?
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 1/5] eal: add lcore info in telemetry
2023-02-06 11:46 ` Robin Jarry
@ 2023-02-06 12:08 ` fengchengwen
0 siblings, 0 replies; 134+ messages in thread
From: fengchengwen @ 2023-02-06 12:08 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Morten Brørup, Kevin Laatz
On 2023/2/6 19:46, Robin Jarry wrote:
> fengchengwen, Feb 06, 2023 at 12:22:
>> Just invoke callback on one specific lcore.
>>
>> In this patch, the lcore_telemetry_info_cb() is only valid on a specific lcore, but it was implemented with rte_lcore_iterate which will iterate all lcores.
>
> Ok I see. I don't think this would be worth the effort. There will never be more than a few hundred cores. The performance gain would be negligible. What do you think?
Okay for not add.
And for this patch,
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
>
>
> .
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v8 4/5] app/testpmd: report lcore usage
2023-02-06 9:08 ` Robin Jarry
@ 2023-02-06 15:06 ` David Marchand
0 siblings, 0 replies; 134+ messages in thread
From: David Marchand @ 2023-02-06 15:06 UTC (permalink / raw)
To: Robin Jarry, Aman Singh, Yuying Zhang
Cc: dev, Morten Brørup, Konstantin Ananyev, Kevin Laatz
On Mon, Feb 6, 2023 at 10:08 AM Robin Jarry <rjarry@redhat.com> wrote:
>
> David Marchand, Feb 06, 2023 at 09:58:
> > I have been playing a bit with this series with two lcores, each one
> > polling a net/null port.
> > At first it looked good, but then I started to have one idle lcore, by
> > asking net/null not to receive anything.
> >
> > $ build-clang/app/dpdk-testpmd -c 7 --no-huge -m 40 -a 0:0.0 --vdev
> > net_null1,no-rx=1 --vdev net_null2 -- --no-mlockall
> > --total-num-mbufs=2048 -ia --record-core-cycles --nb-cores=2
> >
> > One thing that struck me is that an idle lcore was always showing less
> > "total_cycles" than a busy one.
> > The more time testpmd was running, the bigger the divergence between
> > lcores would be.
> >
> > Re-reading the API, it is unclear to me (which is the reason for my
> > comments on patch 2).
> > Let's first sort out my patch 2 comments and we may revisit this patch
> > 4 implementation afterwards (as I think we are not accounting some
> > mainloop cycles with current implementation).
>
> Indeed, we are not accounting for all cycles. Only the cycles spent in
> the packet_fwd_t functions. This was already the case before my series
> I only added the busy cycles accounting.
"busy" cycles is what was already present in testpmd under the
core_cycles report existing feature: get_end_cycles was only called
with nb_rx + nb_tx > 0.
The only change with this patch is its internal name, there is no
addition on this topic.
But this patch adds "total_cycles" for testpmd...
>
>
> However, I agree that this should be updated to take all cycles into
> account (as much as it is possible with the current code base). Maybe
> this could be done as a separate patch or do you want to include it in
> this series?
... and its implementation seems non compliant with the lcore_usage
API as discussed in patch 2.
As for how much cycles are counted as busy (meaning, should we count
cycles spent in the mainloop too), I think it is better but that would
be a change in the core_cycles report existing feature.
I'd really like to hear from testpmd maintainers.
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-01-18 16:46 ` Robin Jarry
@ 2023-02-06 20:07 ` Konstantin Ananyev
2023-02-06 20:29 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Konstantin Ananyev @ 2023-02-06 20:07 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
>
> Konstantin Ananyev, Jan 04, 2023 at 11:53:
> > Probably we can even print warning or so if some-one tries to overwrite
> > it once again.
>
> I'm not sure that is necessary. If an application wants to reset the
> callback to NULL at any point in time, I don't see why DPDK should tell
> them it is a bad thing.
Problem is not in resetting cb function itself.
Usually with CB user needs some sort of data structure (to accumulate stats, track states, etc.).
If we allow to reset the CB, then it arises the question when/how should we allow
user to free associated data?
And, as I understand, we don't have a clear way to do it.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-02-06 20:07 ` Konstantin Ananyev
@ 2023-02-06 20:29 ` Robin Jarry
2023-02-06 20:34 ` Konstantin Ananyev
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 20:29 UTC (permalink / raw)
To: Konstantin Ananyev, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
Konstantin Ananyev, Feb 06, 2023 at 21:07:
> Problem is not in resetting cb function itself.
>
> Usually with CB user needs some sort of data structure (to accumulate
> stats, track states, etc.). If we allow to reset the CB, then it
> arises the question when/how should we allow user to free associated
> data?
>
> And, as I undersand, we don't have a clear way to do it.
If the application decides to reset the callback function, they are in
a good position to determine what resources they need to free. I don't
see why EAL should get involved here but I may be missing a point.
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-02-06 20:29 ` Robin Jarry
@ 2023-02-06 20:34 ` Konstantin Ananyev
2023-02-06 20:39 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Konstantin Ananyev @ 2023-02-06 20:34 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
> -----Original Message-----
> From: Robin Jarry <rjarry@redhat.com>
> Sent: Monday, February 6, 2023 8:29 PM
> To: Konstantin Ananyev <konstantin.ananyev@huawei.com>; dev@dpdk.org
> Cc: Tyler Retzlaff <roretzla@linux.microsoft.com>; Kevin Laatz <kevin.laatz@intel.com>; Morten Brørup
> <mb@smartsharesystems.com>
> Subject: Re: [PATCH v5 2/4] eal: allow applications to report their cpu usage
>
> Konstantin Ananyev, Feb 06, 2023 at 21:07:
> > Problem is not in resetting cb function itself.
> >
> > Usually with CB user needs some sort of data structure (to accumulate
> > stats, track states, etc.). If we allow to reset the CB, then it
> > arises the question when/how should we allow user to free associated
> > data?
> >
> > And, as I understand, we don't have a clear way to do it.
>
> If the application decides to reset the callback function, they are in
> a good position to determine what resources they need to free.
Yes, app knows what resources it wants to free.
But it has no way to know *when* it is safe to free them.
Just a bit more explanation:
App invokes your function which resets global value of CB.
How would it know that after return from this function none
other thread still not executing this CB right now?
And how determine when this thread will finish with executing CB function?
That's why it might be easier simply not allow to reset it at all....
> I don't
> see why EAL should get involved here but I may be missing a point.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-02-06 20:34 ` Konstantin Ananyev
@ 2023-02-06 20:39 ` Robin Jarry
2023-02-06 20:44 ` Konstantin Ananyev
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 20:39 UTC (permalink / raw)
To: Konstantin Ananyev, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
Konstantin Ananyev, Feb 06, 2023 at 21:34:
> Yes, app knows what resources it wants to free.
> But it has no way to know *when* it is safe to free them.
> Just a bit more explanation:
> App invokes your function which resets global value of CB.
> How would it know that after return from this function none
> other thread still not executing this CB right now?
> And how determine when this thread will finish with executing CB function?
> That's why it might be easier simply not allow to reset it at all....
Ok I see. But what should we do to prevent this? Simply ignore the
request and log a warning?
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-02-06 20:39 ` Robin Jarry
@ 2023-02-06 20:44 ` Konstantin Ananyev
2023-02-06 20:55 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Konstantin Ananyev @ 2023-02-06 20:44 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
> Konstantin Ananyev, Feb 06, 2023 at 21:34:
> > Yes, app knows what resources it wants to free.
> > But it has no way to know *when* it is safe to free them.
> > Just a bit more explanation:
> > App invokes your function which resets global value of CB.
> > How would it know that after return from this function none
> > other thread still not executing this CB right now?
> > And how determine when this thread will finish with executing CB function?
> > That's why it might be easier simply not allow to reset it at all....
>
> Ok I see. But what should we do to prevent this? Simply ignore the
> request and log a warning?
That seems like the simplest choice to me...
Or if you still prefer to allow it - put a special comment that it is user
responsibility to handle such possible race-condition (inside his CB function or so).
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-02-06 20:44 ` Konstantin Ananyev
@ 2023-02-06 20:55 ` Robin Jarry
2023-02-07 13:12 ` Konstantin Ananyev
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-06 20:55 UTC (permalink / raw)
To: Konstantin Ananyev, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
Konstantin Ananyev, Feb 06, 2023 at 21:44:
> > Ok I see. But what should we do to prevent this? Simply ignore the
> > request and log a warning?
>
> That seems like the simplest choice to me... Or if you still prefer to
> allow it - put a special comment that it is user responsibility to
> handle such possible race-condition (inside his CB function or so).
The issue is that a warning can be easily overlooked and the application
may assume that they can free up resources whereas the callback was
never reset and may still access them.
I wonder if this could be enforced with RTE_BUILD_BUG_ON somehow. Or at
least by checking that the cb value is not NULL with RTE_ASSERT?
^ permalink raw reply [flat|nested] 134+ messages in thread
* RE: [PATCH v5 2/4] eal: allow applications to report their cpu usage
2023-02-06 20:55 ` Robin Jarry
@ 2023-02-07 13:12 ` Konstantin Ananyev
0 siblings, 0 replies; 134+ messages in thread
From: Konstantin Ananyev @ 2023-02-07 13:12 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Tyler Retzlaff, Kevin Laatz, Morten Brørup
> > > Ok I see. But what should we do to prevent this? Simply ignore the
> > > request and log a warning?
> >
> > That's seems like simplest choice to me... Or if you still prefer to
> > allow it - put a special comment that it is user responsibility to
> > handle such possible race-condition (inside his CB function or so).
>
> The issue is that a warning can be easily overlooked and the application
> may assume that they can free up resources whereas the callback was
> never reset and may still access them.
Yes, could happen, in principle.
> I wonder if this could be enforced with RTE_BUILD_BUG_ON somehow.
I don't think it is possible, as the value can change at runtime.
> Or at least by checking that the cb value is not NULL with RTE_ASSERT?
assert() would work, but general rule of thumb - don't panic() in library
functions, but return an error.
After another thought - might be allowing user to call it at run-time
and just documenting it properly is enough.
After all there are few ways to overcome the problem in user CB function itself.
Konstantin
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v9 0/5] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (9 preceding siblings ...)
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
@ 2023-02-07 19:37 ` Robin Jarry
2023-02-07 19:37 ` [PATCH v9 1/5] eal: add lcore info in telemetry Robin Jarry
` (6 subsequent siblings)
17 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-07 19:37 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
v9:
- Fixed changelog & version.map order.
- Updated with 64-bit integer telemetry functions.
- Refined docstrings (added notice about resetting the callback).
- Fixed accounting of total cycles in testpmd.
v8:
- Made /eal/lcore/info lcore_id argument parsing more robust.
Robin Jarry (5):
eal: add lcore info in telemetry
eal: report applications lcore usage
app/testpmd: add dump command for lcores
app/testpmd: report lcore usage
eal: add lcore usage telemetry endpoint
app/test-pmd/cmdline.c | 3 +
app/test-pmd/noisy_vnf.c | 7 +-
app/test-pmd/testpmd.c | 42 +++-
app/test-pmd/testpmd.h | 25 ++-
app/test-pmd/txonly.c | 2 +-
doc/guides/rel_notes/release_23_03.rst | 8 +
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +
lib/eal/common/eal_common_lcore.c | 219 ++++++++++++++++++--
lib/eal/include/rte_lcore.h | 48 +++++
lib/eal/version.map | 1 +
10 files changed, 329 insertions(+), 33 deletions(-)
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v9 1/5] eal: add lcore info in telemetry
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (10 preceding siblings ...)
2023-02-07 19:37 ` [PATCH v9 " Robin Jarry
@ 2023-02-07 19:37 ` Robin Jarry
2023-02-08 2:24 ` lihuisong (C)
2023-02-07 19:37 ` [PATCH v9 2/5] eal: report applications lcore usage Robin Jarry
` (5 subsequent siblings)
17 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-07 19:37 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup, Kevin Laatz
Report the same information as rte_lcore_dump() in the telemetry
API into /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9: Updated with 64 bits integers telemetry functions
lib/eal/common/eal_common_lcore.c | 123 +++++++++++++++++++++++++-----
1 file changed, 105 insertions(+), 18 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..f53fc17b4d04 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,9 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
#include "eal_private.h"
#include "eal_thread.h"
@@ -419,35 +422,35 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static const char *
+lcore_role_str(enum rte_lcore_role_t role)
+{
+ switch (role) {
+ case ROLE_RTE:
+ return "RTE";
+ case ROLE_SERVICE:
+ return "SERVICE";
+ case ROLE_NON_EAL:
+ return "NON_EAL";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
- const char *role;
FILE *f = arg;
int ret;
- switch (cfg->lcore_role[lcore_id]) {
- case ROLE_RTE:
- role = "RTE";
- break;
- case ROLE_SERVICE:
- role = "SERVICE";
- break;
- case ROLE_NON_EAL:
- role = "NON_EAL";
- break;
- default:
- role = "UNKNOWN";
- break;
- }
-
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
- rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ rte_lcore_to_socket_id(lcore_id),
+ lcore_role_str(cfg->lcore_role[lcore_id]),
+ cpuset, ret == 0 ? "" : "...");
return 0;
}
@@ -456,3 +459,87 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct lcore_telemetry_info *info = arg;
+ struct rte_tel_data *cpuset;
+ unsigned int cpu;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", lcore_role_str(cfg->lcore_role[lcore_id]));
+ cpuset = rte_tel_data_alloc();
+ if (cpuset == NULL)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ }
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ unsigned long lcore_id;
+ char *endptr;
+
+ if (params == NULL)
+ return -EINVAL;
+ errno = 0;
+ lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (*params == '\0' || *endptr != '\0' || lcore_id >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ info.lcore_id = lcore_id;
+
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v9 2/5] eal: report applications lcore usage
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (11 preceding siblings ...)
2023-02-07 19:37 ` [PATCH v9 1/5] eal: add lcore info in telemetry Robin Jarry
@ 2023-02-07 19:37 ` Robin Jarry
2023-02-07 19:37 ` [PATCH v9 3/5] app/testpmd: add dump command for lcores Robin Jarry
` (4 subsequent siblings)
17 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-07 19:37 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup, Kevin Laatz
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of TSC cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9:
- Fixed changelog & version.map order.
- Updated with 64-bit integer telemetry functions.
- Refined docstrings (added notice about resetting the callback).
doc/guides/rel_notes/release_23_03.rst | 7 ++++
lib/eal/common/eal_common_lcore.c | 40 +++++++++++++++++++--
lib/eal/include/rte_lcore.h | 48 ++++++++++++++++++++++++++
lib/eal/version.map | 1 +
4 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index 1fa101c420cd..17d38d5ea264 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -55,6 +55,13 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Added support for reporting lcore usage in applications.**
+
+ * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
+ been added to provide information similar to ``rte_lcore_dump()``.
+ * Applications can register a callback at startup via
+ ``rte_lcore_register_usage_cb()`` to provide lcore usage information.
+
* **Updated AMD axgbe driver.**
* Added multi-process support.
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index f53fc17b4d04..fcda7c50119a 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -437,20 +438,45 @@ lcore_role_str(enum rte_lcore_role_t role)
}
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+ char *usage_str = NULL;
FILE *f = arg;
int ret;
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ if (asprintf(&usage_str, ", busy cycles %"PRIu64"/%"PRIu64,
+ usage.busy_cycles, usage.total_cycles) < 0) {
+ return -ENOMEM;
+ }
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id),
lcore_role_str(cfg->lcore_role[lcore_id]),
- cpuset, ret == 0 ? "" : "...");
+ cpuset, ret == 0 ? "" : "...",
+ usage_str != NULL ? usage_str : "");
+
+ free(usage_str);
+
return 0;
}
@@ -489,7 +515,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
struct lcore_telemetry_info *info = arg;
+ struct rte_lcore_usage usage;
struct rte_tel_data *cpuset;
+ rte_lcore_usage_cb usage_cb;
unsigned int cpu;
if (info->lcore_id != lcore_id)
@@ -508,6 +536,14 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
rte_tel_data_add_array_int(cpuset, cpu);
}
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_dict_uint(info->d, "total_cycles", usage.total_cycles);
+ rte_tel_data_add_dict_uint(info->d, "busy_cycles", usage.busy_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 9c7865052100..30f83f4d578c 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,54 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * lcore usage statistics.
+ */
+struct rte_lcore_usage {
+ /**
+ * The total amount of time that the application has been running on
+ * this lcore, in TSC cycles.
+ */
+ uint64_t total_cycles;
+ /**
+ * The amount of time the application was busy, handling some
+ * workload on this lcore, in TSC cycles.
+ */
+ uint64_t busy_cycles;
+};
+
+/**
+ * Callback to allow applications to report lcore usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] usage
+ * Counters representing this lcore usage. This can never be NULL.
+ * @return
+ * - 0 if fields in usage were updated successfully. The fields that the
+ * application does not support must not be modified.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Register a callback from an application to be called in rte_lcore_dump() and
+ * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
+ * report lcore usage statistics via this callback.
+ *
+ * If a callback was already registered, it can be replaced with another callback
+ * or unregistered with NULL. The previously registered callback may remain in
+ * use for an undetermined period of time.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 6523102157e2..2c05e5f2fb60 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -441,6 +441,7 @@ EXPERIMENTAL {
rte_thread_join;
# added in 23.03
+ rte_lcore_register_usage_cb;
rte_thread_set_name;
};
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v9 3/5] app/testpmd: add dump command for lcores
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (12 preceding siblings ...)
2023-02-07 19:37 ` [PATCH v9 2/5] eal: report applications lcore usage Robin Jarry
@ 2023-02-07 19:37 ` Robin Jarry
2023-02-07 19:37 ` [PATCH v9 4/5] app/testpmd: report lcore usage Robin Jarry
` (3 subsequent siblings)
17 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-07 19:37 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9: no change
app/test-pmd/cmdline.c | 3 +++
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +++++++
2 files changed, 10 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index cb8c174020b0..bb7ff2b44989 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8357,6 +8357,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8370,6 +8372,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 79a1fa9cb73d..9ceb21dfbbdf 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -591,6 +591,13 @@ Dumps the user device list::
testpmd> dump_devargs
+dump lcores
+~~~~~~~~~~~
+
+Dumps the logical cores list::
+
+ testpmd> dump_lcores
+
dump log types
~~~~~~~~~~~~~~
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v9 4/5] app/testpmd: report lcore usage
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (13 preceding siblings ...)
2023-02-07 19:37 ` [PATCH v9 3/5] app/testpmd: add dump command for lcores Robin Jarry
@ 2023-02-07 19:37 ` Robin Jarry
2023-02-08 2:59 ` lihuisong (C)
2023-02-07 19:37 ` [PATCH v9 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
` (2 subsequent siblings)
17 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-07 19:37 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
The --record-core-cycles option already accounts for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Rename core_cycles to busy_cycles in struct fwd_stream to make it more
explicit. Add total_cycles to struct fwd_lcore. Add cycles accounting in
noisy_vnf where it was missing.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb() and update total_cycles every turn of
lcore loop based on a starting tsc value.
In the callback, resolve the proper struct fwd_lcore based on lcore_id
and return the lcore total_cycles and the sum of busy_cycles of all its
fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9: Fixed accounting of total cycles
app/test-pmd/noisy_vnf.c | 7 ++++++-
app/test-pmd/testpmd.c | 42 ++++++++++++++++++++++++++++++++++++----
app/test-pmd/testpmd.h | 25 +++++++++++++++---------
app/test-pmd/txonly.c | 2 +-
4 files changed, 61 insertions(+), 15 deletions(-)
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..b3bfa84af211 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -152,6 +152,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
uint64_t delta_ms;
bool needs_flush = false;
uint64_t now;
+ uint64_t start_tsc = 0;
+
+ get_start_cycles(&start_tsc);
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
@@ -169,7 +172,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
inc_tx_burst_stats(fs, nb_tx);
fs->tx_packets += nb_tx;
fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx);
- return;
+ goto end;
}
fifo_free = rte_ring_free_count(ncf->f);
@@ -219,6 +222,8 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+end:
+ get_end_cycles(fs, start_tsc);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index e366f81a0f46..eeb96aefa80b 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2145,7 +2145,7 @@ fwd_stats_display(void)
else
total_pkts = total_recv;
- printf("\n CPU cycles/packet=%.2F (total cycles="
+ printf("\n CPU cycles/packet=%.2F (busy cycles="
"%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
" MHz Clock\n",
(double) fwd_cycles / total_pkts,
@@ -2184,8 +2184,10 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
- fs->core_cycles = 0;
+ fs->busy_cycles = 0;
}
+ for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++)
+ fwd_lcores[i]->total_cycles = 0;
}
static void
@@ -2248,6 +2250,7 @@ static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
struct fwd_stream **fsm;
+ uint64_t start_tsc;
streamid_t nb_fs;
streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
@@ -2262,6 +2265,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
#endif
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
+ start_tsc = rte_rdtsc();
do {
for (sm_id = 0; sm_id < nb_fs; sm_id++)
if (!fsm[sm_id]->disabled)
@@ -2284,10 +2288,36 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
latencystats_lcore_id == rte_lcore_id())
rte_latencystats_update();
#endif
-
+ if (record_core_cycles)
+ fc->total_cycles = rte_rdtsc() - start_tsc;
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+
+ fc = lcore_to_fwd_lcore(lcore_id);
+ if (fc == NULL)
+ return -1;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ usage->busy_cycles = 0;
+ usage->total_cycles = fc->total_cycles;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++) {
+ if (!fsm[sm_id]->disabled)
+ usage->busy_cycles += fsm[sm_id]->busy_cycles;
+ }
+
+ return 0;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4527,6 +4557,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..6ec2f6879b47 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,7 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +360,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ uint64_t total_cycles; /**< used with --record-core-cycles */
};
/*
@@ -785,16 +786,17 @@ is_proc_primary(void)
return rte_eal_process_type() == RTE_PROC_PRIMARY;
}
-static inline unsigned int
-lcore_num(void)
+static inline struct fwd_lcore *
+lcore_to_fwd_lcore(uint16_t lcore_id)
{
unsigned int i;
- for (i = 0; i < RTE_MAX_LCORE; ++i)
- if (fwd_lcores_cpuids[i] == rte_lcore_id())
- return i;
+ for (i = 0; i < cur_fwd_config.nb_fwd_lcores; ++i) {
+ if (fwd_lcores_cpuids[i] == lcore_id)
+ return fwd_lcores[i];
+ }
- rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
+ return NULL;
}
void
@@ -803,7 +805,12 @@ parse_fwd_portlist(const char *port);
static inline struct fwd_lcore *
current_fwd_lcore(void)
{
- return fwd_lcores[lcore_num()];
+ struct fwd_lcore *fc = lcore_to_fwd_lcore(rte_lcore_id());
+
+ if (fc == NULL)
+ rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
+
+ return fc;
}
/* Mbuf Pools */
@@ -839,7 +846,7 @@ static inline void
get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
{
if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ fs->busy_cycles += rte_rdtsc() - start_tsc;
}
static inline void
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952daa..c4e1c2aa2af1 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
struct rte_ether_hdr eth_hdr;
- uint16_t nb_tx;
+ uint16_t nb_tx = 0;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v9 5/5] eal: add lcore usage telemetry endpoint
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (14 preceding siblings ...)
2023-02-07 19:37 ` [PATCH v9 4/5] app/testpmd: report lcore usage Robin Jarry
@ 2023-02-07 19:37 ` Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
17 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-07 19:37 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Kevin Laatz
Allow fetching CPU cycles usage for all lcores with a single request.
This endpoint is intended for repeated and frequent invocations by
external monitoring systems and therefore returns condensed data.
It consists of a single dictionary with three keys: "lcore_ids",
"total_cycles" and "busy_cycles" that are mapped to three arrays of
integer values. Each array has the same number of values, one per lcore,
in the same order.
Example:
--> /eal/lcore/usage
{
"/eal/lcore/usage": {
"lcore_ids": [
4,
5
],
"total_cycles": [
23846845590,
23900558914
],
"busy_cycles": [
21043446682,
21448837316
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9: Updated with 64 bits integer telemetry functions
doc/guides/rel_notes/release_23_03.rst | 5 ++-
lib/eal/common/eal_common_lcore.c | 60 ++++++++++++++++++++++++++
2 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index 17d38d5ea264..4f2878846829 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -57,8 +57,9 @@ New Features
* **Added support for reporting lcore usage in applications.**
- * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
- been added to provide information similar to ``rte_lcore_dump()``.
+ * The ``/eal/lcore/list``, ``/eal/lcore/usage`` and ``/eal/lcore/info``
+ telemetry endpoints have been added to provide information similar to
+ ``rte_lcore_dump()``.
* Applications can register a callback at startup via
``rte_lcore_register_usage_cb()`` to provide lcore usage information.
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index fcda7c50119a..088adb14b4b6 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -569,6 +569,63 @@ handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_t
return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
}
+struct lcore_telemetry_usage {
+ struct rte_tel_data *lcore_ids;
+ struct rte_tel_data *total_cycles;
+ struct rte_tel_data *busy_cycles;
+};
+
+static int
+lcore_telemetry_usage_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_usage *u = arg;
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_array_uint(u->lcore_ids, lcore_id);
+ rte_tel_data_add_array_uint(u->total_cycles, usage.total_cycles);
+ rte_tel_data_add_array_uint(u->busy_cycles, usage.busy_cycles);
+ }
+
+ return 0;
+}
+
+static int
+handle_lcore_usage(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ struct lcore_telemetry_usage usage;
+ struct rte_tel_data *lcore_ids = rte_tel_data_alloc();
+ struct rte_tel_data *total_cycles = rte_tel_data_alloc();
+ struct rte_tel_data *busy_cycles = rte_tel_data_alloc();
+
+ if (!lcore_ids || !total_cycles || !busy_cycles) {
+ rte_tel_data_free(lcore_ids);
+ rte_tel_data_free(total_cycles);
+ rte_tel_data_free(busy_cycles);
+ return -ENOMEM;
+ }
+
+ rte_tel_data_start_dict(d);
+ rte_tel_data_start_array(lcore_ids, RTE_TEL_UINT_VAL);
+ rte_tel_data_start_array(total_cycles, RTE_TEL_UINT_VAL);
+ rte_tel_data_start_array(busy_cycles, RTE_TEL_UINT_VAL);
+ rte_tel_data_add_dict_container(d, "lcore_ids", lcore_ids, 0);
+ rte_tel_data_add_dict_container(d, "total_cycles", total_cycles, 0);
+ rte_tel_data_add_dict_container(d, "busy_cycles", busy_cycles, 0);
+ usage.lcore_ids = lcore_ids;
+ usage.total_cycles = total_cycles;
+ usage.busy_cycles = busy_cycles;
+
+ return rte_lcore_iterate(lcore_telemetry_usage_cb, &usage);
+}
+
RTE_INIT(lcore_telemetry)
{
rte_telemetry_register_cmd(
@@ -577,5 +634,8 @@ RTE_INIT(lcore_telemetry)
rte_telemetry_register_cmd(
"/eal/lcore/info", handle_lcore_info,
"Returns lcore info. Parameters: int lcore_id");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/usage", handle_lcore_usage,
+ "Returns lcore cycles usage. Takes no parameters");
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v9 1/5] eal: add lcore info in telemetry
2023-02-07 19:37 ` [PATCH v9 1/5] eal: add lcore info in telemetry Robin Jarry
@ 2023-02-08 2:24 ` lihuisong (C)
2023-02-08 17:04 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: lihuisong (C) @ 2023-02-08 2:24 UTC (permalink / raw)
To: dev
在 2023/2/8 3:37, Robin Jarry 写道:
> Report the same information as rte_lcore_dump() in the telemetry
> API into /eal/lcore/list and /eal/lcore/info,ID.
>
> Example:
>
> --> /eal/lcore/info,3
> {
> "/eal/lcore/info": {
> "lcore_id": 3,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 3
> ]
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> ---
>
> Notes:
> v8 -> v9: Updated with 64 bits integers telemetry functions
>
> lib/eal/common/eal_common_lcore.c | 123 +++++++++++++++++++++++++-----
> 1 file changed, 105 insertions(+), 18 deletions(-)
>
> diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
> index 06c594b0224f..f53fc17b4d04 100644
> --- a/lib/eal/common/eal_common_lcore.c
> +++ b/lib/eal/common/eal_common_lcore.c
> @@ -10,6 +10,9 @@
> #include <rte_errno.h>
> #include <rte_lcore.h>
> #include <rte_log.h>
> +#ifndef RTE_EXEC_ENV_WINDOWS
> +#include <rte_telemetry.h>
> +#endif
>
> #include "eal_private.h"
> #include "eal_thread.h"
> @@ -419,35 +422,35 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
> return ret;
> }
>
> +static const char *
> +lcore_role_str(enum rte_lcore_role_t role)
> +{
> + switch (role) {
> + case ROLE_RTE:
> + return "RTE";
> + case ROLE_SERVICE:
> + return "SERVICE";
> + case ROLE_NON_EAL:
> + return "NON_EAL";
> + default:
> + return "UNKNOWN";
> + }
> +}
> +
> static int
> lcore_dump_cb(unsigned int lcore_id, void *arg)
> {
> struct rte_config *cfg = rte_eal_get_configuration();
> char cpuset[RTE_CPU_AFFINITY_STR_LEN];
> - const char *role;
> FILE *f = arg;
> int ret;
>
> - switch (cfg->lcore_role[lcore_id]) {
> - case ROLE_RTE:
> - role = "RTE";
> - break;
> - case ROLE_SERVICE:
> - role = "SERVICE";
> - break;
> - case ROLE_NON_EAL:
> - role = "NON_EAL";
> - break;
> - default:
> - role = "UNKNOWN";
> - break;
> - }
> -
> ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
> sizeof(cpuset));
> fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
> - rte_lcore_to_socket_id(lcore_id), role, cpuset,
> - ret == 0 ? "" : "...");
> + rte_lcore_to_socket_id(lcore_id),
> + lcore_role_str(cfg->lcore_role[lcore_id]),
> + cpuset, ret == 0 ? "" : "...");
> return 0;
> }
The above modification doesn't seem to be related to this patch. Suggest
remove or delete it from this patch.
>
> @@ -456,3 +459,87 @@ rte_lcore_dump(FILE *f)
> {
> rte_lcore_iterate(lcore_dump_cb, f);
> }
> +
> +#ifndef RTE_EXEC_ENV_WINDOWS
> +static int
> +lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
> +{
> + struct rte_tel_data *d = arg;
> + return rte_tel_data_add_array_int(d, lcore_id);
> +}
> +
> +static int
> +handle_lcore_list(const char *cmd __rte_unused,
> + const char *params __rte_unused,
> + struct rte_tel_data *d)
> +{
> + int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
> + if (ret)
> + return ret;
> + return rte_lcore_iterate(lcore_telemetry_id_cb, d);
> +}
> +
> +struct lcore_telemetry_info {
> + unsigned int lcore_id;
> + struct rte_tel_data *d;
> +};
> +
> +static int
> +lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
> +{
> + struct rte_config *cfg = rte_eal_get_configuration();
> + struct lcore_telemetry_info *info = arg;
> + struct rte_tel_data *cpuset;
> + unsigned int cpu;
> +
> + if (info->lcore_id != lcore_id)
Suggest: info->lcore_id != lcore_id -> lcore_id != info->lcore_id
Here, info->lcore_id is a target and lcore_id is the variable to be judged, right?
> + return 0;
> +
> + rte_tel_data_start_dict(info->d);
> + rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
> + rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
> + rte_tel_data_add_dict_string(info->d, "role", lcore_role_str(cfg->lcore_role[lcore_id]));
> + cpuset = rte_tel_data_alloc();
> + if (cpuset == NULL)
> + return -ENOMEM;
> + rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
> + for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
> + if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
> + rte_tel_data_add_array_int(cpuset, cpu);
> + }
> + rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
> +
> + return 0;
> +}
> +
> +static int
> +handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
> +{
> + struct lcore_telemetry_info info = { .d = d };
> + unsigned long lcore_id;
> + char *endptr;
> +
> + if (params == NULL)
> + return -EINVAL;
> + errno = 0;
> + lcore_id = strtoul(params, &endptr, 10);
> + if (errno)
> + return -errno;
> + if (*params == '\0' || *endptr != '\0' || lcore_id >= RTE_MAX_LCORE)
> + return -EINVAL;
> +
> + info.lcore_id = lcore_id;
> +
> + return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
> +}
> +
> +RTE_INIT(lcore_telemetry)
> +{
> + rte_telemetry_register_cmd(
> + "/eal/lcore/list", handle_lcore_list,
> + "List of lcore ids. Takes no parameters");
> + rte_telemetry_register_cmd(
> + "/eal/lcore/info", handle_lcore_info,
> + "Returns lcore info. Parameters: int lcore_id");
> +}
> +#endif /* !RTE_EXEC_ENV_WINDOWS */
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v9 4/5] app/testpmd: report lcore usage
2023-02-07 19:37 ` [PATCH v9 4/5] app/testpmd: report lcore usage Robin Jarry
@ 2023-02-08 2:59 ` lihuisong (C)
0 siblings, 0 replies; 134+ messages in thread
From: lihuisong (C) @ 2023-02-08 2:59 UTC (permalink / raw)
To: Robin Jarry, dev
Cc: Morten Brørup, Konstantin Ananyev, Kevin Laatz, Aman Singh,
Yuying Zhang
在 2023/2/8 3:37, Robin Jarry 写道:
> The --record-core-cycles option already accounts for busy cycles. One
> turn of packet_fwd_t is considered "busy" if there was at least one
> received or transmitted packet.
>
> Rename core_cycles to busy_cycles in struct fwd_stream to make it more
> explicit. Add total_cycles to struct fwd_lcore. Add cycles accounting in
> noisy_vnf where it was missing.
>
> When --record-core-cycles is specified, register a callback with
> rte_lcore_register_usage_cb() and update total_cycles every turn of
> lcore loop based on a starting tsc value.
>
> In the callback, resolve the proper struct fwd_lcore based on lcore_id
> and return the lcore total_cycles and the sum of busy_cycles of all its
> fwd_streams.
>
> This makes the cycles counters available in rte_lcore_dump() and the
> lcore telemetry API:
>
> testpmd> dump_lcores
> lcore 3, socket 0, role RTE, cpuset 3
> lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
> lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
>
> --> /eal/lcore/info,4
> {
> "/eal/lcore/info": {
> "lcore_id": 4,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 4
> ],
> "busy_cycles": 10623340318,
> "total_cycles": 55331167354
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> ---
...
> static inline void
> diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
> index 021624952daa..c4e1c2aa2af1 100644
> --- a/app/test-pmd/txonly.c
> +++ b/app/test-pmd/txonly.c
> @@ -331,7 +331,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
> struct rte_mbuf *pkt;
> struct rte_mempool *mbp;
> struct rte_ether_hdr eth_hdr;
> - uint16_t nb_tx;
> + uint16_t nb_tx = 0;
useless modify?
> uint16_t nb_pkt;
> uint16_t vlan_tci, vlan_tci_outer;
> uint32_t retry;
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RESEND PATCH v9 0/5] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (15 preceding siblings ...)
2023-02-07 19:37 ` [PATCH v9 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
@ 2023-02-08 8:45 ` Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 1/5] eal: add lcore info in telemetry Robin Jarry
` (5 more replies)
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
17 siblings, 6 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-08 8:45 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
v9:
- Fixed changelog & version.map order.
- Updated with 64-bit integer telemetry functions.
- Refined docstrings (added notice about resetting the callback).
- Fixed accounting of total cycles in testpmd.
v8:
- Made /eal/lcore/info lcore_id argument parsing more robust.
Robin Jarry (5):
eal: add lcore info in telemetry
eal: report applications lcore usage
app/testpmd: add dump command for lcores
app/testpmd: report lcore usage
eal: add lcore usage telemetry endpoint
app/test-pmd/cmdline.c | 3 +
app/test-pmd/noisy_vnf.c | 8 +-
app/test-pmd/testpmd.c | 42 +++-
app/test-pmd/testpmd.h | 25 ++-
doc/guides/rel_notes/release_23_03.rst | 8 +
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +
lib/eal/common/eal_common_lcore.c | 219 ++++++++++++++++++--
lib/eal/include/rte_lcore.h | 48 +++++
lib/eal/version.map | 1 +
9 files changed, 329 insertions(+), 32 deletions(-)
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RESEND PATCH v9 1/5] eal: add lcore info in telemetry
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
@ 2023-02-08 8:45 ` Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 2/5] eal: report applications lcore usage Robin Jarry
` (4 subsequent siblings)
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-08 8:45 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup, Kevin Laatz
Report the same information as rte_lcore_dump() in the telemetry
API into /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9: Updated with 64 bits integers telemetry functions
lib/eal/common/eal_common_lcore.c | 123 +++++++++++++++++++++++++-----
1 file changed, 105 insertions(+), 18 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..f53fc17b4d04 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,9 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
#include "eal_private.h"
#include "eal_thread.h"
@@ -419,35 +422,35 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static const char *
+lcore_role_str(enum rte_lcore_role_t role)
+{
+ switch (role) {
+ case ROLE_RTE:
+ return "RTE";
+ case ROLE_SERVICE:
+ return "SERVICE";
+ case ROLE_NON_EAL:
+ return "NON_EAL";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
- const char *role;
FILE *f = arg;
int ret;
- switch (cfg->lcore_role[lcore_id]) {
- case ROLE_RTE:
- role = "RTE";
- break;
- case ROLE_SERVICE:
- role = "SERVICE";
- break;
- case ROLE_NON_EAL:
- role = "NON_EAL";
- break;
- default:
- role = "UNKNOWN";
- break;
- }
-
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
- rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ rte_lcore_to_socket_id(lcore_id),
+ lcore_role_str(cfg->lcore_role[lcore_id]),
+ cpuset, ret == 0 ? "" : "...");
return 0;
}
@@ -456,3 +459,87 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret)
+ return ret;
+ return rte_lcore_iterate(lcore_telemetry_id_cb, d);
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct lcore_telemetry_info *info = arg;
+ struct rte_tel_data *cpuset;
+ unsigned int cpu;
+
+ if (info->lcore_id != lcore_id)
+ return 0;
+
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", lcore_role_str(cfg->lcore_role[lcore_id]));
+ cpuset = rte_tel_data_alloc();
+ if (cpuset == NULL)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ }
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ unsigned long lcore_id;
+ char *endptr;
+
+ if (params == NULL)
+ return -EINVAL;
+ errno = 0;
+ lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (*params == '\0' || *endptr != '\0' || lcore_id >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ info.lcore_id = lcore_id;
+
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RESEND PATCH v9 2/5] eal: report applications lcore usage
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 1/5] eal: add lcore info in telemetry Robin Jarry
@ 2023-02-08 8:45 ` Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 3/5] app/testpmd: add dump command for lcores Robin Jarry
` (3 subsequent siblings)
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-08 8:45 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup, Kevin Laatz
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of TSC cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9:
- Fixed changelog & version.map order.
- Updated with 64-bit integer telemetry functions.
- Refined docstrings (added notice about resetting the callback).
doc/guides/rel_notes/release_23_03.rst | 7 ++++
lib/eal/common/eal_common_lcore.c | 40 +++++++++++++++++++--
lib/eal/include/rte_lcore.h | 48 ++++++++++++++++++++++++++
lib/eal/version.map | 1 +
4 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index 1fa101c420cd..17d38d5ea264 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -55,6 +55,13 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Added support for reporting lcore usage in applications.**
+
+ * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
+ been added to provide information similar to ``rte_lcore_dump()``.
+ * Applications can register a callback at startup via
+ ``rte_lcore_register_usage_cb()`` to provide lcore usage information.
+
* **Updated AMD axgbe driver.**
* Added multi-process support.
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index f53fc17b4d04..fcda7c50119a 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -437,20 +438,45 @@ lcore_role_str(enum rte_lcore_role_t role)
}
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+ char *usage_str = NULL;
FILE *f = arg;
int ret;
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ if (asprintf(&usage_str, ", busy cycles %"PRIu64"/%"PRIu64,
+ usage.busy_cycles, usage.total_cycles) < 0) {
+ return -ENOMEM;
+ }
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id),
lcore_role_str(cfg->lcore_role[lcore_id]),
- cpuset, ret == 0 ? "" : "...");
+ cpuset, ret == 0 ? "" : "...",
+ usage_str != NULL ? usage_str : "");
+
+ free(usage_str);
+
return 0;
}
@@ -489,7 +515,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
struct lcore_telemetry_info *info = arg;
+ struct rte_lcore_usage usage;
struct rte_tel_data *cpuset;
+ rte_lcore_usage_cb usage_cb;
unsigned int cpu;
if (info->lcore_id != lcore_id)
@@ -508,6 +536,14 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
rte_tel_data_add_array_int(cpuset, cpu);
}
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_dict_uint(info->d, "total_cycles", usage.total_cycles);
+ rte_tel_data_add_dict_uint(info->d, "busy_cycles", usage.busy_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 9c7865052100..30f83f4d578c 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,54 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * lcore usage statistics.
+ */
+struct rte_lcore_usage {
+ /**
+ * The total amount of time that the application has been running on
+ * this lcore, in TSC cycles.
+ */
+ uint64_t total_cycles;
+ /**
+ * The amount of time the application was busy, handling some
+ * workload on this lcore, in TSC cycles.
+ */
+ uint64_t busy_cycles;
+};
+
+/**
+ * Callback to allow applications to report lcore usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] usage
+ * Counters representing this lcore usage. This can never be NULL.
+ * @return
+ * - 0 if fields in usage were updated successfully. The fields that the
+ * application does not support must not be modified.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Register a callback from an application to be called in rte_lcore_dump() and
+ * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
+ * report lcore usage statistics via this callback.
+ *
+ * If a callback was already registered, it can be replaced with another callback
+ * or unregistered with NULL. The previously registered callback may remain in
+ * use for an undetermined period of time.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 6523102157e2..2c05e5f2fb60 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -441,6 +441,7 @@ EXPERIMENTAL {
rte_thread_join;
# added in 23.03
+ rte_lcore_register_usage_cb;
rte_thread_set_name;
};
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RESEND PATCH v9 3/5] app/testpmd: add dump command for lcores
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 1/5] eal: add lcore info in telemetry Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 2/5] eal: report applications lcore usage Robin Jarry
@ 2023-02-08 8:45 ` Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 4/5] app/testpmd: report lcore usage Robin Jarry
` (2 subsequent siblings)
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-08 8:45 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9: no change
app/test-pmd/cmdline.c | 3 +++
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +++++++
2 files changed, 10 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index cb8c174020b0..bb7ff2b44989 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8357,6 +8357,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8370,6 +8372,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 79a1fa9cb73d..9ceb21dfbbdf 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -591,6 +591,13 @@ Dumps the user device list::
testpmd> dump_devargs
+dump lcores
+~~~~~~~~~~~
+
+Dumps the logical cores list::
+
+ testpmd> dump_lcores
+
dump log types
~~~~~~~~~~~~~~
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RESEND PATCH v9 4/5] app/testpmd: report lcore usage
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
` (2 preceding siblings ...)
2023-02-08 8:45 ` [RESEND PATCH v9 3/5] app/testpmd: add dump command for lcores Robin Jarry
@ 2023-02-08 8:45 ` Robin Jarry
2023-02-09 8:43 ` David Marchand
2023-02-08 8:45 ` [RESEND PATCH v9 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
2023-02-09 8:44 ` [RESEND PATCH v9 0/5] lcore telemetry improvements David Marchand
5 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-08 8:45 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
The --record-core-cycles option already accounts for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Rename core_cycles to busy_cycles in struct fwd_stream to make it more
explicit. Add total_cycles to struct fwd_lcore. Add cycles accounting in
noisy_vnf where it was missing.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb() and update total_cycles every turn of
lcore loop based on a starting tsc value.
In the callback, resolve the proper struct fwd_lcore based on lcore_id
and return the lcore total_cycles and the sum of busy_cycles of all its
fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9: Fixed accounting of total cycles
app/test-pmd/noisy_vnf.c | 8 +++++++-
app/test-pmd/testpmd.c | 42 ++++++++++++++++++++++++++++++++++++----
app/test-pmd/testpmd.h | 25 +++++++++++++++---------
3 files changed, 61 insertions(+), 14 deletions(-)
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..ce5a3e5e6987 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -144,6 +144,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
struct noisy_config *ncf = noisy_cfg[fs->rx_port];
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf *tmp_pkts[MAX_PKT_BURST];
+ uint64_t start_tsc = 0;
uint16_t nb_deqd = 0;
uint16_t nb_rx = 0;
uint16_t nb_tx = 0;
@@ -153,6 +154,8 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
bool needs_flush = false;
uint64_t now;
+ get_start_cycles(&start_tsc);
+
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
@@ -169,7 +172,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
inc_tx_burst_stats(fs, nb_tx);
fs->tx_packets += nb_tx;
fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx);
- return;
+ goto end;
}
fifo_free = rte_ring_free_count(ncf->f);
@@ -219,6 +222,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+end:
+ if (nb_rx > 0 || nb_tx > 0)
+ get_end_cycles(fs, start_tsc);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index e366f81a0f46..eeb96aefa80b 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2145,7 +2145,7 @@ fwd_stats_display(void)
else
total_pkts = total_recv;
- printf("\n CPU cycles/packet=%.2F (total cycles="
+ printf("\n CPU cycles/packet=%.2F (busy cycles="
"%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
" MHz Clock\n",
(double) fwd_cycles / total_pkts,
@@ -2184,8 +2184,10 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
- fs->core_cycles = 0;
+ fs->busy_cycles = 0;
}
+ for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++)
+ fwd_lcores[i]->total_cycles = 0;
}
static void
@@ -2248,6 +2250,7 @@ static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
struct fwd_stream **fsm;
+ uint64_t start_tsc;
streamid_t nb_fs;
streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
@@ -2262,6 +2265,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
#endif
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
+ start_tsc = rte_rdtsc();
do {
for (sm_id = 0; sm_id < nb_fs; sm_id++)
if (!fsm[sm_id]->disabled)
@@ -2284,10 +2288,36 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
latencystats_lcore_id == rte_lcore_id())
rte_latencystats_update();
#endif
-
+ if (record_core_cycles)
+ fc->total_cycles = rte_rdtsc() - start_tsc;
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+
+ fc = lcore_to_fwd_lcore(lcore_id);
+ if (fc == NULL)
+ return -1;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ usage->busy_cycles = 0;
+ usage->total_cycles = fc->total_cycles;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++) {
+ if (!fsm[sm_id]->disabled)
+ usage->busy_cycles += fsm[sm_id]->busy_cycles;
+ }
+
+ return 0;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4527,6 +4557,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..6ec2f6879b47 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,7 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +360,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ uint64_t total_cycles; /**< used with --record-core-cycles */
};
/*
@@ -785,16 +786,17 @@ is_proc_primary(void)
return rte_eal_process_type() == RTE_PROC_PRIMARY;
}
-static inline unsigned int
-lcore_num(void)
+static inline struct fwd_lcore *
+lcore_to_fwd_lcore(uint16_t lcore_id)
{
unsigned int i;
- for (i = 0; i < RTE_MAX_LCORE; ++i)
- if (fwd_lcores_cpuids[i] == rte_lcore_id())
- return i;
+ for (i = 0; i < cur_fwd_config.nb_fwd_lcores; ++i) {
+ if (fwd_lcores_cpuids[i] == lcore_id)
+ return fwd_lcores[i];
+ }
- rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
+ return NULL;
}
void
@@ -803,7 +805,12 @@ parse_fwd_portlist(const char *port);
static inline struct fwd_lcore *
current_fwd_lcore(void)
{
- return fwd_lcores[lcore_num()];
+ struct fwd_lcore *fc = lcore_to_fwd_lcore(rte_lcore_id());
+
+ if (fc == NULL)
+ rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
+
+ return fc;
}
/* Mbuf Pools */
@@ -839,7 +846,7 @@ static inline void
get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
{
if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ fs->busy_cycles += rte_rdtsc() - start_tsc;
}
static inline void
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RESEND PATCH v9 5/5] eal: add lcore usage telemetry endpoint
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
` (3 preceding siblings ...)
2023-02-08 8:45 ` [RESEND PATCH v9 4/5] app/testpmd: report lcore usage Robin Jarry
@ 2023-02-08 8:45 ` Robin Jarry
2023-02-09 8:44 ` [RESEND PATCH v9 0/5] lcore telemetry improvements David Marchand
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-08 8:45 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Kevin Laatz
Allow fetching CPU cycles usage for all lcores with a single request.
This endpoint is intended for repeated and frequent invocations by
external monitoring systems and therefore returns condensed data.
It consists of a single dictionary with three keys: "lcore_ids",
"total_cycles" and "busy_cycles" that are mapped to three arrays of
integer values. Each array has the same number of values, one per lcore,
in the same order.
Example:
--> /eal/lcore/usage
{
"/eal/lcore/usage": {
"lcore_ids": [
4,
5
],
"total_cycles": [
23846845590,
23900558914
],
"busy_cycles": [
21043446682,
21448837316
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v8 -> v9: Updated with 64 bits integer telemetry functions
doc/guides/rel_notes/release_23_03.rst | 5 ++-
lib/eal/common/eal_common_lcore.c | 60 ++++++++++++++++++++++++++
2 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index 17d38d5ea264..4f2878846829 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -57,8 +57,9 @@ New Features
* **Added support for reporting lcore usage in applications.**
- * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
- been added to provide information similar to ``rte_lcore_dump()``.
+ * The ``/eal/lcore/list``, ``/eal/lcore/usage`` and ``/eal/lcore/info``
+ telemetry endpoints have been added to provide information similar to
+ ``rte_lcore_dump()``.
* Applications can register a callback at startup via
``rte_lcore_register_usage_cb()`` to provide lcore usage information.
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index fcda7c50119a..088adb14b4b6 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -569,6 +569,63 @@ handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_t
return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
}
+struct lcore_telemetry_usage {
+ struct rte_tel_data *lcore_ids;
+ struct rte_tel_data *total_cycles;
+ struct rte_tel_data *busy_cycles;
+};
+
+static int
+lcore_telemetry_usage_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_usage *u = arg;
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_array_uint(u->lcore_ids, lcore_id);
+ rte_tel_data_add_array_uint(u->total_cycles, usage.total_cycles);
+ rte_tel_data_add_array_uint(u->busy_cycles, usage.busy_cycles);
+ }
+
+ return 0;
+}
+
+static int
+handle_lcore_usage(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ struct lcore_telemetry_usage usage;
+ struct rte_tel_data *lcore_ids = rte_tel_data_alloc();
+ struct rte_tel_data *total_cycles = rte_tel_data_alloc();
+ struct rte_tel_data *busy_cycles = rte_tel_data_alloc();
+
+ if (!lcore_ids || !total_cycles || !busy_cycles) {
+ rte_tel_data_free(lcore_ids);
+ rte_tel_data_free(total_cycles);
+ rte_tel_data_free(busy_cycles);
+ return -ENOMEM;
+ }
+
+ rte_tel_data_start_dict(d);
+ rte_tel_data_start_array(lcore_ids, RTE_TEL_UINT_VAL);
+ rte_tel_data_start_array(total_cycles, RTE_TEL_UINT_VAL);
+ rte_tel_data_start_array(busy_cycles, RTE_TEL_UINT_VAL);
+ rte_tel_data_add_dict_container(d, "lcore_ids", lcore_ids, 0);
+ rte_tel_data_add_dict_container(d, "total_cycles", total_cycles, 0);
+ rte_tel_data_add_dict_container(d, "busy_cycles", busy_cycles, 0);
+ usage.lcore_ids = lcore_ids;
+ usage.total_cycles = total_cycles;
+ usage.busy_cycles = busy_cycles;
+
+ return rte_lcore_iterate(lcore_telemetry_usage_cb, &usage);
+}
+
RTE_INIT(lcore_telemetry)
{
rte_telemetry_register_cmd(
@@ -577,5 +634,8 @@ RTE_INIT(lcore_telemetry)
rte_telemetry_register_cmd(
"/eal/lcore/info", handle_lcore_info,
"Returns lcore info. Parameters: int lcore_id");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/usage", handle_lcore_usage,
+ "Returns lcore cycles usage. Takes no parameters");
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v9 1/5] eal: add lcore info in telemetry
2023-02-08 2:24 ` lihuisong (C)
@ 2023-02-08 17:04 ` Robin Jarry
2023-02-09 2:18 ` lihuisong (C)
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2023-02-08 17:04 UTC (permalink / raw)
To: lihuisong (C), dev
Hi lihuisong,
lihuisong (C), Feb 08, 2023 at 03:24:
> > static int
> > lcore_dump_cb(unsigned int lcore_id, void *arg)
> > {
> > struct rte_config *cfg = rte_eal_get_configuration();
> > char cpuset[RTE_CPU_AFFINITY_STR_LEN];
> > - const char *role;
> > FILE *f = arg;
> > int ret;
> >
> > - switch (cfg->lcore_role[lcore_id]) {
> > - case ROLE_RTE:
> > - role = "RTE";
> > - break;
> > - case ROLE_SERVICE:
> > - role = "SERVICE";
> > - break;
> > - case ROLE_NON_EAL:
> > - role = "NON_EAL";
> > - break;
> > - default:
> > - role = "UNKNOWN";
> > - break;
> > - }
> > -
> > ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
> > sizeof(cpuset));
> > fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
> > - rte_lcore_to_socket_id(lcore_id), role, cpuset,
> > - ret == 0 ? "" : "...");
> > + rte_lcore_to_socket_id(lcore_id),
> > + lcore_role_str(cfg->lcore_role[lcore_id]),
> > + cpuset, ret == 0 ? "" : "...");
> > return 0;
> > }
> The above modification doesn't seem to be related to this patch.
> Suggest remove or delete it from this patch.
I was asked in an earlier review to factorize this into an helper to
avoid code duplication.
> > + if (info->lcore_id != lcore_id)
>
> Suggest: info->lcore_id != lcore_id -> lcore_id != info->lcore_id
> Here, info->lcore_id is a target and lcore_id is the variable to be
> judged, right?
Yeah that looks better. I didn't pay too much attention since this
principle is not well respected in the current code base.
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v9 1/5] eal: add lcore info in telemetry
2023-02-08 17:04 ` Robin Jarry
@ 2023-02-09 2:18 ` lihuisong (C)
2023-02-09 8:31 ` David Marchand
0 siblings, 1 reply; 134+ messages in thread
From: lihuisong (C) @ 2023-02-09 2:18 UTC (permalink / raw)
To: Robin Jarry, dev; +Cc: Morten Brørup, Kevin Laatz, Ferruh Yigit
在 2023/2/9 1:04, Robin Jarry 写道:
> Hi lihuisong,
>
> lihuisong (C), Feb 08, 2023 at 03:24:
>>> static int
>>> lcore_dump_cb(unsigned int lcore_id, void *arg)
>>> {
>>> struct rte_config *cfg = rte_eal_get_configuration();
>>> char cpuset[RTE_CPU_AFFINITY_STR_LEN];
>>> - const char *role;
>>> FILE *f = arg;
>>> int ret;
>>>
>>> - switch (cfg->lcore_role[lcore_id]) {
>>> - case ROLE_RTE:
>>> - role = "RTE";
>>> - break;
>>> - case ROLE_SERVICE:
>>> - role = "SERVICE";
>>> - break;
>>> - case ROLE_NON_EAL:
>>> - role = "NON_EAL";
>>> - break;
>>> - default:
>>> - role = "UNKNOWN";
>>> - break;
>>> - }
>>> -
>>> ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
>>> sizeof(cpuset));
>>> fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
>>> - rte_lcore_to_socket_id(lcore_id), role, cpuset,
>>> - ret == 0 ? "" : "...");
>>> + rte_lcore_to_socket_id(lcore_id),
>>> + lcore_role_str(cfg->lcore_role[lcore_id]),
>>> + cpuset, ret == 0 ? "" : "...");
>>> return 0;
>>> }
>> The above modification doesn't seem to be related to this patch.
>> Suggest remove or delete it from this patch.
> I was asked in an earlier review to factorize this into an helper to
> avoid code duplication.
ok, this patch also uses the lcore_role_str function. please ignore this comment.
>
>>> + if (info->lcore_id != lcore_id)
>> Suggest: info->lcore_id != lcore_id -> lcore_id != info->lcore_id
>> Here, info->lcore_id is a target and lcore_id is the variable to be
>> judged, right?
> Yeah that looks better. I didn't pay too much attention since this
> principle is not well respected in the current code base.
That's not a very good reason.
It's similar to "ret != 0" and "p != NULL" in DPDK coding style.
>
> .
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v9 1/5] eal: add lcore info in telemetry
2023-02-09 2:18 ` lihuisong (C)
@ 2023-02-09 8:31 ` David Marchand
2023-02-09 8:38 ` David Marchand
0 siblings, 1 reply; 134+ messages in thread
From: David Marchand @ 2023-02-09 8:31 UTC (permalink / raw)
To: lihuisong (C)
Cc: Robin Jarry, dev, Morten Brørup, Kevin Laatz, Ferruh Yigit
On Thu, Feb 9, 2023 at 3:19 AM lihuisong (C) <lihuisong@huawei.com> wrote:
> >>> + if (info->lcore_id != lcore_id)
> >> Suggest: info->lcore_id != lcore_id -> lcore_id != info->lcore_id
> >> Here, info->lcore_id is a target and lcore_id is the variable to be
> >> judged, right?
> > Yeah that looks better. I didn't pay too much attention since this
> > principle is not well respected in the current code base.
> That's not a very good reason.
> It's similar to "ret != 0" and "p != NULL" in DPDK coding style.
I'll squash this suggestion when applying.
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v9 1/5] eal: add lcore info in telemetry
2023-02-09 8:31 ` David Marchand
@ 2023-02-09 8:38 ` David Marchand
0 siblings, 0 replies; 134+ messages in thread
From: David Marchand @ 2023-02-09 8:38 UTC (permalink / raw)
To: lihuisong (C)
Cc: Robin Jarry, dev, Morten Brørup, Kevin Laatz, Ferruh Yigit
On Thu, Feb 9, 2023 at 9:31 AM David Marchand <david.marchand@redhat.com> wrote:
>
> On Thu, Feb 9, 2023 at 3:19 AM lihuisong (C) <lihuisong@huawei.com> wrote:
> > >>> + if (info->lcore_id != lcore_id)
> > >> Suggest: info->lcore_id != lcore_id -> lcore_id != info->lcore_id
> > >> Here, info->lcore_id is a target and lcore_id is the variable to be
> > >> judged, right?
> > > Yeah that looks better. I didn't pay too much attention since this
> > > principle is not well respected in the current code base.
> > That's not a very good reason.
> > It's similar to "ret != 0" and "p != NULL" in DPDK coding style.
>
> I'll squash this suggestion when applying.
Hum, well, I have some other comments later in this series, so Robin
will fix this himself.
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [RESEND PATCH v9 4/5] app/testpmd: report lcore usage
2023-02-08 8:45 ` [RESEND PATCH v9 4/5] app/testpmd: report lcore usage Robin Jarry
@ 2023-02-09 8:43 ` David Marchand
0 siblings, 0 replies; 134+ messages in thread
From: David Marchand @ 2023-02-09 8:43 UTC (permalink / raw)
To: Robin Jarry
Cc: dev, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
On Wed, Feb 8, 2023 at 9:49 AM Robin Jarry <rjarry@redhat.com> wrote:
>
> The --record-core-cycles option already accounts for busy cycles. One
> turn of packet_fwd_t is considered "busy" if there was at least one
> received or transmitted packet.
>
> Rename core_cycles to busy_cycles in struct fwd_stream to make it more
> explicit. Add total_cycles to struct fwd_lcore. Add cycles accounting in
> noisy_vnf where it was missing.
>
> When --record-core-cycles is specified, register a callback with
> rte_lcore_register_usage_cb() and update total_cycles every turn of
> lcore loop based on a starting tsc value.
>
> In the callback, resolve the proper struct fwd_lcore based on lcore_id
> and return the lcore total_cycles and the sum of busy_cycles of all its
> fwd_streams.
>
> This makes the cycles counters available in rte_lcore_dump() and the
> lcore telemetry API:
>
> testpmd> dump_lcores
> lcore 3, socket 0, role RTE, cpuset 3
> lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
> lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
>
> --> /eal/lcore/info,4
> {
> "/eal/lcore/info": {
> "lcore_id": 4,
> "socket": 0,
> "role": "RTE",
> "cpuset": [
> 4
> ],
> "busy_cycles": 10623340318,
> "total_cycles": 55331167354
> }
> }
>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> ---
>
> Notes:
> v8 -> v9: Fixed accounting of total cycles
>
> app/test-pmd/noisy_vnf.c | 8 +++++++-
> app/test-pmd/testpmd.c | 42 ++++++++++++++++++++++++++++++++++++----
> app/test-pmd/testpmd.h | 25 +++++++++++++++---------
> 3 files changed, 61 insertions(+), 14 deletions(-)
>
> diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
> index c65ec6f06a5c..ce5a3e5e6987 100644
> --- a/app/test-pmd/noisy_vnf.c
> +++ b/app/test-pmd/noisy_vnf.c
> @@ -144,6 +144,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
> struct noisy_config *ncf = noisy_cfg[fs->rx_port];
> struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
> struct rte_mbuf *tmp_pkts[MAX_PKT_BURST];
> + uint64_t start_tsc = 0;
> uint16_t nb_deqd = 0;
> uint16_t nb_rx = 0;
> uint16_t nb_tx = 0;
> @@ -153,6 +154,8 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
> bool needs_flush = false;
> uint64_t now;
>
> + get_start_cycles(&start_tsc);
> +
> nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
> pkts_burst, nb_pkt_per_burst);
> inc_rx_burst_stats(fs, nb_rx);
> @@ -169,7 +172,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
> inc_tx_burst_stats(fs, nb_tx);
> fs->tx_packets += nb_tx;
> fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx);
> - return;
> + goto end;
> }
>
> fifo_free = rte_ring_free_count(ncf->f);
> @@ -219,6 +222,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
> fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
> ncf->prev_time = rte_get_timer_cycles();
> }
> +end:
> + if (nb_rx > 0 || nb_tx > 0)
> + get_end_cycles(fs, start_tsc);
> }
>
> #define NOISY_STRSIZE 256
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index e366f81a0f46..eeb96aefa80b 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -2053,7 +2053,7 @@ fwd_stats_display(void)
> fs->rx_bad_outer_ip_csum;
>
> if (record_core_cycles)
> - fwd_cycles += fs->core_cycles;
> + fwd_cycles += fs->busy_cycles;
> }
> for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
> pt_id = fwd_ports_ids[i];
> @@ -2145,7 +2145,7 @@ fwd_stats_display(void)
> else
> total_pkts = total_recv;
>
> - printf("\n CPU cycles/packet=%.2F (total cycles="
> + printf("\n CPU cycles/packet=%.2F (busy cycles="
> "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
> " MHz Clock\n",
> (double) fwd_cycles / total_pkts,
> @@ -2184,8 +2184,10 @@ fwd_stats_reset(void)
>
> memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
> memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
> - fs->core_cycles = 0;
> + fs->busy_cycles = 0;
> }
> + for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++)
> + fwd_lcores[i]->total_cycles = 0;
This instrumentation accuracy may not be that important in testpmd
(because testpmd is just a test/validation tool).
However, resetting total_cycles is setting a bad example for people
who may look at this code.
It does not comply with the EAL api.
The associated lcores may still be running the moment a user reset the
fwd stats.
> }
>
> static void
> @@ -2248,6 +2250,7 @@ static void
> run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
> {
> struct fwd_stream **fsm;
> + uint64_t start_tsc;
> streamid_t nb_fs;
> streamid_t sm_id;
> #ifdef RTE_LIB_BITRATESTATS
> @@ -2262,6 +2265,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
> #endif
> fsm = &fwd_streams[fc->stream_idx];
> nb_fs = fc->stream_nb;
> + start_tsc = rte_rdtsc();
> do {
> for (sm_id = 0; sm_id < nb_fs; sm_id++)
> if (!fsm[sm_id]->disabled)
> @@ -2284,10 +2288,36 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
> latencystats_lcore_id == rte_lcore_id())
> rte_latencystats_update();
> #endif
> -
> + if (record_core_cycles)
> + fc->total_cycles = rte_rdtsc() - start_tsc;
By using a single tsc reference at the start of this function,
total_cycles will be reset every time forwarding is stopped /
restarted.
A more accurate way to account for consumed cycles for this lcore
would be to increase by a differential value for each loop.
Like:
@@ -2248,6 +2248,7 @@ static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
struct fwd_stream **fsm;
+ uint64_t prev_tsc;
streamid_t nb_fs;
streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
@@ -2262,6 +2263,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc,
packet_fwd_t pkt_fwd)
#endif
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
+ prev_tsc = rte_rdtsc();
do {
for (sm_id = 0; sm_id < nb_fs; sm_id++)
if (!fsm[sm_id]->disabled)
@@ -2285,9 +2287,42 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc,
packet_fwd_t pkt_fwd)
rte_latencystats_update();
#endif
+ if (record_core_cycles) {
+ uint64_t current_tsc = rte_rdtsc();
+
+ fc->total_cycles += current_tsc - prev_tsc;
+ prev_tsc = current_tsc;
+ }
} while (! fc->stopped);
}
I also have one interrogation around those updates.
I wonder if we are missing some __atomic_store/load pairs (probably
more an issue for non-x86 arches), since the updates and reading those
cycles happen on different threads.
This issue predates your patch (for fs->core_cycles accesses previously).
I am not asking for a fix right away, this last point can wait post -rc1.
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [RESEND PATCH v9 0/5] lcore telemetry improvements
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
` (4 preceding siblings ...)
2023-02-08 8:45 ` [RESEND PATCH v9 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
@ 2023-02-09 8:44 ` David Marchand
5 siblings, 0 replies; 134+ messages in thread
From: David Marchand @ 2023-02-09 8:44 UTC (permalink / raw)
To: Robin Jarry; +Cc: dev
Hello Robin,
On Wed, Feb 8, 2023 at 9:45 AM Robin Jarry <rjarry@redhat.com> wrote:
>
> This is a follow up on previous work by Kevin Laatz:
>
> http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
>
> This series is aimed at allowing DPDK applications to expose their CPU
> usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
> more basic and naive approach which leaves the cpu cycles accounting
> completely up to the application.
>
> For reference, I have implemented a draft patch in OvS to use
> rte_lcore_register_usage_cb() and report the already available busy
> cycles information.
>
> https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
>
> v9:
>
> - Fixed changelog & version.map order.
> - Updated with 64-bit integer telemetry functions.
> - Refined docstrings (added notice about resetting the callback).
> - Fixed accounting of total cycles in testpmd.
>
> v8:
>
> - Made /eal/lcore/info lcore_id argument parsing more robust.
>
> Robin Jarry (5):
> eal: add lcore info in telemetry
> eal: report applications lcore usage
> app/testpmd: add dump command for lcores
> app/testpmd: report lcore usage
> eal: add lcore usage telemetry endpoint
>
> app/test-pmd/cmdline.c | 3 +
> app/test-pmd/noisy_vnf.c | 8 +-
> app/test-pmd/testpmd.c | 42 +++-
> app/test-pmd/testpmd.h | 25 ++-
> doc/guides/rel_notes/release_23_03.rst | 8 +
> doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +
> lib/eal/common/eal_common_lcore.c | 219 ++++++++++++++++++--
> lib/eal/include/rte_lcore.h | 48 +++++
> lib/eal/version.map | 1 +
> 9 files changed, 329 insertions(+), 32 deletions(-)
Thanks for this work.
The EAL parts look ready to me, but I still have some concerns on the
implementation in testpmd (see comments on patch 4).
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v10 0/5] lcore telemetry improvements
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
` (16 preceding siblings ...)
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
@ 2023-02-09 9:43 ` Robin Jarry
2023-02-09 9:43 ` [PATCH v10 1/5] eal: add lcore info in telemetry Robin Jarry
` (5 more replies)
17 siblings, 6 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-09 9:43 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This series is aimed at allowing DPDK applications to expose their CPU
usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
more basic and naive approach which leaves the cpu cycles accounting
completely up to the application.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_usage_cb() and report the already available busy
cycles information.
https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
v10:
- Code style fix
- Fixed reset of total_cycles while lcore is running
v9:
- Fixed changelog & version.map order.
- Updated with 64-bit integer telemetry functions.
- Refined docstrings (added notice about resetting the callback).
- Fixed accounting of total cycles in testpmd.
Robin Jarry (5):
eal: add lcore info in telemetry
eal: report applications lcore usage
app/testpmd: add dump command for lcores
app/testpmd: report lcore usage
eal: add lcore usage telemetry endpoint
app/test-pmd/cmdline.c | 3 +
app/test-pmd/noisy_vnf.c | 8 +-
app/test-pmd/testpmd.c | 44 +++-
app/test-pmd/testpmd.h | 25 ++-
doc/guides/rel_notes/release_23_03.rst | 8 +
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +
lib/eal/common/eal_common_lcore.c | 222 ++++++++++++++++++--
lib/eal/include/rte_lcore.h | 48 +++++
lib/eal/version.map | 1 +
9 files changed, 335 insertions(+), 31 deletions(-)
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v10 1/5] eal: add lcore info in telemetry
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
@ 2023-02-09 9:43 ` Robin Jarry
2023-02-09 9:43 ` [PATCH v10 2/5] eal: report applications lcore usage Robin Jarry
` (4 subsequent siblings)
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-09 9:43 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup, Kevin Laatz
Report the same information than rte_lcore_dump() in the telemetry
API into /eal/lcore/list and /eal/lcore/info,ID.
Example:
--> /eal/lcore/info,3
{
"/eal/lcore/info": {
"lcore_id": 3,
"socket": 0,
"role": "RTE",
"cpuset": [
3
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v9 -> v10: s/info->lcore_id != lcore_id/lcore_id != info->lcore_id/
lib/eal/common/eal_common_lcore.c | 126 +++++++++++++++++++++++++-----
1 file changed, 108 insertions(+), 18 deletions(-)
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 06c594b0224f..d45a40831393 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -10,6 +10,9 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <rte_telemetry.h>
+#endif
#include "eal_private.h"
#include "eal_thread.h"
@@ -419,35 +422,35 @@ rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
return ret;
}
+static const char *
+lcore_role_str(enum rte_lcore_role_t role)
+{
+ switch (role) {
+ case ROLE_RTE:
+ return "RTE";
+ case ROLE_SERVICE:
+ return "SERVICE";
+ case ROLE_NON_EAL:
+ return "NON_EAL";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
- const char *role;
FILE *f = arg;
int ret;
- switch (cfg->lcore_role[lcore_id]) {
- case ROLE_RTE:
- role = "RTE";
- break;
- case ROLE_SERVICE:
- role = "SERVICE";
- break;
- case ROLE_NON_EAL:
- role = "NON_EAL";
- break;
- default:
- role = "UNKNOWN";
- break;
- }
-
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
- rte_lcore_to_socket_id(lcore_id), role, cpuset,
- ret == 0 ? "" : "...");
+ rte_lcore_to_socket_id(lcore_id),
+ lcore_role_str(cfg->lcore_role[lcore_id]),
+ cpuset, ret == 0 ? "" : "...");
return 0;
}
@@ -456,3 +459,90 @@ rte_lcore_dump(FILE *f)
{
rte_lcore_iterate(lcore_dump_cb, f);
}
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+static int
+lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_tel_data *d = arg;
+ return rte_tel_data_add_array_int(d, lcore_id);
+}
+
+static int
+handle_lcore_list(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ int ret;
+
+ ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
+ if (ret == 0)
+ ret = rte_lcore_iterate(lcore_telemetry_id_cb, d);
+
+ return ret;
+}
+
+struct lcore_telemetry_info {
+ unsigned int lcore_id;
+ struct rte_tel_data *d;
+};
+
+static int
+lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ struct lcore_telemetry_info *info = arg;
+ struct rte_tel_data *cpuset;
+ unsigned int cpu;
+
+ if (lcore_id != info->lcore_id)
+ return 0;
+
+ rte_tel_data_start_dict(info->d);
+ rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
+ rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
+ rte_tel_data_add_dict_string(info->d, "role", lcore_role_str(cfg->lcore_role[lcore_id]));
+ cpuset = rte_tel_data_alloc();
+ if (cpuset == NULL)
+ return -ENOMEM;
+ rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
+ rte_tel_data_add_array_int(cpuset, cpu);
+ }
+ rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+
+ return 0;
+}
+
+static int
+handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
+{
+ struct lcore_telemetry_info info = { .d = d };
+ unsigned long lcore_id;
+ char *endptr;
+
+ if (params == NULL)
+ return -EINVAL;
+ errno = 0;
+ lcore_id = strtoul(params, &endptr, 10);
+ if (errno)
+ return -errno;
+ if (*params == '\0' || *endptr != '\0' || lcore_id >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ info.lcore_id = lcore_id;
+
+ return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
+}
+
+RTE_INIT(lcore_telemetry)
+{
+ rte_telemetry_register_cmd(
+ "/eal/lcore/list", handle_lcore_list,
+ "List of lcore ids. Takes no parameters");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/info", handle_lcore_info,
+ "Returns lcore info. Parameters: int lcore_id");
+}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v10 2/5] eal: report applications lcore usage
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
2023-02-09 9:43 ` [PATCH v10 1/5] eal: add lcore info in telemetry Robin Jarry
@ 2023-02-09 9:43 ` Robin Jarry
2023-02-09 9:43 ` [PATCH v10 3/5] app/testpmd: add dump command for lcores Robin Jarry
` (3 subsequent siblings)
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-09 9:43 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Morten Brørup, Kevin Laatz
Allow applications to register a callback that will be invoked in
rte_lcore_dump() and when requesting lcore info in the telemetry API.
The callback is expected to return the number of TSC cycles that have
passed since application start and the number of these cycles that were
spent doing busy work.
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v9 -> v10: no change
doc/guides/rel_notes/release_23_03.rst | 7 ++++
lib/eal/common/eal_common_lcore.c | 40 +++++++++++++++++++--
lib/eal/include/rte_lcore.h | 48 ++++++++++++++++++++++++++
lib/eal/version.map | 1 +
4 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index 1fa101c420cd..17d38d5ea264 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -55,6 +55,13 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Added support for reporting lcore usage in applications.**
+
+ * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
+ been added to provide information similar to ``rte_lcore_dump()``.
+ * Applications can register a callback at startup via
+ ``rte_lcore_register_usage_cb()`` to provide lcore usage information.
+
* **Updated AMD axgbe driver.**
* Added multi-process support.
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index d45a40831393..8fcdebd87692 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -2,6 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -437,20 +438,45 @@ lcore_role_str(enum rte_lcore_role_t role)
}
}
+static rte_lcore_usage_cb lcore_usage_cb;
+
+void
+rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
+{
+ lcore_usage_cb = cb;
+}
+
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+ char *usage_str = NULL;
FILE *f = arg;
int ret;
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ if (asprintf(&usage_str, ", busy cycles %"PRIu64"/%"PRIu64,
+ usage.busy_cycles, usage.total_cycles) < 0) {
+ return -ENOMEM;
+ }
+ }
ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
sizeof(cpuset));
- fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
rte_lcore_to_socket_id(lcore_id),
lcore_role_str(cfg->lcore_role[lcore_id]),
- cpuset, ret == 0 ? "" : "...");
+ cpuset, ret == 0 ? "" : "...",
+ usage_str != NULL ? usage_str : "");
+
+ free(usage_str);
+
return 0;
}
@@ -492,7 +518,9 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
struct rte_config *cfg = rte_eal_get_configuration();
struct lcore_telemetry_info *info = arg;
+ struct rte_lcore_usage usage;
struct rte_tel_data *cpuset;
+ rte_lcore_usage_cb usage_cb;
unsigned int cpu;
if (lcore_id != info->lcore_id)
@@ -511,6 +539,14 @@ lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
rte_tel_data_add_array_int(cpuset, cpu);
}
rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_dict_uint(info->d, "total_cycles", usage.total_cycles);
+ rte_tel_data_add_dict_uint(info->d, "busy_cycles", usage.busy_cycles);
+ }
return 0;
}
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 9c7865052100..30f83f4d578c 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -328,6 +328,54 @@ typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+/**
+ * lcore usage statistics.
+ */
+struct rte_lcore_usage {
+ /**
+ * The total amount of time that the application has been running on
+ * this lcore, in TSC cycles.
+ */
+ uint64_t total_cycles;
+ /**
+ * The amount of time the application was busy, handling some
+ * workload on this lcore, in TSC cycles.
+ */
+ uint64_t busy_cycles;
+};
+
+/**
+ * Callback to allow applications to report lcore usage.
+ *
+ * @param [in] lcore_id
+ * The lcore to consider.
+ * @param [out] usage
+ * Counters representing this lcore usage. This can never be NULL.
+ * @return
+ * - 0 if fields in usage were updated successfully. The fields that the
+ * application does not support must not be modified.
+ * - a negative value if the information is not available or if any error occurred.
+ */
+typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Register a callback from an application to be called in rte_lcore_dump() and
+ * the /eal/lcore/info telemetry endpoint handler. Applications are expected to
+ * report lcore usage statistics via this callback.
+ *
+ * If a callback was already registered, it can be replaced with another callback
+ * or unregistered with NULL. The previously registered callback may remain in
+ * use for an undetermined period of time.
+ *
+ * @param cb
+ * The callback function.
+ */
+__rte_experimental
+void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb);
+
/**
* List all lcores.
*
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 6523102157e2..2c05e5f2fb60 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -441,6 +441,7 @@ EXPERIMENTAL {
rte_thread_join;
# added in 23.03
+ rte_lcore_register_usage_cb;
rte_thread_set_name;
};
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v10 3/5] app/testpmd: add dump command for lcores
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
2023-02-09 9:43 ` [PATCH v10 1/5] eal: add lcore info in telemetry Robin Jarry
2023-02-09 9:43 ` [PATCH v10 2/5] eal: report applications lcore usage Robin Jarry
@ 2023-02-09 9:43 ` Robin Jarry
2023-02-09 9:43 ` [PATCH v10 4/5] app/testpmd: report lcore usage Robin Jarry
` (2 subsequent siblings)
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-09 9:43 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
Add a simple command that calls rte_lcore_dump().
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v9 -> v10: no change
app/test-pmd/cmdline.c | 3 +++
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +++++++
2 files changed, 10 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index cb8c174020b0..bb7ff2b44989 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8357,6 +8357,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_lcores"))
+ rte_lcore_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -8370,6 +8372,7 @@ static cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_devargs#"
+ "dump_lcores#"
"dump_log_types");
static cmdline_parse_inst_t cmd_dump = {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 79a1fa9cb73d..9ceb21dfbbdf 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -591,6 +591,13 @@ Dumps the user device list::
testpmd> dump_devargs
+dump lcores
+~~~~~~~~~~~
+
+Dumps the logical cores list::
+
+ testpmd> dump_lcores
+
dump log types
~~~~~~~~~~~~~~
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v10 4/5] app/testpmd: report lcore usage
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
` (2 preceding siblings ...)
2023-02-09 9:43 ` [PATCH v10 3/5] app/testpmd: add dump command for lcores Robin Jarry
@ 2023-02-09 9:43 ` Robin Jarry
2023-02-09 9:43 ` [PATCH v10 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
2023-02-10 13:27 ` [PATCH v10 0/5] lcore telemetry improvements David Marchand
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-09 9:43 UTC (permalink / raw)
To: dev
Cc: Robin Jarry, Morten Brørup, Konstantin Ananyev, Kevin Laatz,
Aman Singh, Yuying Zhang
The --record-core-cycles option already accounts for busy cycles. One
turn of packet_fwd_t is considered "busy" if there was at least one
received or transmitted packet.
Rename core_cycles to busy_cycles in struct fwd_stream to make it more
explicit. Add total_cycles to struct fwd_lcore. Add cycles accounting in
noisy_vnf where it was missing.
When --record-core-cycles is specified, register a callback with
rte_lcore_register_usage_cb() and update total_cycles every turn of
lcore loop based on a starting tsc value.
In the callback, resolve the proper struct fwd_lcore based on lcore_id
and return the lcore total_cycles and the sum of busy_cycles of all its
fwd_streams.
This makes the cycles counters available in rte_lcore_dump() and the
lcore telemetry API:
testpmd> dump_lcores
lcore 3, socket 0, role RTE, cpuset 3
lcore 4, socket 0, role RTE, cpuset 4, busy cycles 1228584096/9239923140
lcore 5, socket 0, role RTE, cpuset 5, busy cycles 1255661768/9218141538
--> /eal/lcore/info,4
{
"/eal/lcore/info": {
"lcore_id": 4,
"socket": 0,
"role": "RTE",
"cpuset": [
4
],
"busy_cycles": 10623340318,
"total_cycles": 55331167354
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v9 -> v10: Fixed reset of total_cycles without stopping
app/test-pmd/noisy_vnf.c | 8 +++++++-
app/test-pmd/testpmd.c | 44 +++++++++++++++++++++++++++++++++++++---
app/test-pmd/testpmd.h | 25 +++++++++++++++--------
3 files changed, 64 insertions(+), 13 deletions(-)
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a5c..ce5a3e5e6987 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -144,6 +144,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
struct noisy_config *ncf = noisy_cfg[fs->rx_port];
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf *tmp_pkts[MAX_PKT_BURST];
+ uint64_t start_tsc = 0;
uint16_t nb_deqd = 0;
uint16_t nb_rx = 0;
uint16_t nb_tx = 0;
@@ -153,6 +154,8 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
bool needs_flush = false;
uint64_t now;
+ get_start_cycles(&start_tsc);
+
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
pkts_burst, nb_pkt_per_burst);
inc_rx_burst_stats(fs, nb_rx);
@@ -169,7 +172,7 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
inc_tx_burst_stats(fs, nb_tx);
fs->tx_packets += nb_tx;
fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx);
- return;
+ goto end;
}
fifo_free = rte_ring_free_count(ncf->f);
@@ -219,6 +222,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
ncf->prev_time = rte_get_timer_cycles();
}
+end:
+ if (nb_rx > 0 || nb_tx > 0)
+ get_end_cycles(fs, start_tsc);
}
#define NOISY_STRSIZE 256
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index e366f81a0f46..d02f96df7570 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2053,7 +2053,7 @@ fwd_stats_display(void)
fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
- fwd_cycles += fs->core_cycles;
+ fwd_cycles += fs->busy_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -2145,7 +2145,7 @@ fwd_stats_display(void)
else
total_pkts = total_recv;
- printf("\n CPU cycles/packet=%.2F (total cycles="
+ printf("\n CPU cycles/packet=%.2F (busy cycles="
"%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
" MHz Clock\n",
(double) fwd_cycles / total_pkts,
@@ -2184,8 +2184,10 @@ fwd_stats_reset(void)
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
- fs->core_cycles = 0;
+ fs->busy_cycles = 0;
}
+ for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++)
+ fwd_lcores[i]->total_cycles = 0;
}
static void
@@ -2248,6 +2250,7 @@ static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
struct fwd_stream **fsm;
+ uint64_t prev_tsc;
streamid_t nb_fs;
streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
@@ -2262,6 +2265,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
#endif
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
+ prev_tsc = rte_rdtsc();
do {
for (sm_id = 0; sm_id < nb_fs; sm_id++)
if (!fsm[sm_id]->disabled)
@@ -2284,10 +2288,40 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
latencystats_lcore_id == rte_lcore_id())
rte_latencystats_update();
#endif
+ if (record_core_cycles) {
+ uint64_t tsc = rte_rdtsc();
+ fc->total_cycles += tsc - prev_tsc;
+ prev_tsc = tsc;
+ }
} while (! fc->stopped);
}
+static int
+lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
+{
+ struct fwd_stream **fsm;
+ struct fwd_lcore *fc;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+
+ fc = lcore_to_fwd_lcore(lcore_id);
+ if (fc == NULL)
+ return -1;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ usage->busy_cycles = 0;
+ usage->total_cycles = fc->total_cycles;
+
+ for (sm_id = 0; sm_id < nb_fs; sm_id++) {
+ if (!fsm[sm_id]->disabled)
+ usage->busy_cycles += fsm[sm_id]->busy_cycles;
+ }
+
+ return 0;
+}
+
static int
start_pkt_forward_on_core(void *fwd_arg)
{
@@ -4527,6 +4561,10 @@ main(int argc, char** argv)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
+
+ if (record_core_cycles)
+ rte_lcore_register_usage_cb(lcore_usage_callback);
+
#ifdef RTE_LIB_CMDLINE
if (init_cmdline() != 0)
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970d2..6ec2f6879b47 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -174,7 +174,7 @@ struct fwd_stream {
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
- uint64_t core_cycles; /**< used for RX and TX processing */
+ uint64_t busy_cycles; /**< used with --record-core-cycles */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
struct fwd_lcore *lcore; /**< Lcore being scheduled. */
@@ -360,6 +360,7 @@ struct fwd_lcore {
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
volatile char stopped; /**< stop forwarding when set */
+ uint64_t total_cycles; /**< used with --record-core-cycles */
};
/*
@@ -785,16 +786,17 @@ is_proc_primary(void)
return rte_eal_process_type() == RTE_PROC_PRIMARY;
}
-static inline unsigned int
-lcore_num(void)
+static inline struct fwd_lcore *
+lcore_to_fwd_lcore(uint16_t lcore_id)
{
unsigned int i;
- for (i = 0; i < RTE_MAX_LCORE; ++i)
- if (fwd_lcores_cpuids[i] == rte_lcore_id())
- return i;
+ for (i = 0; i < cur_fwd_config.nb_fwd_lcores; ++i) {
+ if (fwd_lcores_cpuids[i] == lcore_id)
+ return fwd_lcores[i];
+ }
- rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
+ return NULL;
}
void
@@ -803,7 +805,12 @@ parse_fwd_portlist(const char *port);
static inline struct fwd_lcore *
current_fwd_lcore(void)
{
- return fwd_lcores[lcore_num()];
+ struct fwd_lcore *fc = lcore_to_fwd_lcore(rte_lcore_id());
+
+ if (fc == NULL)
+ rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
+
+ return fc;
}
/* Mbuf Pools */
@@ -839,7 +846,7 @@ static inline void
get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
{
if (record_core_cycles)
- fs->core_cycles += rte_rdtsc() - start_tsc;
+ fs->busy_cycles += rte_rdtsc() - start_tsc;
}
static inline void
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* [PATCH v10 5/5] eal: add lcore usage telemetry endpoint
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
` (3 preceding siblings ...)
2023-02-09 9:43 ` [PATCH v10 4/5] app/testpmd: report lcore usage Robin Jarry
@ 2023-02-09 9:43 ` Robin Jarry
2023-02-10 13:27 ` [PATCH v10 0/5] lcore telemetry improvements David Marchand
5 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2023-02-09 9:43 UTC (permalink / raw)
To: dev; +Cc: Robin Jarry, Kevin Laatz
Allow fetching CPU cycles usage for all lcores with a single request.
This endpoint is intended for repeated and frequent invocations by
external monitoring systems and therefore returns condensed data.
It consists of a single dictionary with three keys: "lcore_ids",
"total_cycles" and "busy_cycles" that are mapped to three arrays of
integer values. Each array has the same number of values, one per lcore,
in the same order.
Example:
--> /eal/lcore/usage
{
"/eal/lcore/usage": {
"lcore_ids": [
4,
5
],
"total_cycles": [
23846845590,
23900558914
],
"busy_cycles": [
21043446682,
21448837316
]
}
}
Signed-off-by: Robin Jarry <rjarry@redhat.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
Notes:
v9 -> v10: no change
doc/guides/rel_notes/release_23_03.rst | 5 ++-
lib/eal/common/eal_common_lcore.c | 60 ++++++++++++++++++++++++++
2 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index 17d38d5ea264..4f2878846829 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -57,8 +57,9 @@ New Features
* **Added support for reporting lcore usage in applications.**
- * The ``/eal/lcore/list`` and ``/eal/lcore/info`` telemetry endpoints have
- been added to provide information similar to ``rte_lcore_dump()``.
+ * The ``/eal/lcore/list``, ``/eal/lcore/usage`` and ``/eal/lcore/info``
+ telemetry endpoints have been added to provide information similar to
+ ``rte_lcore_dump()``.
* Applications can register a callback at startup via
``rte_lcore_register_usage_cb()`` to provide lcore usage information.
diff --git a/lib/eal/common/eal_common_lcore.c b/lib/eal/common/eal_common_lcore.c
index 8fcdebd87692..837acbe4142a 100644
--- a/lib/eal/common/eal_common_lcore.c
+++ b/lib/eal/common/eal_common_lcore.c
@@ -572,6 +572,63 @@ handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_t
return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
}
+struct lcore_telemetry_usage {
+ struct rte_tel_data *lcore_ids;
+ struct rte_tel_data *total_cycles;
+ struct rte_tel_data *busy_cycles;
+};
+
+static int
+lcore_telemetry_usage_cb(unsigned int lcore_id, void *arg)
+{
+ struct lcore_telemetry_usage *u = arg;
+ struct rte_lcore_usage usage;
+ rte_lcore_usage_cb usage_cb;
+
+ /* The callback may not set all the fields in the structure, so clear it here. */
+ memset(&usage, 0, sizeof(usage));
+ /* Guard against concurrent modification of lcore_usage_cb. */
+ usage_cb = lcore_usage_cb;
+ if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
+ rte_tel_data_add_array_uint(u->lcore_ids, lcore_id);
+ rte_tel_data_add_array_uint(u->total_cycles, usage.total_cycles);
+ rte_tel_data_add_array_uint(u->busy_cycles, usage.busy_cycles);
+ }
+
+ return 0;
+}
+
+static int
+handle_lcore_usage(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ struct lcore_telemetry_usage usage;
+ struct rte_tel_data *lcore_ids = rte_tel_data_alloc();
+ struct rte_tel_data *total_cycles = rte_tel_data_alloc();
+ struct rte_tel_data *busy_cycles = rte_tel_data_alloc();
+
+ if (!lcore_ids || !total_cycles || !busy_cycles) {
+ rte_tel_data_free(lcore_ids);
+ rte_tel_data_free(total_cycles);
+ rte_tel_data_free(busy_cycles);
+ return -ENOMEM;
+ }
+
+ rte_tel_data_start_dict(d);
+ rte_tel_data_start_array(lcore_ids, RTE_TEL_UINT_VAL);
+ rte_tel_data_start_array(total_cycles, RTE_TEL_UINT_VAL);
+ rte_tel_data_start_array(busy_cycles, RTE_TEL_UINT_VAL);
+ rte_tel_data_add_dict_container(d, "lcore_ids", lcore_ids, 0);
+ rte_tel_data_add_dict_container(d, "total_cycles", total_cycles, 0);
+ rte_tel_data_add_dict_container(d, "busy_cycles", busy_cycles, 0);
+ usage.lcore_ids = lcore_ids;
+ usage.total_cycles = total_cycles;
+ usage.busy_cycles = busy_cycles;
+
+ return rte_lcore_iterate(lcore_telemetry_usage_cb, &usage);
+}
+
RTE_INIT(lcore_telemetry)
{
rte_telemetry_register_cmd(
@@ -580,5 +637,8 @@ RTE_INIT(lcore_telemetry)
rte_telemetry_register_cmd(
"/eal/lcore/info", handle_lcore_info,
"Returns lcore info. Parameters: int lcore_id");
+ rte_telemetry_register_cmd(
+ "/eal/lcore/usage", handle_lcore_usage,
+ "Returns lcore cycles usage. Takes no parameters");
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
--
2.39.1
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [PATCH v10 0/5] lcore telemetry improvements
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
` (4 preceding siblings ...)
2023-02-09 9:43 ` [PATCH v10 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
@ 2023-02-10 13:27 ` David Marchand
5 siblings, 0 replies; 134+ messages in thread
From: David Marchand @ 2023-02-10 13:27 UTC (permalink / raw)
To: Robin Jarry; +Cc: dev, Morten Brørup, Kevin Laatz, Konstantin Ananyev
Hello Robin,
On Thu, Feb 9, 2023 at 10:45 AM Robin Jarry <rjarry@redhat.com> wrote:
>
> This is a follow up on previous work by Kevin Laatz:
>
> http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
>
> This series is aimed at allowing DPDK applications to expose their CPU
> usage stats in the DPDK telemetry under /eal/lcore/info. This is a much
> more basic and naive approach which leaves the cpu cycles accounting
> completely up to the application.
>
> For reference, I have implemented a draft patch in OvS to use
> rte_lcore_register_usage_cb() and report the already available busy
> cycles information.
>
> https://github.com/rjarry/ovs/commit/643e672fe388e348ea7ccbbda6f5a87a066fd919
>
> v10:
>
> - Code style fix
> - Fixed reset of total_cycles while lcore is running
>
> v9:
>
> - Fixed changelog & version.map order.
> - Updated with 64-bit integer telemetry functions.
> - Refined docstrings (added notice about resetting the callback).
> - Fixed accounting of total cycles in testpmd.
>
> Robin Jarry (5):
> eal: add lcore info in telemetry
> eal: report applications lcore usage
> app/testpmd: add dump command for lcores
> app/testpmd: report lcore usage
> eal: add lcore usage telemetry endpoint
>
> app/test-pmd/cmdline.c | 3 +
> app/test-pmd/noisy_vnf.c | 8 +-
> app/test-pmd/testpmd.c | 44 +++-
> app/test-pmd/testpmd.h | 25 ++-
> doc/guides/rel_notes/release_23_03.rst | 8 +
> doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +
> lib/eal/common/eal_common_lcore.c | 222 ++++++++++++++++++--
> lib/eal/include/rte_lcore.h | 48 +++++
> lib/eal/version.map | 1 +
> 9 files changed, 335 insertions(+), 31 deletions(-)
>
This last revision lgtm with an edit on patch 4 (one comment you
missed from v9).
Series applied, thanks.
--
David Marchand
^ permalink raw reply [flat|nested] 134+ messages in thread
* Re: [RFC PATCH 0/4] lcore telemetry improvements
2022-11-23 10:19 [RFC PATCH 0/4] " Robin Jarry
@ 2022-11-23 10:44 ` Robin Jarry
0 siblings, 0 replies; 134+ messages in thread
From: Robin Jarry @ 2022-11-23 10:44 UTC (permalink / raw)
To: dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom, Morten Brørup
There was a hiccup with my smtp config. Sorry about the double send.
Please ignore this series and reply on the other one.
I have updated patchwork accordingly.
^ permalink raw reply [flat|nested] 134+ messages in thread
* [RFC PATCH 0/4] lcore telemetry improvements
@ 2022-11-23 10:19 Robin Jarry
2022-11-23 10:44 ` Robin Jarry
0 siblings, 1 reply; 134+ messages in thread
From: Robin Jarry @ 2022-11-23 10:19 UTC (permalink / raw)
To: dev
Cc: Bruce Richardson, Jerin Jacob, Kevin Laatz, Konstantin Ananyev,
Mattias Rönnblom, Morten Brørup, Robin Jarry
This is a follow up on previous work by Kevin Laatz:
http://patches.dpdk.org/project/dpdk/list/?series=24658&state=*
This is a much more basic and naive approach which leaves the busy
cycles percentage completely up to the application.
This series is aimed at allowing DPDK applications to expose their CPU
busy cycles ratio in the DPDK telemetry under /eal/lcore/info.
I have left it as RFC since calculating busy cycles can be
a controversial topic.
For reference, I have implemented a draft patch in OvS to use
rte_lcore_register_busy_percent_cb() and report the already available
busy cycles information:
https://github.com/rjarry/ovs/commit/4286c0e75583075a223a67eee746084a2f3b0547
Robin Jarry (4):
eal: add lcore info in telemetry
eal: allow applications to report their cpu utilization
testpmd: add show lcores command
testpmd: report lcore usage
app/test-pmd/5tswap.c | 5 +-
app/test-pmd/cmdline.c | 31 ++++++++
app/test-pmd/csumonly.c | 6 +-
app/test-pmd/flowgen.c | 2 +-
app/test-pmd/icmpecho.c | 6 +-
app/test-pmd/iofwd.c | 5 +-
app/test-pmd/macfwd.c | 5 +-
app/test-pmd/macswap.c | 5 +-
app/test-pmd/noisy_vnf.c | 4 +
app/test-pmd/rxonly.c | 5 +-
app/test-pmd/shared_rxq_fwd.c | 5 +-
app/test-pmd/testpmd.c | 69 +++++++++++++++-
app/test-pmd/testpmd.h | 25 +++++-
app/test-pmd/txonly.c | 7 +-
lib/eal/common/eal_common_lcore.c | 127 +++++++++++++++++++++++++++++-
lib/eal/include/rte_lcore.h | 30 +++++++
lib/eal/version.map | 1 +
17 files changed, 306 insertions(+), 32 deletions(-)
--
2.38.1
^ permalink raw reply [flat|nested] 134+ messages in thread
end of thread, other threads:[~2023-02-10 13:27 UTC | newest]
Thread overview: 134+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-11-23 10:26 [RFC PATCH 0/4] lcore telemetry improvements Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
2022-11-23 16:44 ` Stephen Hemminger
2022-11-23 23:15 ` Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 2/4] eal: allow applications to report their cpu utilization Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 3/4] testpmd: add show lcores command Robin Jarry
2022-11-23 10:26 ` [RFC PATCH 4/4] testpmd: report lcore usage Robin Jarry
2022-11-28 8:59 ` [PATCH v2 0/4] lcore telemetry improvements Robin Jarry
2022-11-28 8:59 ` [PATCH v2 1/4] eal: add lcore info in telemetry Robin Jarry
2022-11-28 8:59 ` [PATCH v2 2/4] eal: allow applications to report their cpu cycles utilization Robin Jarry
2022-11-28 10:52 ` Morten Brørup
2022-11-29 8:19 ` Robin Jarry
2022-11-28 8:59 ` [PATCH v2 3/4] testpmd: add dump_lcores command Robin Jarry
2022-11-28 8:59 ` [PATCH v2 4/4] testpmd: report lcore usage Robin Jarry
2022-11-29 15:33 ` [PATCH v3 0/4] lcore telemetry improvements Robin Jarry
2022-11-29 15:33 ` [PATCH v3 1/4] eal: add lcore info in telemetry Robin Jarry
2022-11-29 15:33 ` [PATCH v3 2/4] eal: allow applications to report their cpu cycles usage Robin Jarry
2022-11-29 16:10 ` Mattias Rönnblom
2022-12-07 11:00 ` Robin Jarry
2022-12-07 11:21 ` Morten Brørup
2022-11-29 15:33 ` [PATCH v3 3/4] testpmd: add dump_lcores command Robin Jarry
2022-11-29 15:33 ` [PATCH v3 4/4] testpmd: report lcore usage Robin Jarry
2022-11-29 16:14 ` [PATCH v3 0/4] lcore telemetry improvements Mattias Rönnblom
2022-12-07 16:21 ` [PATCH " Robin Jarry
2022-12-07 16:21 ` [PATCH 1/4] eal: add lcore info in telemetry Robin Jarry
2022-12-07 16:21 ` [PATCH 2/4] eal: allow applications to report their cpu usage Robin Jarry
2022-12-13 15:49 ` Robin Jarry
2022-12-13 16:39 ` Morten Brørup
2022-12-13 17:45 ` Tyler Retzlaff
2022-12-07 16:21 ` [PATCH 3/4] testpmd: add dump_lcores command Robin Jarry
2022-12-07 16:21 ` [PATCH 4/4] testpmd: report lcore usage Robin Jarry
2022-12-16 10:21 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
2022-12-16 10:21 ` [PATCH v5 1/4] eal: add lcore info in telemetry Robin Jarry
2023-01-18 9:42 ` Kevin Laatz
2023-01-18 10:21 ` Morten Brørup
2023-01-18 11:03 ` Kevin Laatz
2023-01-18 11:35 ` Morten Brørup
2023-01-18 14:45 ` Robin Jarry
2023-01-18 16:01 ` Kevin Laatz
2023-01-18 16:17 ` Robin Jarry
2022-12-16 10:21 ` [PATCH v5 2/4] eal: allow applications to report their cpu usage Robin Jarry
2022-12-16 10:47 ` Morten Brørup
2023-01-04 10:13 ` Robin Jarry
2023-01-04 10:28 ` Morten Brørup
2022-12-22 12:41 ` Konstantin Ananyev
2023-01-04 10:10 ` Robin Jarry
2023-01-04 10:53 ` Konstantin Ananyev
2023-01-18 16:46 ` Robin Jarry
2023-02-06 20:07 ` Konstantin Ananyev
2023-02-06 20:29 ` Robin Jarry
2023-02-06 20:34 ` Konstantin Ananyev
2023-02-06 20:39 ` Robin Jarry
2023-02-06 20:44 ` Konstantin Ananyev
2023-02-06 20:55 ` Robin Jarry
2023-02-07 13:12 ` Konstantin Ananyev
2023-01-04 10:15 ` Robin Jarry
2022-12-16 10:21 ` [PATCH v5 3/4] testpmd: add dump_lcores command Robin Jarry
2022-12-22 12:43 ` Konstantin Ananyev
2022-12-16 10:21 ` [PATCH v5 4/4] testpmd: report lcore usage Robin Jarry
2022-12-22 12:44 ` Konstantin Ananyev
2023-01-18 9:13 ` [PATCH v5 0/4] lcore telemetry improvements Robin Jarry
2023-01-19 15:06 ` [PATCH v6 0/5] " Robin Jarry
2023-01-19 15:06 ` [PATCH v6 1/5] eal: add lcore info in telemetry Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:19 ` David Marchand
2023-01-19 15:06 ` [PATCH v6 2/5] eal: allow applications to report their cpu usage Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:22 ` David Marchand
2023-01-19 15:06 ` [PATCH v6 3/5] testpmd: add dump_lcores command Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
2023-01-26 11:22 ` David Marchand
2023-01-19 15:06 ` [PATCH v6 4/5] testpmd: report lcore usage Robin Jarry
2023-01-19 19:42 ` Kevin Laatz
2023-01-19 15:06 ` [PATCH v6 5/5] telemetry: add /eal/lcore/usage endpoint Robin Jarry
2023-01-19 16:21 ` Morten Brørup
2023-01-19 16:34 ` Robin Jarry
2023-01-19 16:45 ` Morten Brørup
2023-01-19 19:42 ` Kevin Laatz
2023-02-02 13:43 ` [PATCH v8 0/5] lcore telemetry improvements Robin Jarry
2023-02-02 13:43 ` [PATCH v8 1/5] eal: add lcore info in telemetry Robin Jarry
2023-02-06 3:50 ` fengchengwen
2023-02-06 8:22 ` Robin Jarry
2023-02-06 11:22 ` fengchengwen
2023-02-06 11:46 ` Robin Jarry
2023-02-06 12:08 ` fengchengwen
2023-02-02 13:43 ` [PATCH v8 2/5] eal: report applications lcore usage Robin Jarry
2023-02-06 4:00 ` fengchengwen
2023-02-06 7:36 ` Morten Brørup
2023-02-06 8:21 ` Robin Jarry
2023-02-06 11:18 ` fengchengwen
2023-02-06 8:48 ` David Marchand
2023-02-06 9:03 ` Robin Jarry
2023-02-02 13:43 ` [PATCH v8 3/5] app/testpmd: add dump command for lcores Robin Jarry
2023-02-06 3:34 ` fengchengwen
2023-02-02 13:43 ` [PATCH v8 4/5] app/testpmd: report lcore usage Robin Jarry
2023-02-06 3:31 ` fengchengwen
2023-02-06 8:58 ` David Marchand
2023-02-06 9:08 ` Robin Jarry
2023-02-06 15:06 ` David Marchand
2023-02-02 13:43 ` [PATCH v8 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
2023-02-02 14:00 ` Morten Brørup
2023-02-06 3:27 ` fengchengwen
2023-02-06 8:24 ` Robin Jarry
2023-02-06 11:32 ` fengchengwen
2023-02-05 23:11 ` [PATCH v8 0/5] lcore telemetry improvements Thomas Monjalon
2023-02-07 19:37 ` [PATCH v9 " Robin Jarry
2023-02-07 19:37 ` [PATCH v9 1/5] eal: add lcore info in telemetry Robin Jarry
2023-02-08 2:24 ` lihuisong (C)
2023-02-08 17:04 ` Robin Jarry
2023-02-09 2:18 ` lihuisong (C)
2023-02-09 8:31 ` David Marchand
2023-02-09 8:38 ` David Marchand
2023-02-07 19:37 ` [PATCH v9 2/5] eal: report applications lcore usage Robin Jarry
2023-02-07 19:37 ` [PATCH v9 3/5] app/testpmd: add dump command for lcores Robin Jarry
2023-02-07 19:37 ` [PATCH v9 4/5] app/testpmd: report lcore usage Robin Jarry
2023-02-08 2:59 ` lihuisong (C)
2023-02-07 19:37 ` [PATCH v9 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 0/5] lcore telemetry improvements Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 1/5] eal: add lcore info in telemetry Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 2/5] eal: report applications lcore usage Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 3/5] app/testpmd: add dump command for lcores Robin Jarry
2023-02-08 8:45 ` [RESEND PATCH v9 4/5] app/testpmd: report lcore usage Robin Jarry
2023-02-09 8:43 ` David Marchand
2023-02-08 8:45 ` [RESEND PATCH v9 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
2023-02-09 8:44 ` [RESEND PATCH v9 0/5] lcore telemetry improvements David Marchand
2023-02-09 9:43 ` [PATCH v10 " Robin Jarry
2023-02-09 9:43 ` [PATCH v10 1/5] eal: add lcore info in telemetry Robin Jarry
2023-02-09 9:43 ` [PATCH v10 2/5] eal: report applications lcore usage Robin Jarry
2023-02-09 9:43 ` [PATCH v10 3/5] app/testpmd: add dump command for lcores Robin Jarry
2023-02-09 9:43 ` [PATCH v10 4/5] app/testpmd: report lcore usage Robin Jarry
2023-02-09 9:43 ` [PATCH v10 5/5] eal: add lcore usage telemetry endpoint Robin Jarry
2023-02-10 13:27 ` [PATCH v10 0/5] lcore telemetry improvements David Marchand
-- strict thread matches above, loose matches on Subject: below --
2022-11-23 10:19 [RFC PATCH 0/4] " Robin Jarry
2022-11-23 10:44 ` Robin Jarry
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).