From: Robin Jarry <rjarry@redhat.com>
To: dev@dpdk.org, Jerin Jacob <jerinj@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Zhirun Yan <yanzhirun_163@163.com>
Subject: [PATCH v2] graph: avoid accessing graph list when getting stats
Date: Mon,  1 Apr 2024 22:36:49 +0200	[thread overview]
Message-ID: <20240401203647.1909165-3-rjarry@redhat.com> (raw)
In-Reply-To: <20240325155303.770468-2-rjarry@redhat.com>

In rte_graph_cluster_stats_get, the walk model of the first graph in the
global graph list is checked to determine whether the multi-core dispatch
specific counters should be updated. This global list is accessed without
any locks.

If the global list is modified by another thread while
rte_graph_cluster_stats_get is called, it can result in undefined
behaviour.

Adding a lock would make it impossible to call
rte_graph_cluster_stats_get in packet processing code paths. Instead,
avoid accessing the global list by storing a bool field in the private
rte_graph_cluster_stats structure.
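
For illustration, here is a minimal, self-contained sketch of that pattern
(simplified stand-in types, not the actual rte_graph structures): the model
is read once when the stats object is created, and the hot path only reads
the cached private flag, never the shared global list.

	/* Illustrative sketch only: hypothetical stand-ins for the real
	 * rte_graph structures, showing the pattern of caching a flag at
	 * creation time so the hot path never touches a shared list. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	enum model { MODEL_RTC, MODEL_MCORE_DISPATCH }; /* stand-in */

	struct graph {                 /* stand-in for struct rte_graph */
		enum model model;
	};

	struct cluster_stats {         /* stand-in for the private struct */
		bool dispatch;         /* cached once, read lock-free later */
	};

	/* Creation time: the caller still owns the graphs it passed in,
	 * so reading the model here does not race with the global list. */
	static struct cluster_stats *
	stats_create(const struct graph *first_graph)
	{
		struct cluster_stats *stats = calloc(1, sizeof(*stats));

		if (stats == NULL)
			return NULL;
		stats->dispatch = (first_graph->model == MODEL_MCORE_DISPATCH);
		return stats;
	}

	/* Hot path: only the private cached flag is consulted. */
	static void
	stats_get(const struct cluster_stats *stats)
	{
		if (stats->dispatch)
			printf("aggregate dispatch counters too\n");
		else
			printf("aggregate base counters only\n");
	}

	int main(void)
	{
		struct graph g = { .model = MODEL_MCORE_DISPATCH };
		struct cluster_stats *stats = stats_create(&g);

		if (stats == NULL)
			return 1;
		stats_get(stats);
		free(stats);
		return 0;
	}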

Also update the default callback so that it no longer accesses the global
list: a different default callback is now selected depending on the graph
model.
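
A minimal sketch of that fix-up (hypothetical names, not the DPDK symbols):
the default callback is chosen once at init time from the graph model, so
no callback has to consult the global list on every invocation.

	/* Illustrative sketch only: selecting one of two default callbacks
	 * once at init time, keyed on the graph model. */
	#include <stdbool.h>
	#include <stdio.h>

	typedef int (*stats_cb_t)(bool is_first, bool is_last, void *cookie);

	static int cb_rtc(bool is_first, bool is_last, void *cookie)
	{
		(void)is_first; (void)is_last; (void)cookie;
		printf("run-to-completion stats row\n");
		return 0;
	}

	static int cb_dispatch(bool is_first, bool is_last, void *cookie)
	{
		(void)is_first; (void)is_last; (void)cookie;
		printf("dispatch stats row (with sched counters)\n");
		return 0;
	}

	/* Done once at init: a user-supplied callback wins, otherwise the
	 * default is picked from the model of the first graph. */
	static stats_cb_t
	pick_default_cb(stats_cb_t user_cb, bool is_dispatch_model)
	{
		if (user_cb != NULL)
			return user_cb;
		return is_dispatch_model ? cb_dispatch : cb_rtc;
	}

	int main(void)
	{
		stats_cb_t fn = pick_default_cb(NULL, true);

		return fn(true, true, NULL);
	}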

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---

Notes:
    v2:
    
    * (kiran) removed unnecessary loop in stats_mem_init.

 lib/graph/graph_stats.c | 57 ++++++++++++++++++++++++++---------------
 1 file changed, 36 insertions(+), 21 deletions(-)

diff --git a/lib/graph/graph_stats.c b/lib/graph/graph_stats.c
index 2fb808b21ec5..d71451a17b95 100644
--- a/lib/graph/graph_stats.c
+++ b/lib/graph/graph_stats.c
@@ -34,6 +34,7 @@ struct __rte_cache_aligned rte_graph_cluster_stats {
 	uint32_t cluster_node_size; /* Size of struct cluster_node */
 	rte_node_t max_nodes;
 	int socket_id;
+	bool dispatch;
 	void *cookie;
 	size_t sz;
 
@@ -74,17 +75,16 @@ print_banner_dispatch(FILE *f)
 }
 
 static inline void
-print_banner(FILE *f)
+print_banner(FILE *f, bool dispatch)
 {
-	if (rte_graph_worker_model_get(STAILQ_FIRST(graph_list_head_get())->graph) ==
-	    RTE_GRAPH_MODEL_MCORE_DISPATCH)
+	if (dispatch)
 		print_banner_dispatch(f);
 	else
 		print_banner_default(f);
 }
 
 static inline void
-print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat)
+print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat, bool dispatch)
 {
 	double objs_per_call, objs_per_sec, cycles_per_call, ts_per_hz;
 	const uint64_t prev_calls = stat->prev_calls;
@@ -104,8 +104,7 @@ print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat)
 	objs_per_sec = ts_per_hz ? (objs - prev_objs) / ts_per_hz : 0;
 	objs_per_sec /= 1000000;
 
-	if (rte_graph_worker_model_get(STAILQ_FIRST(graph_list_head_get())->graph) ==
-	    RTE_GRAPH_MODEL_MCORE_DISPATCH) {
+	if (dispatch) {
 		fprintf(f,
 			"|%-31s|%-15" PRIu64 "|%-15" PRIu64 "|%-15" PRIu64
 			"|%-15" PRIu64 "|%-15" PRIu64
@@ -123,20 +122,17 @@ print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat)
 }
 
 static int
-graph_cluster_stats_cb(bool is_first, bool is_last, void *cookie,
+graph_cluster_stats_cb(bool dispatch, bool is_first, bool is_last, void *cookie,
 		       const struct rte_graph_cluster_node_stats *stat)
 {
 	FILE *f = cookie;
-	int model;
-
-	model = rte_graph_worker_model_get(STAILQ_FIRST(graph_list_head_get())->graph);
 
 	if (unlikely(is_first))
-		print_banner(f);
+		print_banner(f, dispatch);
 	if (stat->objs)
-		print_node(f, stat);
+		print_node(f, stat, dispatch);
 	if (unlikely(is_last)) {
-		if (model == RTE_GRAPH_MODEL_MCORE_DISPATCH)
+		if (dispatch)
 			boarder_model_dispatch();
 		else
 			boarder();
@@ -145,6 +141,20 @@ graph_cluster_stats_cb(bool is_first, bool is_last, void *cookie,
 	return 0;
 };
 
+static int
+graph_cluster_stats_cb_rtc(bool is_first, bool is_last, void *cookie,
+			   const struct rte_graph_cluster_node_stats *stat)
+{
+	return graph_cluster_stats_cb(false, is_first, is_last, cookie, stat);
+};
+
+static int
+graph_cluster_stats_cb_dispatch(bool is_first, bool is_last, void *cookie,
+				const struct rte_graph_cluster_node_stats *stat)
+{
+	return graph_cluster_stats_cb(true, is_first, is_last, cookie, stat);
+};
+
 static struct rte_graph_cluster_stats *
 stats_mem_init(struct cluster *cluster,
 	       const struct rte_graph_cluster_stats_param *prm)
@@ -157,8 +167,13 @@ stats_mem_init(struct cluster *cluster,
 
 	/* Fix up callback */
 	fn = prm->fn;
-	if (fn == NULL)
-		fn = graph_cluster_stats_cb;
+	if (fn == NULL) {
+		const struct rte_graph *graph = cluster->graphs[0]->graph;
+		if (graph->model == RTE_GRAPH_MODEL_MCORE_DISPATCH)
+			fn = graph_cluster_stats_cb_dispatch;
+		else
+			fn = graph_cluster_stats_cb_rtc;
+	}
 
 	cluster_node_size = sizeof(struct cluster_node);
 	/* For a given cluster, max nodes will be the max number of graphs */
@@ -350,6 +365,8 @@ rte_graph_cluster_stats_create(const struct rte_graph_cluster_stats_param *prm)
 			if (stats_mem_populate(&stats, graph_fp, graph_node))
 				goto realloc_fail;
 		}
+		if (graph->graph->model == RTE_GRAPH_MODEL_MCORE_DISPATCH)
+			stats->dispatch = true;
 	}
 
 	/* Finally copy to hugepage memory to avoid pressure on rte_realloc */
@@ -375,20 +392,18 @@ rte_graph_cluster_stats_destroy(struct rte_graph_cluster_stats *stat)
 }
 
 static inline void
-cluster_node_arregate_stats(struct cluster_node *cluster)
+cluster_node_arregate_stats(struct cluster_node *cluster, bool dispatch)
 {
 	uint64_t calls = 0, cycles = 0, objs = 0, realloc_count = 0;
 	struct rte_graph_cluster_node_stats *stat = &cluster->stat;
 	uint64_t sched_objs = 0, sched_fail = 0;
 	struct rte_node *node;
 	rte_node_t count;
-	int model;
 
-	model = rte_graph_worker_model_get(STAILQ_FIRST(graph_list_head_get())->graph);
 	for (count = 0; count < cluster->nb_nodes; count++) {
 		node = cluster->nodes[count];
 
-		if (model == RTE_GRAPH_MODEL_MCORE_DISPATCH) {
+		if (dispatch) {
 			sched_objs += node->dispatch.total_sched_objs;
 			sched_fail += node->dispatch.total_sched_fail;
 		}
@@ -403,7 +418,7 @@ cluster_node_arregate_stats(struct cluster_node *cluster)
 	stat->objs = objs;
 	stat->cycles = cycles;
 
-	if (model == RTE_GRAPH_MODEL_MCORE_DISPATCH) {
+	if (dispatch) {
 		stat->dispatch.sched_objs = sched_objs;
 		stat->dispatch.sched_fail = sched_fail;
 	}
@@ -433,7 +448,7 @@ rte_graph_cluster_stats_get(struct rte_graph_cluster_stats *stat, bool skip_cb)
 	cluster = stat->clusters;
 
 	for (count = 0; count < stat->max_nodes; count++) {
-		cluster_node_arregate_stats(cluster);
+		cluster_node_arregate_stats(cluster, stat->dispatch);
 		if (!skip_cb)
 			rc = stat->fn(!count, (count == stat->max_nodes - 1),
 				      stat->cookie, &cluster->stat);
-- 
2.44.0

