DPDK patches and discussions
* [PATCH] graph: avoid id collisions
@ 2024-03-25 15:53 Robin Jarry
  2024-03-26 12:48 ` Jerin Jacob
  2024-03-26 15:49 ` [PATCH v2] " Robin Jarry
  0 siblings, 2 replies; 4+ messages in thread
From: Robin Jarry @ 2024-03-25 15:53 UTC (permalink / raw)
  To: dev, Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram, Zhirun Yan

The graph id is determined based on a global variable that is
incremented every time a graph is created, and decremented every time
a graph is destroyed. This only works if graphs are destroyed in the
reverse order in which they have been created.

The following code produces duplicate graph IDs which can lead to
use-after-free bugs and other undefined behaviours:

  a = rte_graph_create(...); // id=0 graph_id=1
  b = rte_graph_create(...); // id=1 graph_id=2
  rte_graph_destroy(a);      // graph_id=1
  c = rte_graph_create(...); // id=1 graph_id=2 (duplicate with b)
  rte_graph_destroy(c);      // frees memory still used by b

Remove the global counter. Make sure that the graph list is always
ordered by increasing graph ids. When creating a new graph, pick the
lowest id that is not already in use.
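
For illustration, with ids allocated as the lowest free value and the
list kept ordered, the same sequence should then yield:

  a = rte_graph_create(...); // id=0
  b = rte_graph_create(...); // id=1
  rte_graph_destroy(a);      // id 0 becomes free again
  c = rte_graph_create(...); // id=0 (lowest free id, distinct from b)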

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
 lib/graph/graph.c | 86 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 69 insertions(+), 17 deletions(-)

diff --git a/lib/graph/graph.c b/lib/graph/graph.c
index 147bc6c685c5..50616580d06b 100644
--- a/lib/graph/graph.c
+++ b/lib/graph/graph.c
@@ -19,11 +19,54 @@
 
 static struct graph_head graph_list = STAILQ_HEAD_INITIALIZER(graph_list);
 static rte_spinlock_t graph_lock = RTE_SPINLOCK_INITIALIZER;
-static rte_graph_t graph_id;
-
-#define GRAPH_ID_CHECK(id) ID_CHECK(id, graph_id)
 
 /* Private functions */
+static struct graph *
+graph_from_id(rte_graph_t id)
+{
+	struct graph *graph;
+	STAILQ_FOREACH(graph, &graph_list, next) {
+		if (graph->id == id)
+			return graph;
+	}
+	rte_errno = EINVAL;
+	return NULL;
+}
+
+static rte_graph_t
+graph_next_free_id(void)
+{
+	struct graph *graph;
+	rte_graph_t id = 0;
+
+	STAILQ_FOREACH(graph, &graph_list, next) {
+		if (id < graph->id)
+			break;
+		id = graph->id + 1;
+	}
+
+	return id;
+}
+
+static void
+graph_insert_ordered(struct graph *graph)
+{
+	struct graph *after, *g;
+
+	after = NULL;
+	STAILQ_FOREACH(g, &graph_list, next) {
+		if (g->id < graph->id)
+			after = g;
+		else if (g->id > graph->id)
+			break;
+	}
+	if (after == NULL) {
+		STAILQ_INSERT_HEAD(&graph_list, graph, next);
+	} else {
+		STAILQ_INSERT_AFTER(&graph_list, after, graph, next);
+	}
+}
+
 struct graph_head *
 graph_list_head_get(void)
 {
@@ -279,7 +322,8 @@ rte_graph_model_mcore_dispatch_core_bind(rte_graph_t id, int lcore)
 {
 	struct graph *graph;
 
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 	if (!rte_lcore_is_enabled(lcore))
 		SET_ERR_JMP(ENOLINK, fail, "lcore %d not enabled", lcore);
 
@@ -309,7 +353,8 @@ rte_graph_model_mcore_dispatch_core_unbind(rte_graph_t id)
 {
 	struct graph *graph;
 
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 	STAILQ_FOREACH(graph, &graph_list, next)
 		if (graph->id == id)
 			break;
@@ -406,7 +451,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm)
 	graph->socket = prm->socket_id;
 	graph->src_node_count = src_node_count;
 	graph->node_count = graph_nodes_count(graph);
-	graph->id = graph_id;
+	graph->id = graph_next_free_id();
 	graph->parent_id = RTE_GRAPH_ID_INVALID;
 	graph->lcore_id = RTE_MAX_LCORE;
 	graph->num_pkt_to_capture = prm->num_pkt_to_capture;
@@ -422,8 +467,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm)
 		goto graph_mem_destroy;
 
 	/* All good, Lets add the graph to the list */
-	graph_id++;
-	STAILQ_INSERT_TAIL(&graph_list, graph, next);
+	graph_insert_ordered(graph);
 
 	graph_spinlock_unlock();
 	return graph->id;
@@ -467,7 +511,6 @@ rte_graph_destroy(rte_graph_t id)
 			graph_cleanup(graph);
 			STAILQ_REMOVE(&graph_list, graph, graph, next);
 			free(graph);
-			graph_id--;
 			goto done;
 		}
 		graph = tmp;
@@ -520,7 +563,7 @@ graph_clone(struct graph *parent_graph, const char *name, struct rte_graph_param
 	graph->parent_id = parent_graph->id;
 	graph->lcore_id = parent_graph->lcore_id;
 	graph->socket = parent_graph->socket;
-	graph->id = graph_id;
+	graph->id = graph_next_free_id();
 
 	/* Allocate the Graph fast path memory and populate the data */
 	if (graph_fp_mem_create(graph))
@@ -539,8 +582,7 @@ graph_clone(struct graph *parent_graph, const char *name, struct rte_graph_param
 		goto graph_mem_destroy;
 
 	/* All good, Lets add the graph to the list */
-	graph_id++;
-	STAILQ_INSERT_TAIL(&graph_list, graph, next);
+	graph_insert_ordered(graph);
 
 	graph_spinlock_unlock();
 	return graph->id;
@@ -561,7 +603,8 @@ rte_graph_clone(rte_graph_t id, const char *name, struct rte_graph_param *prm)
 {
 	struct graph *graph;
 
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 	STAILQ_FOREACH(graph, &graph_list, next)
 		if (graph->id == id)
 			return graph_clone(graph, name, prm);
@@ -587,7 +630,8 @@ rte_graph_id_to_name(rte_graph_t id)
 {
 	struct graph *graph;
 
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 	STAILQ_FOREACH(graph, &graph_list, next)
 		if (graph->id == id)
 			return graph->name;
@@ -604,7 +648,8 @@ rte_graph_node_get(rte_graph_t gid, uint32_t nid)
 	rte_graph_off_t off;
 	rte_node_t count;
 
-	GRAPH_ID_CHECK(gid);
+	if (graph_from_id(gid) == NULL)
+		goto fail;
 	STAILQ_FOREACH(graph, &graph_list, next)
 		if (graph->id == gid) {
 			rte_graph_foreach_node(count, off, graph->graph,
@@ -747,7 +792,8 @@ graph_scan_dump(FILE *f, rte_graph_t id, bool all)
 	struct graph *graph;
 
 	RTE_VERIFY(f);
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 
 	STAILQ_FOREACH(graph, &graph_list, next) {
 		if (all == true) {
@@ -776,7 +822,13 @@ rte_graph_list_dump(FILE *f)
 rte_graph_t
 rte_graph_max_count(void)
 {
-	return graph_id;
+	struct graph *graph;
+	rte_graph_t count = 0;
+
+	STAILQ_FOREACH(graph, &graph_list, next)
+		count++;
+
+	return count;
 }
 
 RTE_LOG_REGISTER_DEFAULT(rte_graph_logtype, INFO);
-- 
2.44.0



* Re: [PATCH] graph: avoid id collisions
  2024-03-25 15:53 [PATCH] graph: avoid id collisions Robin Jarry
@ 2024-03-26 12:48 ` Jerin Jacob
  2024-03-26 15:49 ` [PATCH v2] " Robin Jarry
  1 sibling, 0 replies; 4+ messages in thread
From: Jerin Jacob @ 2024-03-26 12:48 UTC (permalink / raw)
  To: Robin Jarry
  Cc: dev, Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram, Zhirun Yan

On Mon, Mar 25, 2024 at 9:24 PM Robin Jarry <rjarry@redhat.com> wrote:
>
> The graph id is determined based on a global variable that is
> incremented every time a graph is created, and decremented every time
> a graph is destroyed. This only works if graphs are destroyed in the
> reverse order in which they have been created.
>
> The following code produces duplicate graph IDs which can lead to
> use-after-free bugs and other undefined behaviours:
>
>   a = rte_graph_create(...); // id=0 graph_id=1
>   b = rte_graph_create(...); // id=1 graph_id=2
>   rte_graph_destroy(a);      // graph_id=1
>   c = rte_graph_create(...); // id=1 graph_id=2 (duplicate with b)
>   rte_graph_destroy(c);      // frees memory still used by b
>
> Remove the global counter. Make sure that the graph list is always
> ordered by increasing graph ids. When creating a new graph, pick the
> lowest id that is not already in use.


Please update app/test/test_graph.c to test this case.


>
> Signed-off-by: Robin Jarry <rjarry@redhat.com>


* [PATCH v2] graph: avoid id collisions
  2024-03-25 15:53 [PATCH] graph: avoid id collisions Robin Jarry
  2024-03-26 12:48 ` Jerin Jacob
@ 2024-03-26 15:49 ` Robin Jarry
  2024-03-27  2:44   ` [EXTERNAL] " Kiran Kumar Kokkilagadda
  1 sibling, 1 reply; 4+ messages in thread
From: Robin Jarry @ 2024-03-26 15:49 UTC (permalink / raw)
  To: dev, Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram, Zhirun Yan

The graph id is determined based on a global variable that is
incremented every time a graph is created, and decremented every time
a graph is destroyed. This only works if graphs are destroyed in the
reverse order in which they have been created.

The following code produces duplicate graph IDs which can lead to
use-after-free bugs and other undefined behaviours:

  a = rte_graph_create(...); // id=0 graph_id=1
  b = rte_graph_create(...); // id=1 graph_id=2
  rte_graph_destroy(a);      // graph_id=1
  c = rte_graph_create(...); // id=1 graph_id=2 (duplicate with b)
  rte_graph_destroy(c);      // frees memory still used by b

Remove the global counter. Make sure that the graph list is always
ordered by increasing graph ids. When creating a new graph, pick the
lowest id that is not already in use.
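
For illustration, with ids allocated as the lowest free value and the
list kept ordered, the same sequence should then yield:

  a = rte_graph_create(...); // id=0
  b = rte_graph_create(...); // id=1
  rte_graph_destroy(a);      // id 0 becomes free again
  c = rte_graph_create(...); // id=0 (lowest free id, distinct from b)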

Update unit tests to ensure it works as expected.

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---

Notes:
    v2: Added unit test

 app/test/test_graph.c | 63 +++++++++++++++++++++++++++++++
 lib/graph/graph.c     | 86 ++++++++++++++++++++++++++++++++++---------
 2 files changed, 132 insertions(+), 17 deletions(-)

diff --git a/app/test/test_graph.c b/app/test/test_graph.c
index b8409bc60497..c650ac4f57b7 100644
--- a/app/test/test_graph.c
+++ b/app/test/test_graph.c
@@ -696,6 +696,68 @@ test_graph_clone(void)
 	return ret;
 }
 
+static int
+test_graph_id_collisions(void)
+{
+	static const char *node_patterns[] = {"test_node_source1", "test_node00"};
+	struct rte_graph_param gconf = {
+		.socket_id = SOCKET_ID_ANY,
+		.nb_node_patterns = 2,
+		.node_patterns = node_patterns,
+	};
+	rte_graph_t g1, g2, g3;
+
+	g1 = rte_graph_create("worker1", &gconf);
+	if (g1 == RTE_GRAPH_ID_INVALID) {
+		printf("Graph 1 creation failed with error = %d\n", rte_errno);
+		return -1;
+	}
+	g2 = rte_graph_create("worker2", &gconf);
+	if (g2 == RTE_GRAPH_ID_INVALID) {
+		printf("Graph 2 creation failed with error = %d\n", rte_errno);
+		return -1;
+	}
+	if (g1 == g2) {
+		printf("Graph ids should be different\n");
+		return -1;
+	}
+	if (rte_graph_destroy(g1) < 0) {
+		printf("Graph 1 destruction failed\n");
+		return -1;
+	}
+	g3 = rte_graph_create("worker3", &gconf);
+	if (g3 == RTE_GRAPH_ID_INVALID) {
+		printf("Graph 3 creation failed with error = %d\n", rte_errno);
+		return -1;
+	}
+	if (g3 == g2) {
+		printf("Graph ids should be different\n");
+		return -1;
+	}
+	g1 = rte_graph_clone(g2, "worker1", &gconf);
+	if (g1 == RTE_GRAPH_ID_INVALID) {
+		printf("Graph 2 clone failed with error = %d\n", rte_errno);
+		return -1;
+	}
+	if (g1 == g2 || g2 == g3 || g1 == g3) {
+		printf("Graph ids should be different\n");
+		return -1;
+	}
+	if (rte_graph_destroy(g1) < 0) {
+		printf("Graph 1 destruction failed\n");
+		return -1;
+	}
+	if (rte_graph_destroy(g2) < 0) {
+		printf("Graph 2 destruction failed\n");
+		return -1;
+	}
+	if (rte_graph_destroy(g3) < 0) {
+		printf("Graph 3 destruction failed\n");
+		return -1;
+	}
+	return 0;
+}
+
 static int
 test_graph_model_mcore_dispatch_node_lcore_affinity_set(void)
 {
@@ -977,6 +1039,7 @@ static struct unit_test_suite graph_testsuite = {
 		TEST_CASE(test_lookup_functions),
 		TEST_CASE(test_create_graph),
 		TEST_CASE(test_graph_clone),
+		TEST_CASE(test_graph_id_collisions),
 		TEST_CASE(test_graph_model_mcore_dispatch_node_lcore_affinity_set),
 		TEST_CASE(test_graph_model_mcore_dispatch_core_bind_unbind),
 		TEST_CASE(test_graph_worker_model_set_get),
diff --git a/lib/graph/graph.c b/lib/graph/graph.c
index 147bc6c685c5..50616580d06b 100644
--- a/lib/graph/graph.c
+++ b/lib/graph/graph.c
@@ -19,11 +19,54 @@
 
 static struct graph_head graph_list = STAILQ_HEAD_INITIALIZER(graph_list);
 static rte_spinlock_t graph_lock = RTE_SPINLOCK_INITIALIZER;
-static rte_graph_t graph_id;
-
-#define GRAPH_ID_CHECK(id) ID_CHECK(id, graph_id)
 
 /* Private functions */
+static struct graph *
+graph_from_id(rte_graph_t id)
+{
+	struct graph *graph;
+	STAILQ_FOREACH(graph, &graph_list, next) {
+		if (graph->id == id)
+			return graph;
+	}
+	rte_errno = EINVAL;
+	return NULL;
+}
+
+static rte_graph_t
+graph_next_free_id(void)
+{
+	struct graph *graph;
+	rte_graph_t id = 0;
+
+	STAILQ_FOREACH(graph, &graph_list, next) {
+		if (id < graph->id)
+			break;
+		id = graph->id + 1;
+	}
+
+	return id;
+}
+
+static void
+graph_insert_ordered(struct graph *graph)
+{
+	struct graph *after, *g;
+
+	after = NULL;
+	STAILQ_FOREACH(g, &graph_list, next) {
+		if (g->id < graph->id)
+			after = g;
+		else if (g->id > graph->id)
+			break;
+	}
+	if (after == NULL) {
+		STAILQ_INSERT_HEAD(&graph_list, graph, next);
+	} else {
+		STAILQ_INSERT_AFTER(&graph_list, after, graph, next);
+	}
+}
+
 struct graph_head *
 graph_list_head_get(void)
 {
@@ -279,7 +322,8 @@ rte_graph_model_mcore_dispatch_core_bind(rte_graph_t id, int lcore)
 {
 	struct graph *graph;
 
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 	if (!rte_lcore_is_enabled(lcore))
 		SET_ERR_JMP(ENOLINK, fail, "lcore %d not enabled", lcore);
 
@@ -309,7 +353,8 @@ rte_graph_model_mcore_dispatch_core_unbind(rte_graph_t id)
 {
 	struct graph *graph;
 
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 	STAILQ_FOREACH(graph, &graph_list, next)
 		if (graph->id == id)
 			break;
@@ -406,7 +451,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm)
 	graph->socket = prm->socket_id;
 	graph->src_node_count = src_node_count;
 	graph->node_count = graph_nodes_count(graph);
-	graph->id = graph_id;
+	graph->id = graph_next_free_id();
 	graph->parent_id = RTE_GRAPH_ID_INVALID;
 	graph->lcore_id = RTE_MAX_LCORE;
 	graph->num_pkt_to_capture = prm->num_pkt_to_capture;
@@ -422,8 +467,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm)
 		goto graph_mem_destroy;
 
 	/* All good, Lets add the graph to the list */
-	graph_id++;
-	STAILQ_INSERT_TAIL(&graph_list, graph, next);
+	graph_insert_ordered(graph);
 
 	graph_spinlock_unlock();
 	return graph->id;
@@ -467,7 +511,6 @@ rte_graph_destroy(rte_graph_t id)
 			graph_cleanup(graph);
 			STAILQ_REMOVE(&graph_list, graph, graph, next);
 			free(graph);
-			graph_id--;
 			goto done;
 		}
 		graph = tmp;
@@ -520,7 +563,7 @@ graph_clone(struct graph *parent_graph, const char *name, struct rte_graph_param
 	graph->parent_id = parent_graph->id;
 	graph->lcore_id = parent_graph->lcore_id;
 	graph->socket = parent_graph->socket;
-	graph->id = graph_id;
+	graph->id = graph_next_free_id();
 
 	/* Allocate the Graph fast path memory and populate the data */
 	if (graph_fp_mem_create(graph))
@@ -539,8 +582,7 @@ graph_clone(struct graph *parent_graph, const char *name, struct rte_graph_param
 		goto graph_mem_destroy;
 
 	/* All good, Lets add the graph to the list */
-	graph_id++;
-	STAILQ_INSERT_TAIL(&graph_list, graph, next);
+	graph_insert_ordered(graph);
 
 	graph_spinlock_unlock();
 	return graph->id;
@@ -561,7 +603,8 @@ rte_graph_clone(rte_graph_t id, const char *name, struct rte_graph_param *prm)
 {
 	struct graph *graph;
 
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 	STAILQ_FOREACH(graph, &graph_list, next)
 		if (graph->id == id)
 			return graph_clone(graph, name, prm);
@@ -587,7 +630,8 @@ rte_graph_id_to_name(rte_graph_t id)
 {
 	struct graph *graph;
 
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 	STAILQ_FOREACH(graph, &graph_list, next)
 		if (graph->id == id)
 			return graph->name;
@@ -604,7 +648,8 @@ rte_graph_node_get(rte_graph_t gid, uint32_t nid)
 	rte_graph_off_t off;
 	rte_node_t count;
 
-	GRAPH_ID_CHECK(gid);
+	if (graph_from_id(gid) == NULL)
+		goto fail;
 	STAILQ_FOREACH(graph, &graph_list, next)
 		if (graph->id == gid) {
 			rte_graph_foreach_node(count, off, graph->graph,
@@ -747,7 +792,8 @@ graph_scan_dump(FILE *f, rte_graph_t id, bool all)
 	struct graph *graph;
 
 	RTE_VERIFY(f);
-	GRAPH_ID_CHECK(id);
+	if (graph_from_id(id) == NULL)
+		goto fail;
 
 	STAILQ_FOREACH(graph, &graph_list, next) {
 		if (all == true) {
@@ -776,7 +822,13 @@ rte_graph_list_dump(FILE *f)
 rte_graph_t
 rte_graph_max_count(void)
 {
-	return graph_id;
+	struct graph *graph;
+	rte_graph_t count = 0;
+
+	STAILQ_FOREACH(graph, &graph_list, next)
+		count++;
+
+	return count;
 }
 
 RTE_LOG_REGISTER_DEFAULT(rte_graph_logtype, INFO);
-- 
2.44.0



* RE: [EXTERNAL] [PATCH v2] graph: avoid id collisions
  2024-03-26 15:49 ` [PATCH v2] " Robin Jarry
@ 2024-03-27  2:44   ` Kiran Kumar Kokkilagadda
  0 siblings, 0 replies; 4+ messages in thread
From: Kiran Kumar Kokkilagadda @ 2024-03-27  2:44 UTC (permalink / raw)
  To: Robin Jarry, dev, Jerin Jacob, Nithin Kumar Dabilpuram, Zhirun Yan



> -----Original Message-----
> From: Robin Jarry <rjarry@redhat.com>
> Sent: Tuesday, March 26, 2024 9:20 PM
> To: dev@dpdk.org; Jerin Jacob <jerinj@marvell.com>; Kiran Kumar
> Kokkilagadda <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; Zhirun Yan <yanzhirun_163@163.com>
> Subject: [EXTERNAL] [PATCH v2] graph: avoid id collisions
> 
> The graph id is determined based on a global variable that is incremented
> every time a graph is created, and decremented every time a graph is
> destroyed. This only works if graphs are destroyed in the reverse order in
> which they have been created.
> 
> The following code produces duplicate graph IDs which can lead to use-after-
> free bugs and other undefined behaviours:
> 
>   a = rte_graph_create(...); // id=0 graph_id=1
>   b = rte_graph_create(...); // id=1 graph_id=2
>   rte_graph_destroy(a);      // graph_id=1
>   c = rte_graph_create(...); // id=1 graph_id=2 (duplicate with b)
>   rte_graph_destroy(c);      // frees memory still used by b
> 
> Remove the global counter. Make sure that the graph list is always ordered by
> increasing graph ids. When creating a new graph, pick the lowest id that is
> not already in use.
> 
> Update unit tests to ensure it works as expected.
> 
> Signed-off-by: Robin Jarry <rjarry@redhat.com>
> ---

Acked-by: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>

> 
> Notes:
>     v2: Added unit test
> 
>  app/test/test_graph.c | 63 +++++++++++++++++++++++++++++++
>  lib/graph/graph.c     | 86 ++++++++++++++++++++++++++++++++++---------
>  2 files changed, 132 insertions(+), 17 deletions(-)
> 
> diff --git a/app/test/test_graph.c b/app/test/test_graph.c index
> b8409bc60497..c650ac4f57b7 100644
> --- a/app/test/test_graph.c
> +++ b/app/test/test_graph.c
> @@ -696,6 +696,68 @@ test_graph_clone(void)
>  	return ret;
>  }
> 
> +static int
> +test_graph_id_collisions(void)
> +{
> +	static const char *node_patterns[] = {"test_node_source1",
> "test_node00"};
> +	struct rte_graph_param gconf = {
> +		.socket_id = SOCKET_ID_ANY,
> +		.nb_node_patterns = 2,
> +		.node_patterns = node_patterns,
> +	};
> +	rte_graph_t g1, g2, g3;
> +
> +	g1 = rte_graph_create("worker1", &gconf);
> +	if (g1 == RTE_GRAPH_ID_INVALID) {
> +		printf("Graph 1 creation failed with error = %d\n", rte_errno);
> +		return -1;
> +	}
> +	g2 = rte_graph_create("worker2", &gconf);
> +	if (g2 == RTE_GRAPH_ID_INVALID) {
> +		printf("Graph 2 creation failed with error = %d\n", rte_errno);
> +		return -1;
> +	}
> +	if (g1 == g2) {
> +		printf("Graph ids should be different\n");
> +		return -1;
> +	}
> +	if (rte_graph_destroy(g1) < 0) {
> +		printf("Graph 1 destruction failed\n");
> +		return -1;
> +	}
> +	g3 = rte_graph_create("worker3", &gconf);
> +	if (g3 == RTE_GRAPH_ID_INVALID) {
> +		printf("Graph 3 creation failed with error = %d\n", rte_errno);
> +		return -1;
> +	}
> +	if (g3 == g2) {
> +		printf("Graph ids should be different\n");
> +		return -1;
> +	}
> +	g1 = rte_graph_clone(g2, "worker1", &gconf);
> +	if (g1 == RTE_GRAPH_ID_INVALID) {
> +		printf("Graph 2 clone failed with error = %d\n", rte_errno);
> +		return -1;
> +	}
> +	if (g1 == g2 || g2 == g3 || g1 == g3) {
> +		printf("Graph ids should be different\n");
> +		return -1;
> +	}
> +	if (rte_graph_destroy(g1) < 0) {
> +		printf("Graph 1 destruction failed\n");
> +		return -1;
> +	}
> +	if (rte_graph_destroy(g2) < 0) {
> +		printf("Graph 2 destruction failed\n");
> +		return -1;
> +	}
> +	if (rte_graph_destroy(g3) < 0) {
> +		printf("Graph 3 destruction failed\n");
> +		return -1;
> +	}
> +	return 0;
> +}
> +
>  static int
>  test_graph_model_mcore_dispatch_node_lcore_affinity_set(void)
>  {
> @@ -977,6 +1039,7 @@ static struct unit_test_suite graph_testsuite = {
>  		TEST_CASE(test_lookup_functions),
>  		TEST_CASE(test_create_graph),
>  		TEST_CASE(test_graph_clone),
> +		TEST_CASE(test_graph_id_collisions),
> 
> 	TEST_CASE(test_graph_model_mcore_dispatch_node_lcore_affinity_s
> et),
> 
> 	TEST_CASE(test_graph_model_mcore_dispatch_core_bind_unbind),
>  		TEST_CASE(test_graph_worker_model_set_get),
> diff --git a/lib/graph/graph.c b/lib/graph/graph.c index
> 147bc6c685c5..50616580d06b 100644
> --- a/lib/graph/graph.c
> +++ b/lib/graph/graph.c
> @@ -19,11 +19,54 @@
> 
>  static struct graph_head graph_list = STAILQ_HEAD_INITIALIZER(graph_list);
>  static rte_spinlock_t graph_lock = RTE_SPINLOCK_INITIALIZER; -static
> rte_graph_t graph_id;
> -
> -#define GRAPH_ID_CHECK(id) ID_CHECK(id, graph_id)
> 
>  /* Private functions */
> +static struct graph *
> +graph_from_id(rte_graph_t id)
> +{
> +	struct graph *graph;
> +	STAILQ_FOREACH(graph, &graph_list, next) {
> +		if (graph->id == id)
> +			return graph;
> +	}
> +	rte_errno = EINVAL;
> +	return NULL;
> +}
> +
> +static rte_graph_t
> +graph_next_free_id(void)
> +{
> +	struct graph *graph;
> +	rte_graph_t id = 0;
> +
> +	STAILQ_FOREACH(graph, &graph_list, next) {
> +		if (id < graph->id)
> +			break;
> +		id = graph->id + 1;
> +	}
> +
> +	return id;
> +}
> +
> +static void
> +graph_insert_ordered(struct graph *graph)
> +{
> +	struct graph *after, *g;
> +
> +	after = NULL;
> +	STAILQ_FOREACH(g, &graph_list, next) {
> +		if (g->id < graph->id)
> +			after = g;
> +		else if (g->id > graph->id)
> +			break;
> +	}
> +	if (after == NULL) {
> +		STAILQ_INSERT_HEAD(&graph_list, graph, next);
> +	} else {
> +		STAILQ_INSERT_AFTER(&graph_list, after, graph, next);
> +	}
> +}
> +
>  struct graph_head *
>  graph_list_head_get(void)
>  {
> @@ -279,7 +322,8 @@
> rte_graph_model_mcore_dispatch_core_bind(rte_graph_t id, int lcore)  {
>  	struct graph *graph;
> 
> -	GRAPH_ID_CHECK(id);
> +	if (graph_from_id(id) == NULL)
> +		goto fail;
>  	if (!rte_lcore_is_enabled(lcore))
>  		SET_ERR_JMP(ENOLINK, fail, "lcore %d not enabled", lcore);
> 
> @@ -309,7 +353,8 @@
> rte_graph_model_mcore_dispatch_core_unbind(rte_graph_t id)  {
>  	struct graph *graph;
> 
> -	GRAPH_ID_CHECK(id);
> +	if (graph_from_id(id) == NULL)
> +		goto fail;
>  	STAILQ_FOREACH(graph, &graph_list, next)
>  		if (graph->id == id)
>  			break;
> @@ -406,7 +451,7 @@ rte_graph_create(const char *name, struct
> rte_graph_param *prm)
>  	graph->socket = prm->socket_id;
>  	graph->src_node_count = src_node_count;
>  	graph->node_count = graph_nodes_count(graph);
> -	graph->id = graph_id;
> +	graph->id = graph_next_free_id();
>  	graph->parent_id = RTE_GRAPH_ID_INVALID;
>  	graph->lcore_id = RTE_MAX_LCORE;
>  	graph->num_pkt_to_capture = prm->num_pkt_to_capture; @@ -
> 422,8 +467,7 @@ rte_graph_create(const char *name, struct
> rte_graph_param *prm)
>  		goto graph_mem_destroy;
> 
>  	/* All good, Lets add the graph to the list */
> -	graph_id++;
> -	STAILQ_INSERT_TAIL(&graph_list, graph, next);
> +	graph_insert_ordered(graph);
> 
>  	graph_spinlock_unlock();
>  	return graph->id;
> @@ -467,7 +511,6 @@ rte_graph_destroy(rte_graph_t id)
>  			graph_cleanup(graph);
>  			STAILQ_REMOVE(&graph_list, graph, graph, next);
>  			free(graph);
> -			graph_id--;
>  			goto done;
>  		}
>  		graph = tmp;
> @@ -520,7 +563,7 @@ graph_clone(struct graph *parent_graph, const char
> *name, struct rte_graph_param
>  	graph->parent_id = parent_graph->id;
>  	graph->lcore_id = parent_graph->lcore_id;
>  	graph->socket = parent_graph->socket;
> -	graph->id = graph_id;
> +	graph->id = graph_next_free_id();
> 
>  	/* Allocate the Graph fast path memory and populate the data */
>  	if (graph_fp_mem_create(graph))
> @@ -539,8 +582,7 @@ graph_clone(struct graph *parent_graph, const char
> *name, struct rte_graph_param
>  		goto graph_mem_destroy;
> 
>  	/* All good, Lets add the graph to the list */
> -	graph_id++;
> -	STAILQ_INSERT_TAIL(&graph_list, graph, next);
> +	graph_insert_ordered(graph);
> 
>  	graph_spinlock_unlock();
>  	return graph->id;
> @@ -561,7 +603,8 @@ rte_graph_clone(rte_graph_t id, const char *name,
> struct rte_graph_param *prm)  {
>  	struct graph *graph;
> 
> -	GRAPH_ID_CHECK(id);
> +	if (graph_from_id(id) == NULL)
> +		goto fail;
>  	STAILQ_FOREACH(graph, &graph_list, next)
>  		if (graph->id == id)
>  			return graph_clone(graph, name, prm); @@ -587,7
> +630,8 @@ rte_graph_id_to_name(rte_graph_t id)  {
>  	struct graph *graph;
> 
> -	GRAPH_ID_CHECK(id);
> +	if (graph_from_id(id) == NULL)
> +		goto fail;
>  	STAILQ_FOREACH(graph, &graph_list, next)
>  		if (graph->id == id)
>  			return graph->name;
> @@ -604,7 +648,8 @@ rte_graph_node_get(rte_graph_t gid, uint32_t nid)
>  	rte_graph_off_t off;
>  	rte_node_t count;
> 
> -	GRAPH_ID_CHECK(gid);
> +	if (graph_from_id(gid) == NULL)
> +		goto fail;
>  	STAILQ_FOREACH(graph, &graph_list, next)
>  		if (graph->id == gid) {
>  			rte_graph_foreach_node(count, off, graph->graph,
> @@ -747,7 +792,8 @@ graph_scan_dump(FILE *f, rte_graph_t id, bool all)
>  	struct graph *graph;
> 
>  	RTE_VERIFY(f);
> -	GRAPH_ID_CHECK(id);
> +	if (graph_from_id(id) == NULL)
> +		goto fail;
> 
>  	STAILQ_FOREACH(graph, &graph_list, next) {
>  		if (all == true) {
> @@ -776,7 +822,13 @@ rte_graph_list_dump(FILE *f)  rte_graph_t
>  rte_graph_max_count(void)
>  {
> -	return graph_id;
> +	struct graph *graph;
> +	rte_graph_t count = 0;
> +
> +	STAILQ_FOREACH(graph, &graph_list, next)
> +		count++;
> +
> +	return count;
>  }
> 
>  RTE_LOG_REGISTER_DEFAULT(rte_graph_logtype, INFO);
> --
> 2.44.0


