From: <jerinj@marvell.com>
To: <dev@dpdk.org>
Cc: <pkapoor@marvell.com>, <ndabilpuram@marvell.com>,
<kirankumark@marvell.com>, <pbhagavatula@marvell.com>,
<pathreya@marvell.com>, <nsaxena@marvell.com>,
<sshankarnara@marvell.com>, <honnappa.nagarahalli@arm.com>,
<thomas@monjalon.net>, <david.marchand@redhat.com>,
<ferruh.yigit@intel.com>, <arybchenko@solarflare.com>,
<ajit.khaparde@broadcom.com>, <xiaolong.ye@intel.com>,
<rasland@mellanox.com>, <maxime.coquelin@redhat.com>,
<akhil.goyal@nxp.com>, <cristian.dumitrescu@intel.com>,
<john.mcnamara@intel.com>, <bruce.richardson@intel.com>,
<anatoly.burakov@intel.com>, <gavin.hu@arm.com>,
<drc@linux.vnet.ibm.com>, <konstantin.ananyev@intel.com>,
<pallavi.kadam@intel.com>, <olivier.matz@6wind.com>,
<gage.eads@intel.com>, <nikhil.rao@intel.com>,
<erik.g.carrillo@intel.com>, <hemant.agrawal@nxp.com>,
<artem.andreev@oktetlabs.ru>, <sthemmin@microsoft.com>,
<shahafs@mellanox.com>, <keith.wiles@intel.com>,
<mattias.ronnblom@ericsson.com>, <jasvinder.singh@intel.com>,
<vladimir.medvedkin@intel.com>, <mdr@ashroe.eu>,
<techboard@dpdk.org>, "Jerin Jacob" <jerinj@marvell.com>
Subject: [dpdk-dev] [RFC PATCH 1/5] graph: introduce graph subsystem
Date: Fri, 31 Jan 2020 22:31:57 +0530 [thread overview]
Message-ID: <20200131170201.3236153-2-jerinj@marvell.com> (raw)
In-Reply-To: <20200131170201.3236153-1-jerinj@marvell.com>
From: Jerin Jacob <jerinj@marvell.com>
Abstracting data processing functions as “nodes” and “linking” them
together to create a complex “graph” has been identified as a proven
architecture for building reusable data processing functions.
This patch introduces the graph infrastructure for packet processing.
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
---
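A minimal usage sketch for reviewers (illustration only; the node names,
the no-op process callback and the "worker0" graph name below are
hypothetical and not part of this patch):

    #include <rte_graph.h>

    /* Hypothetical no-op process callback shared by both example nodes */
    static uint16_t
    example_process(struct rte_graph *graph, struct rte_node *node,
                    void **objs, uint16_t nb_objs)
    {
            RTE_SET_USED(graph);
            RTE_SET_USED(node);
            RTE_SET_USED(objs);
            return nb_objs;
    }

    static struct rte_node_register example_sink = {
            .name = "example_sink",
            .process = example_process,
    };
    RTE_NODE_REGISTER(example_sink);

    static struct rte_node_register example_src = {
            .name = "example_src",
            .flags = RTE_NODE_SOURCE_F,
            .process = example_process,
            .nb_edges = 1,
            .next_nodes = {"example_sink"},
    };
    RTE_NODE_REGISTER(example_src);

    /* After rte_eal_init(): expand the pattern to registered nodes,
     * create the graph and look up its fast path object.
     */
    const char *patterns[] = {"example_*"};
    struct rte_graph_param prm = {
            .socket_id = SOCKET_ID_ANY,
            .nb_node_patterns = 1,
            .node_patterns = patterns,
    };
    rte_graph_t id = rte_graph_create("worker0", &prm);
    struct rte_graph *graph = rte_graph_lookup("worker0");

Error handling and the fast path walk (rte_graph_worker.h) are omitted
from this sketch.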
app/test/meson.build | 1 +
config/common_base | 7 +
config/rte_config.h | 4 +
lib/Makefile | 2 +
lib/librte_graph/Makefile | 28 ++
lib/librte_graph/graph.c | 578 +++++++++++++++++++++++++
lib/librte_graph/graph_debug.c | 81 ++++
lib/librte_graph/graph_ops.c | 163 +++++++
lib/librte_graph/graph_populate.c | 224 ++++++++++
lib/librte_graph/graph_private.h | 113 +++++
lib/librte_graph/graph_stats.c | 396 +++++++++++++++++
lib/librte_graph/meson.build | 11 +
lib/librte_graph/node.c | 419 ++++++++++++++++++
lib/librte_graph/rte_graph.h | 277 ++++++++++++
lib/librte_graph/rte_graph_version.map | 46 ++
lib/librte_graph/rte_graph_worker.h | 280 ++++++++++++
lib/meson.build | 2 +-
mk/rte.app.mk | 1 +
18 files changed, 2632 insertions(+), 1 deletion(-)
create mode 100644 lib/librte_graph/Makefile
create mode 100644 lib/librte_graph/graph.c
create mode 100644 lib/librte_graph/graph_debug.c
create mode 100644 lib/librte_graph/graph_ops.c
create mode 100644 lib/librte_graph/graph_populate.c
create mode 100644 lib/librte_graph/graph_private.h
create mode 100644 lib/librte_graph/graph_stats.c
create mode 100644 lib/librte_graph/meson.build
create mode 100644 lib/librte_graph/node.c
create mode 100644 lib/librte_graph/rte_graph.h
create mode 100644 lib/librte_graph/rte_graph_version.map
create mode 100644 lib/librte_graph/rte_graph_worker.h
diff --git a/app/test/meson.build b/app/test/meson.build
index 22b0cefaa..e1cdae3cb 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -160,6 +160,7 @@ test_deps = ['acl',
'ring',
'stack',
- 'timer'
+ 'timer',
+ 'graph',
]
fast_test_names = [
diff --git a/config/common_base b/config/common_base
index c897dd0ae..badcc0be5 100644
--- a/config/common_base
+++ b/config/common_base
@@ -1070,6 +1070,13 @@ CONFIG_RTE_LIBRTE_BPF_ELF=n
#
CONFIG_RTE_LIBRTE_IPSEC=y
+#
+# Compile librte_graph
+#
+CONFIG_RTE_LIBRTE_GRAPH=y
+CONFIG_RTE_GRAPH_BURST_SIZE=256
+CONFIG_RTE_LIBRTE_GRAPH_STATS=y
+CONFIG_RTE_LIBRTE_GRAPH_DEBUG=n
#
# Compile the test application
#
diff --git a/config/rte_config.h b/config/rte_config.h
index d30786bc0..e9201fd46 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -98,6 +98,10 @@
/* KNI defines */
#define RTE_KNI_PREEMPT_DEFAULT 1
+/* rte_graph defines */
+#define RTE_GRAPH_BURST_SIZE 256
+#define RTE_LIBRTE_GRAPH_STATS 1
+
/****** driver defines ********/
/* QuickAssist device */
diff --git a/lib/Makefile b/lib/Makefile
index 46b91ae1a..495f572bf 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -119,6 +119,8 @@ DEPDIRS-librte_telemetry := librte_eal librte_metrics librte_ethdev
DIRS-$(CONFIG_RTE_LIBRTE_RCU) += librte_rcu
DEPDIRS-librte_rcu := librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_GRAPH) += librte_graph
+DEPDIRS-librte_graph := librte_eal librte_mbuf
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUX),y)
DIRS-$(CONFIG_RTE_LIBRTE_KNI) += librte_kni
endif
diff --git a/lib/librte_graph/Makefile b/lib/librte_graph/Makefile
new file mode 100644
index 000000000..967c8d9bc
--- /dev/null
+++ b/lib/librte_graph/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2020 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_graph.a
+
+CFLAGS += -O3 -DALLOW_EXPERIMENTAL_API
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal
+
+EXPORT_MAP := rte_graph_version.map
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_GRAPH) += node.c
+SRCS-$(CONFIG_RTE_LIBRTE_GRAPH) += graph.c
+SRCS-$(CONFIG_RTE_LIBRTE_GRAPH) += graph_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_GRAPH) += graph_debug.c
+SRCS-$(CONFIG_RTE_LIBRTE_GRAPH) += graph_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_GRAPH) += graph_populate.c
+
+# install header files
+SYMLINK-$(CONFIG_RTE_LIBRTE_GRAPH)-include += rte_graph.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_GRAPH)-include += rte_graph_worker.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_graph/graph.c b/lib/librte_graph/graph.c
new file mode 100644
index 000000000..0bdd6e1c0
--- /dev/null
+++ b/lib/librte_graph/graph.c
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <fnmatch.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_errno.h>
+#include <rte_graph.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+
+#include "graph_private.h"
+
+static struct graph_head graph_list = STAILQ_HEAD_INITIALIZER(graph_list);
+static rte_spinlock_t graph_lock = RTE_SPINLOCK_INITIALIZER;
+static rte_graph_t graph_id;
+int rte_graph_logtype;
+
+#define graph_id_check(id) id_check(id, graph_id)
+
+/* Private functions */
+struct graph_head *
+graph_list_head_get(void)
+{
+ return &graph_list;
+}
+
+void
+graph_spinlock_lock(void)
+{
+ rte_spinlock_lock(&graph_lock);
+}
+
+void
+graph_spinlock_unlock(void)
+{
+ rte_spinlock_unlock(&graph_lock);
+}
+
+static int
+graph_node_add(struct graph *graph, struct node *node)
+{
+ struct graph_node *graph_node;
+ size_t sz;
+
+ /* Skip the duplicate nodes */
+ STAILQ_FOREACH(graph_node, &graph->node_list, next)
+ if (strncmp(node->name, graph_node->node->name,
+ RTE_NODE_NAMESIZE) == 0)
+ return 0;
+
+ /* Allocate new graph node object */
+ sz = sizeof(*graph_node) + node->nb_edges * sizeof(struct node *);
+ graph_node = calloc(1, sz);
+
+ if (graph_node == NULL)
+ set_err(ENOMEM, free, "failed to calloc %s object", node->name);
+
+ /* Initialize the graph node */
+ graph_node->node = node;
+
+ /* Add to graph node list */
+ STAILQ_INSERT_TAIL(&graph->node_list, graph_node, next);
+ return 0;
+
+free:
+ free(graph_node);
+ return -rte_errno;
+}
+
+static struct graph_node *
+node_to_graph_node(struct graph *graph, struct node *node)
+{
+ struct graph_node *graph_node;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next)
+ if (graph_node->node == node)
+ return graph_node;
+
+ set_err(ENODEV, fail, "found isolated node %s", node->name);
+fail:
+ return NULL;
+}
+
+static int
+graph_node_edges_add(struct graph *graph)
+{
+ struct graph_node *graph_node;
+ struct node *adjacency;
+ const char *next;
+ rte_edge_t i;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+ for (i = 0; i < graph_node->node->nb_edges; i++) {
+ next = graph_node->node->next_nodes[i];
+ adjacency = node_from_name(next);
+ if (adjacency == NULL)
+ set_err(EINVAL, fail, "node %s not registered",
+ next);
+ if (graph_node_add(graph, adjacency))
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ return -rte_errno;
+}
+
+static int
+graph_adjacency_list_update(struct graph *graph)
+{
+ struct graph_node *graph_node, *tmp;
+ struct node *adjacency;
+ const char *next;
+ rte_edge_t i;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+ for (i = 0; i < graph_node->node->nb_edges; i++) {
+ next = graph_node->node->next_nodes[i];
+ adjacency = node_from_name(next);
+ if (adjacency == NULL)
+ set_err(EINVAL, fail, "node %s not registered",
+ next);
+ tmp = node_to_graph_node(graph, adjacency);
+ if (tmp == NULL)
+ goto fail;
+ graph_node->adjacency_list[i] = tmp;
+ }
+ }
+
+ return 0;
+fail:
+ return -rte_errno;
+}
+
+static int
+expand_pattern_to_node(struct graph *graph, const char *pattern)
+{
+ struct node_head *node_head = node_list_head_get();
+ bool found = false;
+ struct node *node;
+
+ /* Check for pattern match */
+ STAILQ_FOREACH(node, node_head, next) {
+ if (fnmatch(pattern, node->name, 0) == 0) {
+ if (graph_node_add(graph, node))
+ goto fail;
+ found = true;
+ }
+ }
+ if (found == false)
+ set_err(EFAULT, fail, "pattern %s node not found", pattern);
+
+ return 0;
+fail:
+ return -rte_errno;
+}
+
+static void
+graph_cleanup(struct graph *graph)
+{
+ struct graph_node *graph_node;
+
+ while (!STAILQ_EMPTY(&graph->node_list)) {
+ graph_node = STAILQ_FIRST(&graph->node_list);
+ STAILQ_REMOVE_HEAD(&graph->node_list, next);
+ free(graph_node);
+ }
+}
+
+static int
+graph_node_init(struct graph *graph)
+{
+ struct graph_node *graph_node;
+ const char *name;
+ int rc;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+ if (graph_node->node->init) {
+ name = graph_node->node->name;
+ rc = graph_node->node->init(graph->graph,
+ graph_node_name_to_ptr(graph->graph, name));
+ if (rc)
+ set_err(rc, err, "node %s init() failed", name);
+ }
+ }
+
+ return 0;
+err:
+ return -rte_errno;
+}
+
+static void
+graph_node_fini(struct graph *graph)
+{
+ struct graph_node *graph_node;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next)
+ if (graph_node->node->fini)
+ graph_node->node->fini(graph->graph,
+ graph_node_name_to_ptr(graph->graph,
+ graph_node->node->name));
+}
+
+static struct rte_graph *
+graph_mem_fixup_node_ctx(struct rte_graph *graph)
+{
+ rte_node_t count; rte_graph_off_t off; struct rte_node *node;
+ struct node *node_db;
+ const char *name;
+
+ rte_graph_foreach_node(count, off, graph, node) {
+ if (node->parent_id == RTE_NODE_ID_INVALID) /* Static node */
+ name = node->name;
+ else /* Cloned node */
+ name = node->parent;
+
+ node_db = node_from_name(name);
+ if (node_db == NULL)
+ set_err(ENOLINK, fail, "node %s not found", name);
+ node->process = node_db->process;
+ }
+
+ return graph;
+fail:
+ return NULL;
+}
+
+static struct rte_graph *
+graph_mem_fixup_secondary(struct rte_graph *graph)
+{
+ if (graph == NULL || rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return graph;
+
+ return graph_mem_fixup_node_ctx(graph);
+}
+
+struct rte_graph *
+rte_graph_lookup(const char *name)
+{
+ const struct rte_memzone *mz;
+ struct rte_graph *rc = NULL;
+
+ mz = rte_memzone_lookup(name);
+ if (mz)
+ rc = mz->addr;
+
+ return graph_mem_fixup_secondary(rc);
+}
+
+rte_graph_t
+rte_graph_create(const char *name, struct rte_graph_param *prm)
+{
+ struct graph *graph;
+ const char *pattern;
+ uint16_t i;
+
+ graph_spinlock_lock();
+
+ /* Check arguments sanity */
+ if (prm == NULL)
+ set_err(EINVAL, fail, "param should not be NULL");
+
+ if (name == NULL)
+ set_err(EINVAL, fail, "graph name should not be NULL");
+
+ /* Check for existence of duplicate graph */
+ STAILQ_FOREACH(graph, &graph_list, next)
+ if (strncmp(name, graph->name, RTE_GRAPH_NAMESIZE) == 0)
+ set_err(EEXIST, fail, "found duplicate graph %s", name);
+
+ /* Create graph object */
+ graph = calloc(1, sizeof(*graph));
+ if (graph == NULL)
+ set_err(ENOMEM, fail, "failed to calloc graph object");
+
+ /* Initialize the graph object */
+ STAILQ_INIT(&graph->node_list);
+ if (rte_strscpy(graph->name, name, RTE_GRAPH_NAMESIZE) < 0)
+ set_err(E2BIG, free, "too big name=%s", name);
+
+ /* Expand node pattern and add the nodes to the graph */
+ for (i = 0; i < prm->nb_node_patterns; i++) {
+ pattern = prm->node_patterns[i];
+ if (expand_pattern_to_node(graph, pattern))
+ goto graph_cleanup;
+ }
+
+ /* Go over all the nodes edges and add them to the graph */
+ if (graph_node_edges_add(graph))
+ goto graph_cleanup;
+
+ /* Update adjacency list of all nodes in the graph */
+ if (graph_adjacency_list_update(graph))
+ goto graph_cleanup;
+
+ /* Make sure at least one source node is present in the graph */
+ if (!graph_src_nodes_count(graph))
+ goto graph_cleanup;
+
+ /* Make sure no node is pointing to a source node */
+ if (graph_node_has_edge_to_src_node(graph))
+ goto graph_cleanup;
+
+ /* Don't allow a node to have a loop edge to itself */
+ if (graph_node_has_loop_edge(graph))
+ goto graph_cleanup;
+
+ /* Do BFS from src nodes on the graph to find isolated nodes */
+ if (graph_has_isolated_node(graph))
+ goto graph_cleanup;
+
+ /* Initialize graph object */
+ graph->socket = prm->socket_id;
+ graph->src_node_count = graph_src_nodes_count(graph);
+ graph->node_count = graph_nodes_count(graph);
+ graph->id = graph_id;
+
+ /* Allocate the Graph fast path memory and populate the data */
+ if (graph_fp_mem_create(graph))
+ goto graph_cleanup;
+
+ /* Call init() of all the nodes in the graph */
+ if (graph_node_init(graph))
+ goto graph_mem_destroy;
+
+ /* All good, let's add the graph to the list */
+ graph_id++;
+ STAILQ_INSERT_TAIL(&graph_list, graph, next);
+
+ graph_spinlock_unlock();
+ return graph->id;
+
+graph_mem_destroy:
+ graph_fp_mem_destroy(graph);
+graph_cleanup:
+ graph_cleanup(graph);
+free:
+ free(graph);
+fail:
+ graph_spinlock_unlock();
+ return RTE_GRAPH_ID_INVALID;
+}
+
+rte_graph_t
+rte_graph_destroy(const char *graph_name)
+{
+ rte_graph_t rc = RTE_GRAPH_ID_INVALID;
+ struct graph *graph, *tmp;
+ const char *name;
+
+ graph_spinlock_lock();
+
+ graph = STAILQ_FIRST(&graph_list);
+ while (graph != NULL) {
+ tmp = STAILQ_NEXT(graph, next);
+ name = graph->name;
+ if (strncmp(name, graph_name, RTE_GRAPH_NAMESIZE) == 0) {
+ /* Call fini() of all the nodes in the graph */
+ graph_node_fini(graph);
+ /* Destroy graph fast path memory */
+ rc = graph_fp_mem_destroy(graph);
+ if (rc)
+ set_err(rc, done, "mz %s free failed", name);
+
+ graph_cleanup(graph);
+ STAILQ_REMOVE(&graph_list, graph, graph, next);
+ rc = graph->id;
+ free(graph);
+ graph_id--;
+ goto done;
+ }
+ graph = tmp;
+ }
+done:
+ graph_spinlock_unlock();
+ return rc;
+}
+
+rte_graph_t
+rte_graph_from_name(const char *name)
+{
+ struct graph *graph;
+
+ STAILQ_FOREACH(graph, &graph_list, next)
+ if (strncmp(graph->name, name, RTE_GRAPH_NAMESIZE) == 0)
+ return graph->id;
+
+ return RTE_GRAPH_ID_INVALID;
+}
+
+char *
+rte_graph_id_to_name(rte_graph_t id)
+{
+ struct graph *graph;
+
+ graph_id_check(id);
+ STAILQ_FOREACH(graph, &graph_list, next)
+ if (graph->id == id)
+ return graph->name;
+
+fail:
+ return NULL;
+}
+
+struct rte_node *rte_graph_node_get(rte_graph_t gid, uint32_t nid)
+{
+ struct rte_node *node;
+ struct graph *graph;
+ rte_graph_off_t off;
+ rte_node_t count;
+
+ graph_id_check(gid);
+ STAILQ_FOREACH(graph, &graph_list, next)
+ if (graph->id == gid) {
+ rte_graph_foreach_node(count, off, graph->graph, node) {
+ if (node->id == nid)
+ return node;
+ }
+ break;
+ }
+fail:
+ return NULL;
+}
+
+struct rte_node *rte_graph_node_get_by_name(const char *graph_name,
+ const char *node_name)
+{
+ struct rte_node *node;
+ struct graph *graph;
+ rte_graph_off_t off;
+ rte_node_t count;
+
+ STAILQ_FOREACH(graph, &graph_list, next)
+ if (!strncmp(graph->name, graph_name, RTE_GRAPH_NAMESIZE)) {
+ rte_graph_foreach_node(count, off, graph->graph, node) {
+ if (!strncmp(node->name, node_name,
+ RTE_NODE_NAMESIZE))
+ return node;
+ }
+ break;
+ }
+
+ return NULL;
+}
+
+__rte_experimental void __rte_noinline
+__rte_node_stream_alloc(struct rte_graph *graph, struct rte_node *node)
+{
+ uint16_t size = node->size;
+
+ RTE_VERIFY(size != UINT16_MAX);
+ /* Allocate double the size to avoid an immediate realloc */
+ size = RTE_MIN(UINT16_MAX, RTE_MAX(RTE_GRAPH_BURST_SIZE, size * 2));
+ node->objs = rte_realloc_socket(node->objs, size * sizeof(void *),
+ RTE_CACHE_LINE_SIZE, graph->socket);
+ RTE_VERIFY(node->objs);
+ node->size = size;
+ node->realloc_count++;
+}
+
+__rte_experimental void __rte_noinline
+__rte_node_stream_alloc_size(struct rte_graph *graph, struct rte_node *node,
+ uint16_t req_size)
+{
+ uint16_t size = node->size;
+
+ RTE_VERIFY(size != UINT16_MAX);
+ /* Allocate double the size to avoid an immediate realloc */
+ size = RTE_MIN(UINT16_MAX, RTE_MAX(RTE_GRAPH_BURST_SIZE, req_size * 2));
+ node->objs = rte_realloc_socket(node->objs, size * sizeof(void *),
+ RTE_CACHE_LINE_SIZE, graph->socket);
+ RTE_VERIFY(node->objs);
+ node->size = size;
+ node->realloc_count++;
+}
+
+static int
+graph_to_dot(FILE *f, struct graph *graph)
+{
+ const char *src_edge_color = " [color=blue]\n";
+ const char *edge_color = "\n";
+ struct graph_node *graph_node;
+ char *node_name;
+ rte_edge_t i;
+ int rc;
+
+ rc = fprintf(f, "digraph %s {\n\trankdir=LR;\n", graph->name);
+ if (rc < 0)
+ goto end;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+ node_name = graph_node->node->name;
+ for (i = 0; i < graph_node->node->nb_edges; i++) {
+ rc = fprintf(f, "\t\"%s\"->\"%s\"%s", node_name,
+ graph_node->adjacency_list[i]->node->name,
+ graph_node->node->flags & RTE_NODE_SOURCE_F ?
+ src_edge_color : edge_color);
+ if (rc < 0)
+ goto end;
+ }
+ }
+ rc = fprintf(f, "}\n");
+ if (rc < 0)
+ goto end;
+
+ return 0;
+end:
+ rte_errno = EBADF;
+ return -rte_errno;
+}
+
+rte_graph_t
+rte_graph_export(const char *name, FILE *f)
+{
+ rte_graph_t rc = RTE_GRAPH_ID_INVALID;
+ struct graph *graph;
+
+ STAILQ_FOREACH(graph, &graph_list, next) {
+ if (strncmp(graph->name, name, RTE_GRAPH_NAMESIZE) == 0) {
+ rc = graph_to_dot(f, graph);
+ goto end;
+ }
+ }
+ rte_errno = ENOENT;
+end:
+ return rc;
+}
+
+static void
+graph_scan_dump(FILE *f, rte_graph_t id, bool all)
+{
+ struct graph *graph;
+
+ RTE_VERIFY(f);
+ graph_id_check(id);
+
+ STAILQ_FOREACH(graph, &graph_list, next) {
+ if (all == true) {
+ graph_dump(f, graph);
+ } else if (graph->id == id) {
+ graph_dump(f, graph);
+ return;
+ }
+ }
+fail:
+ return;
+}
+
+void
+rte_graph_dump(FILE *f, rte_graph_t id)
+{
+ graph_scan_dump(f, id, false);
+}
+
+void
+rte_graph_list_dump(FILE *f)
+{
+ graph_scan_dump(f, 0, true);
+}
+
+RTE_INIT(rte_graph_init_log)
+{
+ rte_graph_logtype = rte_log_register("lib.graph");
+ if (rte_graph_logtype >= 0)
+ rte_log_set_level(rte_graph_logtype, RTE_LOG_INFO);
+}
+
+rte_graph_t
+rte_graph_max_count(void)
+{
+ return graph_id;
+}
diff --git a/lib/librte_graph/graph_debug.c b/lib/librte_graph/graph_debug.c
new file mode 100644
index 000000000..7d42889fb
--- /dev/null
+++ b/lib/librte_graph/graph_debug.c
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_debug.h>
+
+#include "graph_private.h"
+
+void
+graph_dump(FILE *f, struct graph *g)
+{
+ struct graph_node *graph_node;
+ rte_edge_t i = 0;
+
+ fprintf(f, "graph <%s>\n", g->name);
+ fprintf(f, " id=%"PRIu32"\n", g->id);
+ fprintf(f, " cir_start=%"PRIu32"\n", g->cir_start);
+ fprintf(f, " cir_mask=%"PRIu32"\n", g->cir_mask);
+ fprintf(f, " addr=%p\n", g);
+ fprintf(f, " graph=%p\n", g->graph);
+ fprintf(f, " mem_sz=%zu\n", g->mem_sz);
+ fprintf(f, " node_count=%"PRIu32"\n", g->node_count);
+ fprintf(f, " src_node_count=%"PRIu32"\n", g->src_node_count);
+
+ STAILQ_FOREACH(graph_node, &g->node_list, next)
+ fprintf(f, " node[%d] <%s>\n", i++, graph_node->node->name);
+}
+
+void
+node_dump(FILE *f, struct node *n)
+{
+ rte_edge_t i;
+
+ fprintf(f, "node <%s>\n", n->name);
+ fprintf(f, " id=%"PRIu32"\n", n->id);
+ fprintf(f, " flags=0x%" PRIx64 "\n", n->flags);
+ fprintf(f, " addr=%p\n", n);
+ fprintf(f, " process=%p\n", n->process);
+ fprintf(f, " nb_edges=%d\n", n->nb_edges);
+
+ for (i = 0; i < n->nb_edges; i++)
+ fprintf(f, " edge[%d] <%s>\n", i, n->next_nodes[i]);
+}
+
+void
+rte_graph_obj_dump(FILE *f, struct rte_graph *g, bool all)
+{
+ rte_node_t count; rte_graph_off_t off; struct rte_node *n; rte_edge_t i;
+
+ fprintf(f, "graph <%s> @ %p\n", g->name, g);
+ fprintf(f, " id=%"PRIu32"\n", g->id);
+ fprintf(f, " head=%"PRId32"\n", (int32_t)g->head);
+ fprintf(f, " tail=%"PRId32"\n", (int32_t)g->tail);
+ fprintf(f, " cir_mask=0x%"PRIx32"\n", g->cir_mask);
+ fprintf(f, " nb_nodes=%"PRId32"\n", g->nb_nodes);
+ fprintf(f, " socket=%d\n", g->socket);
+ fprintf(f, " fence=0x%" PRIx64 "\n", g->fence);
+ fprintf(f, " nodes_start=0x%"PRIx32"\n", g->nodes_start);
+ fprintf(f, " cir_start=%p\n", g->cir_start);
+
+ rte_graph_foreach_node(count, off, g, n) {
+ if (!all && n->idx == 0)
+ continue;
+ fprintf(f, " node[%d] <%s>\n", count, n->name);
+ fprintf(f, " fence=0x%" PRIx64 "\n", n->fence);
+ fprintf(f, " objs=%p\n", n->objs);
+ fprintf(f, " process=%p\n", n->process);
+ fprintf(f, " id=0x%" PRIx32 "\n", n->id);
+ fprintf(f, " offset=0x%" PRIx32 "\n", n->off);
+ fprintf(f, " nb_edges=%" PRId32 "\n", n->nb_edges);
+ fprintf(f, " realloc_count=%d\n", n->realloc_count);
+ fprintf(f, " size=%d\n", n->size);
+ fprintf(f, " idx=%d\n", n->idx);
+ fprintf(f, " total_objs=%" PRId64 "\n", n->total_objs);
+ fprintf(f, " total_calls=%" PRId64 "\n", n->total_calls);
+ for (i = 0; i < n->nb_edges; i++)
+ fprintf(f, " edge[%d] <%s>\n",
+ i, n->nodes[i]->name);
+ }
+}
diff --git a/lib/librte_graph/graph_ops.c b/lib/librte_graph/graph_ops.c
new file mode 100644
index 000000000..6f5c69599
--- /dev/null
+++ b/lib/librte_graph/graph_ops.c
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+
+#include "graph_private.h"
+
+static inline int
+node_has_loop_edge(struct node *node)
+{
+ rte_edge_t i;
+ char *name;
+ int rc = 0;
+
+ for (i = 0; i < node->nb_edges; i++) {
+ if (strncmp(node->name, node->next_nodes[i],
+ RTE_NODE_NAMESIZE) == 0) {
+ name = node->name;
+ rc = 1;
+ set_err(EINVAL, fail, "node %s has loop to self", name);
+ }
+ }
+fail:
+ return rc;
+}
+
+int
+graph_node_has_loop_edge(struct graph *graph)
+{
+ struct graph_node *graph_node;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next)
+ if (node_has_loop_edge(graph_node->node))
+ return 1;
+
+ return 0;
+}
+
+rte_node_t
+graph_src_nodes_count(struct graph *graph)
+{
+ struct graph_node *graph_node;
+ rte_node_t rc = 0;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next)
+ if (graph_node->node->flags & RTE_NODE_SOURCE_F)
+ rc++;
+
+ if (rc == 0)
+ set_err(EINVAL, fail, "graph needs at least one source node");
+fail:
+ return rc;
+}
+
+int
+graph_node_has_edge_to_src_node(struct graph *graph)
+{
+ struct graph_node *graph_node;
+ struct node *node;
+ rte_edge_t i;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+ for (i = 0; i < graph_node->node->nb_edges; i++) {
+ node = graph_node->adjacency_list[i]->node;
+ if (node->flags & RTE_NODE_SOURCE_F)
+ set_err(EEXIST, fail,
+ "node %s points to the source node %s",
+ graph_node->node->name, node->name);
+ }
+ }
+
+ return 0;
+fail:
+ return 1;
+}
+
+rte_node_t
+graph_nodes_count(struct graph *graph)
+{
+ struct graph_node *graph_node;
+ rte_node_t count = 0;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next)
+ count++;
+
+ return count;
+}
+
+void
+graph_mark_nodes_as_not_visited(struct graph *graph)
+{
+ struct graph_node *graph_node;
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next)
+ graph_node->visited = false;
+}
+
+int
+graph_bfs(struct graph *graph, struct graph_node *start)
+{
+ struct graph_node **queue, *v, *tmp;
+ uint16_t head = 0, tail = 0;
+ rte_edge_t i;
+ size_t sz;
+
+ sz = sizeof(struct graph_node *) * graph_nodes_count(graph);
+ queue = malloc(sz);
+ if (queue == NULL)
+ set_err(ENOMEM, fail, "failed to alloc bfs queue of %zu", sz);
+
+ /* BFS algorithm */
+ queue[tail++] = start;
+ start->visited = true;
+ while (head != tail) {
+ v = queue[head++];
+ for (i = 0; i < v->node->nb_edges; i++) {
+ tmp = v->adjacency_list[i];
+ if (tmp->visited == false) {
+ queue[tail++] = tmp;
+ tmp->visited = true;
+ }
+ }
+ }
+
+ free(queue);
+ return 0;
+
+fail:
+ return -rte_errno;
+}
+
+int
+graph_has_isolated_node(struct graph *graph)
+{
+ struct graph_node *graph_node;
+
+ graph_mark_nodes_as_not_visited(graph);
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+ if (graph_node->node->flags & RTE_NODE_SOURCE_F) {
+ if (graph_node->node->nb_edges == 0)
+ set_err(EINVAL, fail,
+ "%s node needs minimum one edge",
+ graph_node->node->name);
+ if (graph_bfs(graph, graph_node))
+ goto fail;
+ }
+ }
+
+ STAILQ_FOREACH(graph_node, &graph->node_list, next)
+ if (graph_node->visited == false)
+ set_err(EINVAL, fail, "found isolated node %s",
+ graph_node->node->name);
+
+ return 0;
+fail:
+ return 1;
+}
diff --git a/lib/librte_graph/graph_populate.c b/lib/librte_graph/graph_populate.c
new file mode 100644
index 000000000..72476db11
--- /dev/null
+++ b/lib/librte_graph/graph_populate.c
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <fnmatch.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+
+#include "graph_private.h"
+
+static size_t
+graph_fp_mem_calc_size(struct graph *graph)
+{
+ struct graph_node *graph_node;
+ rte_node_t val;
+ size_t sz;
+
+ /* Graph header */
+ sz = sizeof(struct rte_graph);
+ /* Source nodes list */
+ sz += sizeof(rte_graph_off_t) * graph->src_node_count;
+ /* Circular buffer for pending streams, sized to the number of nodes */
+ val = rte_align32pow2(graph->node_count * sizeof(rte_graph_off_t));
+ sz = RTE_ALIGN(sz, val);
+ graph->cir_start = sz;
+ graph->cir_mask = rte_align32pow2(graph->node_count) - 1;
+ sz += val;
+ /* Fence */
+ sz += sizeof(RTE_GRAPH_FENCE);
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+ graph->nodes_start = sz;
+ /* For 0..N node objects with fence */
+ STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+ sz += sizeof(struct rte_node);
+ /* Pointers to next nodes (edges) */
+ sz += sizeof(struct rte_node *) * graph_node->node->nb_edges;
+ }
+
+ graph->mem_sz = sz;
+ return sz;
+}
+
+static void
+graph_header_populate(struct graph *_graph)
+{
+ struct rte_graph *graph = _graph->graph;
+
+ graph->tail = 0;
+ graph->head = (int32_t)-_graph->src_node_count;
+ graph->cir_mask = _graph->cir_mask;
+ graph->nb_nodes = _graph->node_count;
+ graph->cir_start = RTE_PTR_ADD(graph, _graph->cir_start);
+ graph->nodes_start = _graph->nodes_start;
+ graph->socket = _graph->socket;
+ graph->id = _graph->id;
+ memcpy(graph->name, _graph->name, RTE_GRAPH_NAMESIZE);
+ graph->fence = RTE_GRAPH_FENCE;
+}
+
+static void
+graph_nodes_populate(struct graph *_graph)
+{
+ rte_graph_off_t off = _graph->nodes_start;
+ struct rte_graph *graph = _graph->graph;
+ struct graph_node *graph_node;
+ rte_edge_t count, nb_edges;
+ const char *parent;
+ rte_node_t pid;
+
+ STAILQ_FOREACH(graph_node, &_graph->node_list, next) {
+ struct rte_node *node = RTE_PTR_ADD(graph, off);
+ memset(node, 0, sizeof(*node));
+ node->fence = RTE_GRAPH_FENCE;
+ node->off = off;
+ node->process = graph_node->node->process;
+ memcpy(node->name, graph_node->node->name, RTE_GRAPH_NAMESIZE);
+ pid = graph_node->node->parent_id;
+ if (pid != RTE_NODE_ID_INVALID) { /* Cloned node */
+ parent = rte_node_id_to_name(pid);
+ memcpy(node->parent, parent, RTE_GRAPH_NAMESIZE);
+ }
+ node->id = graph_node->node->id;
+ node->parent_id = pid;
+ nb_edges = graph_node->node->nb_edges;
+ node->nb_edges = nb_edges;
+ off += sizeof(struct rte_node);
+ /* Copy the names in the first pass; replaced with rte_node pointers later */
+ for (count = 0; count < nb_edges; count++)
+ node->nodes[count] = (struct rte_node *)
+ &graph_node->adjacency_list[count]->node->name[0];
+
+ off += sizeof(struct rte_node *) * nb_edges;
+ off = RTE_ALIGN(off, RTE_CACHE_LINE_SIZE);
+ node->next = off;
+ __rte_node_stream_alloc(graph, node);
+ }
+}
+
+struct rte_node *
+graph_node_id_to_ptr(const struct rte_graph *graph, rte_node_t id)
+{
+ rte_node_t count; rte_graph_off_t off; struct rte_node *node;
+
+ rte_graph_foreach_node(count, off, graph, node)
+ if (unlikely(node->id == id))
+ return node;
+
+ return NULL;
+}
+
+struct rte_node *
+graph_node_name_to_ptr(const struct rte_graph *graph, const char *name)
+{
+ rte_node_t count; rte_graph_off_t off; struct rte_node *node;
+
+ rte_graph_foreach_node(count, off, graph, node)
+ if (strncmp(name, node->name, RTE_NODE_NAMESIZE) == 0)
+ return node;
+
+ return NULL;
+}
+
+static int
+graph_node_nexts_populate(struct graph *_graph)
+{
+ rte_node_t count, val; rte_graph_off_t off; struct rte_node *node;
+ const struct rte_graph *graph = _graph->graph;
+ const char *name;
+
+ rte_graph_foreach_node(count, off, graph, node) {
+ for (val = 0; val < node->nb_edges; val++) {
+ name = (const char *)node->nodes[val];
+ node->nodes[val] = graph_node_name_to_ptr(graph, name);
+ if (node->nodes[val] == NULL)
+ set_err(EINVAL, fail, "%s not found", name);
+ }
+ }
+
+ return 0;
+fail:
+ return -rte_errno;
+}
+
+static int
+graph_src_nodes_populate(struct graph *_graph)
+{
+ struct rte_graph *graph = _graph->graph;
+ struct graph_node *graph_node;
+ struct rte_node *node;
+ int32_t head = -1;
+ const char *name;
+
+ STAILQ_FOREACH(graph_node, &_graph->node_list, next) {
+ if (graph_node->node->flags & RTE_NODE_SOURCE_F) {
+ name = graph_node->node->name;
+ node = graph_node_name_to_ptr(graph, name);
+ if (node == NULL)
+ set_err(EINVAL, fail, "%s not found", name);
+
+ __rte_node_stream_alloc(graph, node);
+ graph->cir_start[head--] = node->off;
+ }
+ }
+
+ return 0;
+fail:
+ return -rte_errno;
+}
+
+static int
+graph_fp_mem_populate(struct graph *graph)
+{
+ int rc;
+
+ graph_header_populate(graph);
+ graph_nodes_populate(graph);
+ rc = graph_node_nexts_populate(graph);
+ rc |= graph_src_nodes_populate(graph);
+
+ return rc;
+}
+
+int
+graph_fp_mem_create(struct graph *graph)
+{
+ const struct rte_memzone *mz;
+ size_t sz;
+
+ sz = graph_fp_mem_calc_size(graph);
+ mz = rte_memzone_reserve(graph->name, sz, graph->socket, 0);
+ if (mz == NULL)
+ set_err(ENOMEM, fail, "memzone %s reserve failed", graph->name);
+
+ graph->graph = mz->addr;
+ graph->mz = mz;
+
+ return graph_fp_mem_populate(graph);
+fail:
+ return -rte_errno;
+}
+
+static void
+graph_nodes_mem_destroy(struct rte_graph *graph)
+{
+ rte_node_t count; rte_graph_off_t off; struct rte_node *node;
+
+ if (graph == NULL)
+ return;
+
+ rte_graph_foreach_node(count, off, graph, node)
+ rte_free(node->objs);
+}
+
+int
+graph_fp_mem_destroy(struct graph *graph)
+{
+ graph_nodes_mem_destroy(graph->graph);
+ return rte_memzone_free(graph->mz);
+}
diff --git a/lib/librte_graph/graph_private.h b/lib/librte_graph/graph_private.h
new file mode 100644
index 000000000..47c84bb15
--- /dev/null
+++ b/lib/librte_graph/graph_private.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _RTE_GRAPH_PRIVATE_H_
+#define _RTE_GRAPH_PRIVATE_H_
+
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+
+extern int rte_graph_logtype;
+
+#define GRAPH_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, rte_graph_logtype, \
+ RTE_FMT("GRAPH: %s():%u " RTE_FMT_HEAD(__VA_ARGS__,) \
+ "\n", __func__, __LINE__, \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define graph_err(...) GRAPH_LOG(ERR, __VA_ARGS__)
+#define graph_info(...) GRAPH_LOG(INFO, __VA_ARGS__)
+#define graph_dbg(...) GRAPH_LOG(DEBUG, __VA_ARGS__)
+
+#define id_check(id, id_max) do { \
+ if ((id) >= (id_max)) { \
+ rte_errno = EINVAL; \
+ goto fail; \
+ } \
+} while (0)
+
+#define set_err(err, where, fmt, ...) do { \
+ graph_err(fmt, ##__VA_ARGS__); \
+ rte_errno = err; \
+ goto where; \
+} while (0)
+
+struct node {
+ STAILQ_ENTRY(node) next;
+ char name[RTE_NODE_NAMESIZE]; /* Name of the node. */
+ uint64_t flags; /* Node configuration flag */
+ rte_node_process_t process; /* Node process function */
+ rte_node_init_t init; /* Node init function */
+ rte_node_fini_t fini; /* Node fini function */
+ rte_node_t id; /* Allocated ID for this node */
+ rte_node_t parent_id; /* Id of parent node */
+ rte_edge_t nb_edges; /* Number of edges from this node */
+ char next_nodes[][RTE_NODE_NAMESIZE]; /* Names of next nodes. */
+};
+
+struct graph_node {
+ STAILQ_ENTRY(graph_node) next;
+ struct node *node;
+ bool visited;
+ struct graph_node *adjacency_list[];
+};
+
+struct graph {
+ STAILQ_ENTRY(graph) next; /* List of graphs */
+ char name[RTE_GRAPH_NAMESIZE]; /* Name of the graph. */
+ const struct rte_memzone *mz;
+ rte_graph_off_t nodes_start;
+ rte_node_t src_node_count;
+ struct rte_graph *graph;
+ rte_node_t node_count;
+ uint32_t cir_start;
+ uint32_t cir_mask;
+ rte_graph_t id;
+ size_t mem_sz;
+ int socket;
+ STAILQ_HEAD(gnode_list, graph_node) node_list;/* Nodes in a graph */
+};
+
+/* Node functions */
+STAILQ_HEAD(node_head, node);
+struct node_head *node_list_head_get(void);
+struct node *node_from_name(const char *name);
+
+/* Graph list functions */
+STAILQ_HEAD(graph_head, graph);
+struct graph_head *graph_list_head_get(void);
+
+/* Lock functions */
+void graph_spinlock_lock(void);
+void graph_spinlock_unlock(void);
+
+/* Graph operations */
+int graph_bfs(struct graph *graph, struct graph_node *start);
+int graph_has_isolated_node(struct graph *graph);
+int graph_node_has_edge_to_src_node(struct graph *graph);
+int graph_node_has_loop_edge(struct graph *graph);
+rte_node_t graph_src_nodes_count(struct graph *graph);
+rte_node_t graph_nodes_count(struct graph *graph);
+void graph_mark_nodes_as_not_visited(struct graph *graph);
+
+/* Fast path graph memory populate functions */
+int graph_fp_mem_create(struct graph *graph);
+int graph_fp_mem_destroy(struct graph *graph);
+
+/* Lookup functions */
+struct rte_node *
+graph_node_id_to_ptr(const struct rte_graph *graph, rte_node_t id);
+struct rte_node *
+graph_node_name_to_ptr(const struct rte_graph *graph, const char *node_name);
+
+/* Debug functions */
+void graph_dump(FILE *f, struct graph *g);
+void node_dump(FILE *f, struct node *n);
+
+#endif /* _RTE_GRAPH_PRIVATE_H_ */
diff --git a/lib/librte_graph/graph_stats.c b/lib/librte_graph/graph_stats.c
new file mode 100644
index 000000000..0dcbee8a8
--- /dev/null
+++ b/lib/librte_graph/graph_stats.c
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <fnmatch.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+
+#include "graph_private.h"
+
+/* Capture all graphs of cluster */
+struct cluster {
+ rte_graph_t nb_graphs;
+ rte_graph_t size;
+
+ struct graph **graphs;
+};
+
+/* Capture same node ID across cluster */
+struct cluster_node {
+ struct rte_graph_cluster_node_stats stat;
+ rte_node_t nb_nodes;
+
+ struct rte_node *nodes[];
+};
+
+struct rte_graph_cluster_stats {
+ /* Header */
+ rte_graph_cluster_stats_cb_t fn;
+ uint32_t cluster_node_size; /* Size of struct cluster_node */
+ rte_node_t max_nodes;
+ int socket_id;
+ void *cookie;
+ size_t sz;
+
+ struct cluster_node clusters[];
+} __rte_cache_aligned;
+
+#define border() fprintf(f, "+-------------------------------+---------------+---------------+---------------+---------------+---------------+-----------+\n")
+
+static inline void
+print_banner(FILE *f)
+{
+ border();
+ fprintf(f, "%-32s%-16s%-16s%-16s%-16s%-16s%-16s\n", "|Node", "|calls", "|objs", "|realloc_count", "|objs/call", "|objs/sec(10E6)", "|cycles/call|");
+ border();
+}
+
+static inline void
+print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat)
+{
+ double objs_per_call, objs_per_sec, cycles_per_call, ts_per_hz;
+ const uint64_t prev_calls = stat->prev_calls;
+ const uint64_t prev_objs = stat->prev_objs;
+ const uint64_t cycles = stat->cycles;
+ const uint64_t calls = stat->calls;
+ const uint64_t objs = stat->objs;
+ uint64_t call_delta;
+
+ call_delta = calls - prev_calls;
+ objs_per_call = call_delta ?
+ (double)(objs - prev_objs) / call_delta : 0;
+ cycles_per_call = call_delta ?
+ (double)(cycles - stat->prev_cycles) / call_delta : 0;
+ ts_per_hz = (double)(stat->ts - stat->prev_ts) / stat->hz;
+ objs_per_sec = ts_per_hz ? (objs - prev_objs) / ts_per_hz : 0;
+ objs_per_sec /= 1000000;
+
+ fprintf(f, "|%-31s|%-15"PRIu64"|%-15"PRIu64"|%-15"PRIu64"|%-15.3f|%-15.6f|%-11.4f|\n",
+ stat->name, calls, objs, stat->realloc_count, objs_per_call,
+ objs_per_sec, cycles_per_call);
+}
+
+static int
+graph_cluster_stats_cb(bool is_first, bool is_last, void *cookie,
+ const struct rte_graph_cluster_node_stats *stat)
+{
+ FILE *f = cookie;
+
+ if (unlikely(is_first))
+ print_banner(f);
+ if (stat->objs)
+ print_node(f, stat);
+ if (unlikely(is_last))
+ border();
+
+ return 0;
+};
+
+static struct rte_graph_cluster_stats *
+stats_mem_init(struct cluster *cluster,
+ const struct rte_graph_cluster_stats_param *prm)
+{
+ size_t sz = sizeof(struct rte_graph_cluster_stats);
+ struct rte_graph_cluster_stats *stats;
+ rte_graph_cluster_stats_cb_t fn;
+ int socket_id = prm->socket_id;
+ uint32_t cluster_node_size;
+
+ /* Fixup callback */
+ fn = prm->fn;
+ if (fn == NULL)
+ fn = graph_cluster_stats_cb;
+
+ cluster_node_size = sizeof(struct cluster_node);
+ /* For a given cluster, max nodes will be the max number of graphs */
+ cluster_node_size += cluster->nb_graphs * sizeof(struct rte_node *);
+ cluster_node_size = RTE_ALIGN(cluster_node_size, RTE_CACHE_LINE_SIZE);
+
+ stats = realloc(NULL, sz);
+ if (stats) {
+ memset(stats, 0, sz);
+ stats->fn = fn;
+ stats->cluster_node_size = cluster_node_size;
+ stats->max_nodes = 0;
+ stats->socket_id = socket_id;
+ stats->cookie = prm->cookie;
+ stats->sz = sz;
+ }
+
+ return stats;
+}
+
+static int
+stats_mem_populate(struct rte_graph_cluster_stats **stats_in,
+ struct rte_graph *graph, struct graph_node *graph_node)
+{
+ struct rte_graph_cluster_stats *stats = *stats_in;
+ rte_node_t id = graph_node->node->id;
+ struct cluster_node *cluster;
+ struct rte_node *node;
+ rte_node_t count;
+
+ cluster = stats->clusters;
+
+ /* Iterate over cluster node array to find node ID match */
+ for (count = 0; count < stats->max_nodes; count++) {
+ /* Found an existing node in the reel */
+ if (cluster->stat.id == id) {
+ node = graph_node_id_to_ptr(graph, id);
+ if (node == NULL)
+ set_err(ENOKEY, err,
+ "failed to find node %s in graph %s",
+ graph_node->node->name, graph->name);
+
+ cluster->nodes[cluster->nb_nodes++] = node;
+ return 0;
+ }
+ cluster = RTE_PTR_ADD(cluster, stats->cluster_node_size);
+ }
+
+ /* Hey, it is a new node, allocate space for it in the reel */
+ stats = realloc(stats, stats->sz + stats->cluster_node_size);
+ if (stats == NULL)
+ set_err(ENOMEM, err, "realloc failed");
+
+ /* Clear the new struct cluster_node area */
+ cluster = RTE_PTR_ADD(stats, stats->sz);
+ memset(cluster, 0, stats->cluster_node_size);
+ memcpy(cluster->stat.name, graph_node->node->name, RTE_NODE_NAMESIZE);
+ cluster->stat.id = graph_node->node->id;
+ cluster->stat.hz = rte_get_timer_hz();
+ node = graph_node_id_to_ptr(graph, id);
+ if (node == NULL)
+ set_err(ENOKEY, err, "failed to find node %s in graph %s",
+ graph_node->node->name, graph->name);
+ cluster->nodes[cluster->nb_nodes++] = node;
+
+ stats->sz += stats->cluster_node_size;
+ stats->max_nodes++;
+ *stats_in = stats;
+
+ return 0;
+err:
+ return -rte_errno;
+}
+
+static void
+stats_mem_fini(struct rte_graph_cluster_stats *stats)
+{
+ free(stats);
+}
+
+static void
+cluster_init(struct cluster *cluster)
+{
+ memset(cluster, 0, sizeof(*cluster));
+}
+
+static int
+cluster_add(struct cluster *cluster, struct graph *graph)
+{
+ rte_graph_t count;
+ size_t sz;
+
+ /* Skip if the graph is already added to the cluster */
+ for (count = 0; count < cluster->nb_graphs; count++)
+ if (cluster->graphs[count] == graph)
+ return 0;
+
+ /* Expand the cluster if required to store graph objects */
+ if (cluster->nb_graphs + 1 > cluster->size) {
+ cluster->size = RTE_MAX(1, cluster->size * 2);
+ sz = sizeof(struct graph *) * cluster->size;
+ cluster->graphs = realloc(cluster->graphs, sz);
+ if (cluster->graphs == NULL)
+ set_err(ENOMEM, free, "failed to realloc");
+ }
+
+ /* Add graph to cluster */
+ cluster->graphs[cluster->nb_graphs++] = graph;
+ return 0;
+
+free:
+ return -rte_errno;
+}
+
+static void
+cluster_fini(struct cluster *cluster)
+{
+ if (cluster->graphs)
+ free(cluster->graphs);
+}
+
+static int
+expand_pattern_to_cluster(struct cluster *cluster, const char *pattern)
+{
+ struct graph_head *graph_head = graph_list_head_get();
+ struct graph *graph;
+ bool found = false;
+
+ /* Check for pattern match */
+ STAILQ_FOREACH(graph, graph_head, next) {
+ if (fnmatch(pattern, graph->name, 0) == 0) {
+ if (cluster_add(cluster, graph))
+ goto fail;
+ found = true;
+ }
+ }
+ if (found == false)
+ set_err(EFAULT, fail, "pattern %s graph not found", pattern);
+
+ return 0;
+fail:
+ return -rte_errno;
+}
+
+struct rte_graph_cluster_stats *
+rte_graph_cluster_stats_create(const struct rte_graph_cluster_stats_param *prm)
+{
+ struct rte_graph_cluster_stats *stats, *rc = NULL;
+ struct graph_node *graph_node;
+ struct cluster cluster;
+ struct graph *graph;
+ const char *pattern;
+ rte_graph_t i;
+
+ /* Sanity checks */
+ if (!rte_graph_has_stats_feature())
+ set_err(EINVAL, fail, "stats feature is not enabled");
+
+ if (prm == NULL)
+ set_err(EINVAL, fail, "invalid param");
+
+ if (prm->graph_patterns == NULL || prm->nb_graph_patterns == 0)
+ set_err(EINVAL, fail, "invalid graph param");
+
+ cluster_init(&cluster);
+
+ graph_spinlock_lock();
+ /* Expand graph pattern and add the graph to the cluster */
+ for (i = 0; i < prm->nb_graph_patterns; i++) {
+ pattern = prm->graph_patterns[i];
+ if (expand_pattern_to_cluster(&cluster, pattern))
+ goto bad_pattern;
+ }
+
+ /* Alloc the stats memory */
+ stats = stats_mem_init(&cluster, prm);
+ if (stats == NULL)
+ set_err(ENOMEM, bad_pattern, "failed to alloc stats memory");
+
+ /* Iterate over M(Graph) x N (Nodes in graph) */
+ for (i = 0; i < cluster.nb_graphs; i++) {
+ graph = cluster.graphs[i];
+ STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+ struct rte_graph *graph_fp = graph->graph;
+ if (stats_mem_populate(&stats, graph_fp, graph_node))
+ goto realloc_fail;
+ }
+ }
+
+ /* Finally copy to hugepage memory to avoid pressure on rte_realloc */
+ rc = rte_malloc_socket(NULL, stats->sz, 0, stats->socket_id);
+ if (rc)
+ rte_memcpy(rc, stats, stats->sz);
+ else
+ set_err(ENOMEM, realloc_fail, "rte_malloc failed");
+
+realloc_fail:
+ stats_mem_fini(stats);
+bad_pattern:
+ graph_spinlock_unlock();
+ cluster_fini(&cluster);
+fail:
+ return rc;
+}
+
+void
+rte_graph_cluster_stats_destroy(struct rte_graph_cluster_stats *stat)
+{
+ rte_free(stat);
+}
+
+static inline void
+cluster_node_aggregate_stats(struct cluster_node *cluster)
+{
+ uint64_t calls = 0, cycles = 0, objs = 0, realloc_count = 0;
+ struct rte_graph_cluster_node_stats *stat = &cluster->stat;
+ struct rte_node *node;
+ rte_node_t count;
+
+ for (count = 0; count < cluster->nb_nodes; count++) {
+ node = cluster->nodes[count];
+
+ calls += node->total_calls;
+ objs += node->total_objs;
+ cycles += node->total_cycles;
+ realloc_count += node->realloc_count;
+ }
+
+ stat->calls = calls;
+ stat->objs = objs;
+ stat->cycles = cycles;
+ stat->ts = rte_get_timer_cycles();
+ stat->realloc_count = realloc_count;
+}
+
+static inline void
+cluster_node_store_prev_stats(struct cluster_node *cluster)
+{
+ struct rte_graph_cluster_node_stats *stat = &cluster->stat;
+
+ stat->prev_ts = stat->ts;
+ stat->prev_calls = stat->calls;
+ stat->prev_objs = stat->objs;
+ stat->prev_cycles = stat->cycles;
+}
+
+void
+rte_graph_cluster_stats_get(struct rte_graph_cluster_stats *stat, bool skip_cb)
+{
+ struct cluster_node *cluster;
+ rte_node_t count;
+ int rc = 0;
+
+ cluster = stat->clusters;
+
+ for (count = 0; count < stat->max_nodes; count++) {
+ cluster_node_aggregate_stats(cluster);
+ if (!skip_cb)
+ rc = stat->fn(!count, (count == stat->max_nodes - 1),
+ stat->cookie, &cluster->stat);
+ cluster_node_store_prev_stats(cluster);
+ if (rc)
+ break;
+ cluster = RTE_PTR_ADD(cluster, stat->cluster_node_size);
+ }
+}
+
+void
+rte_graph_cluster_stats_reset(struct rte_graph_cluster_stats *stat)
+{
+ struct cluster_node *cluster;
+ rte_node_t count;
+
+ cluster = stat->clusters;
+
+ for (count = 0; count < stat->max_nodes; count++) {
+ struct rte_graph_cluster_node_stats *node = &cluster->stat;
+
+ node->ts = 0;
+ node->calls = 0;
+ node->objs = 0;
+ node->cycles = 0;
+ node->prev_ts = 0;
+ node->prev_calls = 0;
+ node->prev_objs = 0;
+ node->prev_cycles = 0;
+ node->realloc_count = 0;
+ cluster = RTE_PTR_ADD(cluster, stat->cluster_node_size);
+ }
+}
diff --git a/lib/librte_graph/meson.build b/lib/librte_graph/meson.build
new file mode 100644
index 000000000..929a17f84
--- /dev/null
+++ b/lib/librte_graph/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2020 Marvell International Ltd.
+#
+
+name = 'graph'
+
+sources = files('node.c', 'graph.c', 'graph_ops.c', 'graph_debug.c', 'graph_stats.c', 'graph_populate.c')
+headers = files('rte_graph.h', 'rte_graph_worker.h')
+allow_experimental_apis = true
+
+deps += ['eal']
diff --git a/lib/librte_graph/node.c b/lib/librte_graph/node.c
new file mode 100644
index 000000000..503721943
--- /dev/null
+++ b/lib/librte_graph/node.c
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "graph_private.h"
+
+static struct node_head node_list = STAILQ_HEAD_INITIALIZER(node_list);
+static rte_node_t node_id;
+
+#define node_id_check(id) id_check(id, node_id)
+
+/* Private functions */
+struct node_head *node_list_head_get(void)
+{
+ return &node_list;
+}
+
+struct node *
+node_from_name(const char *name)
+{
+ struct node *node;
+
+ STAILQ_FOREACH(node, &node_list, next)
+ if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
+ return node;
+
+ return NULL;
+}
+
+static bool
+node_has_duplicate_entry(const char *name)
+{
+ struct node *node;
+
+ /* Is a duplicate name already registered? */
+ STAILQ_FOREACH(node, &node_list, next) {
+ if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0) {
+ rte_errno = EEXIST;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Public functions */
+rte_node_t
+__rte_node_register(const struct rte_node_register *reg)
+{
+ struct node *node;
+ rte_edge_t i;
+ size_t sz;
+
+ RTE_BUILD_BUG_ON((offsetof(struct rte_node, nodes) -
+ offsetof(struct rte_node, ctx))
+ != RTE_CACHE_LINE_MIN_SIZE);
+
+ graph_spinlock_lock();
+
+ /* Check sanity */
+ if (reg == NULL || reg->process == NULL) {
+ rte_errno = EINVAL;
+ goto fail;
+ }
+
+ /* Check for duplicate name */
+ if (node_has_duplicate_entry(reg->name))
+ goto fail;
+
+ sz = sizeof(struct node) + (reg->nb_edges * RTE_NODE_NAMESIZE);
+ node = calloc(1, sz);
+ if (node == NULL) {
+ rte_errno = ENOMEM;
+ goto fail;
+ }
+
+ /* Initialize the node */
+ if (rte_strscpy(node->name, reg->name, RTE_NODE_NAMESIZE) < 0) {
+ rte_errno = E2BIG;
+ goto free;
+ }
+ node->flags = reg->flags;
+ node->process = reg->process;
+ node->init = reg->init;
+ node->fini = reg->fini;
+ node->nb_edges = reg->nb_edges;
+ node->parent_id = reg->parent_id;
+ for (i = 0; i < reg->nb_edges; i++) {
+ if (rte_strscpy(node->next_nodes[i], reg->next_nodes[i],
+ RTE_NODE_NAMESIZE) < 0) {
+ rte_errno = E2BIG;
+ goto free;
+ }
+ }
+
+ node->id = node_id++;
+
+ /* Add the node at tail */
+ STAILQ_INSERT_TAIL(&node_list, node, next);
+ graph_spinlock_unlock();
+
+ return node->id;
+free:
+ free(node);
+fail:
+ graph_spinlock_unlock();
+ return RTE_NODE_ID_INVALID;
+}
+
+static int
+clone_name(struct rte_node_register *reg, struct node *node, const char *name)
+{
+ ssize_t sz, rc;
+
+#define SZ RTE_NODE_NAMESIZE
+ rc = rte_strscpy(reg->name, node->name, SZ);
+ if (rc < 0)
+ goto fail;
+ sz = rc;
+ rc = rte_strscpy(reg->name + sz, "-", RTE_MAX((int16_t)(SZ - sz), 0));
+ if (rc < 0)
+ goto fail;
+ sz += rc;
+ sz = rte_strscpy(reg->name + sz, name, RTE_MAX((int16_t)(SZ - sz), 0));
+ if (sz < 0)
+ goto fail;
+
+ return 0;
+fail:
+ rte_errno = E2BIG;
+ return -rte_errno;
+}
+
+static rte_node_t
+node_clone(struct node *node, const char *name)
+{
+ rte_node_t rc = RTE_NODE_ID_INVALID;
+ struct rte_node_register *reg;
+ rte_edge_t i;
+
+ /* Don't allow cloning a node from an already cloned node */
+ if (node->parent_id != RTE_NODE_ID_INVALID) {
+ rte_errno = EEXIST;
+ goto fail;
+ }
+
+ /* Check for duplicate name */
+ if (node_has_duplicate_entry(name))
+ goto fail;
+
+ reg = calloc(1, sizeof(*reg) + (sizeof(char *) * node->nb_edges));
+ if (reg == NULL) {
+ rte_errno = ENOMEM;
+ goto fail;
+ }
+
+ /* Clone the source node */
+ reg->flags = node->flags;
+ reg->process = node->process;
+ reg->init = node->init;
+ reg->fini = node->fini;
+ reg->nb_edges = node->nb_edges;
+ reg->parent_id = node->id;
+
+ for (i = 0; i < node->nb_edges; i++)
+ reg->next_nodes[i] = node->next_nodes[i];
+
+ /* Naming ceremony of the new node. name is node->name + "-" + name */
+ if (clone_name(reg, node, name))
+ goto free;
+
+ rc = __rte_node_register(reg);
+free:
+ free(reg);
+fail:
+ return rc;
+}
+
+rte_node_t
+rte_node_clone(rte_node_t id, const char *name)
+{
+ struct node *node;
+
+ node_id_check(id);
+ STAILQ_FOREACH(node, &node_list, next)
+ if (node->id == id)
+ return node_clone(node, name);
+
+fail:
+ return RTE_NODE_ID_INVALID;
+}
+
+rte_node_t
+rte_node_from_name(const char *name)
+{
+ struct node *node;
+
+ STAILQ_FOREACH(node, &node_list, next)
+ if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
+ return node->id;
+
+ return RTE_NODE_ID_INVALID;
+}
+
+char *
+rte_node_id_to_name(rte_node_t id)
+{
+ struct node *node;
+
+ node_id_check(id);
+ STAILQ_FOREACH(node, &node_list, next)
+ if (node->id == id)
+ return node->name;
+
+fail:
+ return NULL;
+}
+
+rte_edge_t
+rte_node_edge_count(rte_node_t id)
+{
+ struct node *node;
+
+ node_id_check(id);
+ STAILQ_FOREACH(node, &node_list, next)
+ if (node->id == id)
+ return node->nb_edges;
+fail:
+ return RTE_EDGE_ID_INVALID;
+}
+
+static rte_edge_t
+edge_update(struct node *node, struct node *prev, rte_edge_t from,
+ const char **next_nodes, rte_edge_t nb_edges)
+{
+ rte_edge_t i, max_edges, count = 0;
+ struct node *new_node;
+ bool need_realloc;
+ size_t sz;
+
+ if (from == RTE_EDGE_ID_INVALID)
+ from = node->nb_edges;
+
+ /* Don't create a hole in the next_nodes[] list */
+ if (from > node->nb_edges) {
+ rte_errno = ENOMEM;
+ goto fail;
+ }
+
+ /* Remove me from list */
+ STAILQ_REMOVE(&node_list, node, node, next);
+
+ /* Allocate the storage space for new node if required */
+ max_edges = from + nb_edges;
+ need_realloc = max_edges > node->nb_edges;
+ if (need_realloc) {
+ sz = sizeof(struct node) + (max_edges * RTE_NODE_NAMESIZE);
+ new_node = realloc(node, sz);
+ if (new_node == NULL) {
+ rte_errno = ENOMEM;
+ goto restore;
+ } else {
+ node = new_node;
+ }
+ }
+
+ /* Update the next-node names for the new edges */
+ for (i = from; i < max_edges; i++, count++) {
+ if (rte_strscpy(node->next_nodes[i], next_nodes[count],
+ RTE_NODE_NAMESIZE) < 0) {
+ rte_errno = E2BIG;
+ goto restore;
+ }
+ }
+restore:
+ /* Update the linked list so that prev points to the new node address */
+ if (prev)
+ STAILQ_INSERT_AFTER(&node_list, prev, node, next);
+ else
+ STAILQ_INSERT_HEAD(&node_list, node, next);
+
+ if (need_realloc)
+ node->nb_edges = max_edges;
+
+fail:
+ return count;
+}
+
+rte_edge_t
+rte_node_edge_shrink(rte_node_t id, rte_edge_t size)
+{
+ rte_edge_t rc = RTE_EDGE_ID_INVALID;
+ struct node *node;
+
+ node_id_check(id);
+ graph_spinlock_lock();
+
+ STAILQ_FOREACH(node, &node_list, next) {
+ if (node->id == id) {
+ if (node->nb_edges < size) {
+ rte_errno = E2BIG;
+ goto fail;
+ }
+ node->nb_edges = size;
+ rc = size;
+ break;
+ }
+ }
+
+fail:
+ graph_spinlock_unlock();
+ return rc;
+}
+
+rte_edge_t
+rte_node_edge_update(rte_node_t id, rte_edge_t from,
+ const char **next_nodes, uint16_t nb_edges)
+{
+ rte_edge_t rc = RTE_EDGE_ID_INVALID;
+ struct node *n, *prev;
+
+ node_id_check(id);
+ graph_spinlock_lock();
+
+ prev = NULL;
+ STAILQ_FOREACH(n, &node_list, next) {
+ if (n->id == id) {
+ rc = edge_update(n, prev, from, next_nodes, nb_edges);
+ break;
+ }
+ prev = n;
+ }
+
+ graph_spinlock_unlock();
+fail:
+ return rc;
+}
+
+static rte_node_t
+node_copy_edges(struct node *node, char *next_nodes[])
+{
+ rte_edge_t i;
+
+ for (i = 0; i < node->nb_edges; i++)
+ next_nodes[i] = node->next_nodes[i];
+
+ return i;
+}
+
+rte_node_t
+rte_node_edge_get(rte_node_t id, char *next_nodes[])
+{
+ rte_node_t rc = RTE_NODE_ID_INVALID;
+ struct node *node;
+
+ node_id_check(id);
+ graph_spinlock_lock();
+
+ STAILQ_FOREACH(node, &node_list, next) {
+ if (node->id == id) {
+ if (next_nodes == NULL)
+ rc = sizeof(char *) * node->nb_edges;
+ else
+ rc = node_copy_edges(node, next_nodes);
+ break;
+ }
+ }
+
+ graph_spinlock_unlock();
+fail:
+ return rc;
+}
+
+static void
+node_scan_dump(FILE *f, rte_node_t id, bool all)
+{
+ struct node *node;
+
+ RTE_ASSERT(f != NULL);
+ node_id_check(id);
+
+ STAILQ_FOREACH(node, &node_list, next) {
+ if (all == true) {
+ node_dump(f, node);
+ } else if (node->id == id) {
+ node_dump(f, node);
+ return;
+ }
+ }
+fail:
+ return;
+}
+
+void
+rte_node_dump(FILE *f, rte_node_t id)
+{
+ node_scan_dump(f, id, false);
+}
+
+void
+rte_node_list_dump(FILE *f)
+{
+ node_scan_dump(f, 0, true);
+}
+
+rte_node_t
+rte_node_max_count(void)
+{
+ return node_id;
+}
diff --git a/lib/librte_graph/rte_graph.h b/lib/librte_graph/rte_graph.h
new file mode 100644
index 000000000..aa6df76dd
--- /dev/null
+++ b/lib/librte_graph/rte_graph.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _RTE_GRAPH_H_
+#define _RTE_GRAPH_H_
+
+/**
+ * @file rte_graph.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE GRAPH support.
+ * librte_graph provides a framework to <fill the remaining>
+ * and need to add rte_experimental
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_GRAPH_NAMESIZE 64 /**< Max length of graph name. */
+#define RTE_NODE_NAMESIZE 64 /**< Max length of node name. */
+#define RTE_GRAPH_OFF_INVALID UINT32_MAX
+#define RTE_NODE_ID_INVALID UINT32_MAX
+#define RTE_EDGE_ID_INVALID UINT16_MAX
+#define RTE_GRAPH_ID_INVALID UINT16_MAX
+#define RTE_GRAPH_FENCE 0xdeadbeef12345678ULL
+
+typedef uint32_t rte_graph_off_t;
+typedef uint32_t rte_node_t;
+typedef uint16_t rte_edge_t;
+typedef uint16_t rte_graph_t;
+
+/** Burst size in terms of log2. */
+#if RTE_GRAPH_BURST_SIZE == 1
+#define RTE_GRAPH_BURST_SIZE_LOG2 0
+#elif RTE_GRAPH_BURST_SIZE == 2
+#define RTE_GRAPH_BURST_SIZE_LOG2 1
+#elif RTE_GRAPH_BURST_SIZE == 4
+#define RTE_GRAPH_BURST_SIZE_LOG2 2
+#elif RTE_GRAPH_BURST_SIZE == 8
+#define RTE_GRAPH_BURST_SIZE_LOG2 3
+#elif RTE_GRAPH_BURST_SIZE == 16
+#define RTE_GRAPH_BURST_SIZE_LOG2 4
+#elif RTE_GRAPH_BURST_SIZE == 32
+#define RTE_GRAPH_BURST_SIZE_LOG2 5
+#elif RTE_GRAPH_BURST_SIZE == 64
+#define RTE_GRAPH_BURST_SIZE_LOG2 6
+#elif RTE_GRAPH_BURST_SIZE == 128
+#define RTE_GRAPH_BURST_SIZE_LOG2 7
+#elif RTE_GRAPH_BURST_SIZE == 256
+#define RTE_GRAPH_BURST_SIZE_LOG2 8
+#else
+#error "Unsupported burst size"
+#endif
+
+/* Forward declaration */
+struct rte_node;
+struct rte_graph;
+struct rte_graph_cluster_stats;
+struct rte_graph_cluster_node_stats;
+
+typedef uint16_t (*rte_node_process_t)
+ (struct rte_graph *graph, struct rte_node *node, void **o, uint16_t nb);
+typedef int (*rte_node_init_t)
+ (const struct rte_graph *graph, struct rte_node *node);
+typedef void (*rte_node_fini_t)
+ (const struct rte_graph *graph, struct rte_node *node);
+typedef int (*rte_graph_cluster_stats_cb_t)(bool is_first, bool is_last,
+ void *cookie, const struct rte_graph_cluster_node_stats *);
+
+/**
+ * Configuration parameters for creating the graph.
+ */
+struct rte_graph_param {
+ int socket_id;
+ uint16_t nb_node_patterns;
+ const char **node_patterns;
+};
+
+struct rte_graph_cluster_stats_param {
+ int socket_id;
+ /* NULL is allowed; in that case, the default stats print function is used */
+ rte_graph_cluster_stats_cb_t fn;
+ RTE_STD_C11
+ union {
+ void *cookie;
+ FILE *f; /* Where to dump the stats when fn == NULL */
+ };
+ uint16_t nb_graph_patterns;
+ const char **graph_patterns;
+};
+
+/* Stats functions */
+struct rte_graph_cluster_node_stats {
+ uint64_t ts;
+ uint64_t calls;
+ uint64_t objs;
+ uint64_t cycles;
+
+ uint64_t prev_ts;
+ uint64_t prev_calls;
+ uint64_t prev_objs;
+ uint64_t prev_cycles;
+
+ uint64_t realloc_count;
+
+ rte_node_t id;
+ uint64_t hz;
+ char name[RTE_NODE_NAMESIZE];
+} __rte_cache_aligned;
+
+/** Structure defining the node registration parameters. */
+struct rte_node_register {
+ char name[RTE_NODE_NAMESIZE]; /**< Name of the node. */
+ uint64_t flags; /**< Node configuration flags. */
+#define RTE_NODE_SOURCE_F (1ULL << 0)
+ RTE_STD_C11
+ rte_node_process_t process; /**< Node process function. */
+ rte_node_init_t init; /**< Node init function. */
+ rte_node_t fini; /**< Node fini function. */
+ rte_node_t id; /**< Node id (out, set on registration). */
+ rte_node_t parent_id; /**< Id of the parent node. */
+ rte_edge_t nb_edges; /**< Number of edges from this node. */
+ const char *next_nodes[]; /**< Names of next nodes. */
+};
+
+/* Graph create functions */
+__rte_experimental
+rte_graph_t rte_graph_create(const char *name, struct rte_graph_param *prm);
+__rte_experimental
+rte_graph_t rte_graph_destroy(const char *name);
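+
+/*
+ * Illustrative sketch of graph creation (not part of this patch; the
+ * "graph-0" name and the node pattern strings below are examples only):
+ *
+ *   static const char *node_patterns[] = {"node_a", "node_b*"};
+ *   struct rte_graph_param prm = {
+ *           .socket_id = SOCKET_ID_ANY,
+ *           .nb_node_patterns = RTE_DIM(node_patterns),
+ *           .node_patterns = node_patterns,
+ *   };
+ *   rte_graph_t id = rte_graph_create("graph-0", &prm);
+ *
+ *   if (id == RTE_GRAPH_ID_INVALID)
+ *           rte_panic("graph creation failed\n");
+ */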
+
+/* Lookup functions */
+__rte_experimental
+rte_graph_t rte_graph_from_name(const char *name);
+__rte_experimental
+char *rte_graph_id_to_name(rte_graph_t id);
+
+/* Export the graph as graphviz dot file */
+__rte_experimental
+rte_graph_t rte_graph_export(const char *name, FILE *f);
+
+/* Get graph object for fast path in worker for walk */
+__rte_experimental
+struct rte_graph *rte_graph_lookup(const char *name);
+
+/* Get graph max count */
+__rte_experimental
+rte_graph_t rte_graph_max_count(void);
+
+/* Debug functions */
+__rte_experimental
+void rte_graph_dump(FILE *f, rte_graph_t id);
+__rte_experimental
+void rte_graph_list_dump(FILE *f);
+__rte_experimental
+void rte_graph_obj_dump(FILE *f, struct rte_graph *graph, bool all);
+
+/* Iterate over each rte_node of a graph after graph creation */
+#define rte_graph_foreach_node(count, off, graph, node) \
+ for (count = 0, off = graph->nodes_start, \
+ node = RTE_PTR_ADD(graph, off); \
+ count < graph->nb_nodes; \
+ off = node->next, node = RTE_PTR_ADD(graph, off), count++)
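+
+/*
+ * Illustrative iteration over the nodes of a graph (sketch only; assumes
+ * the graph pointer comes from rte_graph_lookup() and that
+ * rte_graph_worker.h, which defines struct rte_graph and struct rte_node,
+ * is included):
+ *
+ *   struct rte_node *node;
+ *   rte_graph_off_t off;
+ *   rte_node_t count;
+ *
+ *   rte_graph_foreach_node(count, off, graph, node)
+ *           printf("%s\n", node->name);
+ */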
+
+/* Lookup functions */
+__rte_experimental
+struct rte_node *rte_graph_node_get(rte_graph_t graph_id, uint32_t node_id);
+__rte_experimental
+struct rte_node *rte_graph_node_get_by_name(const char *graph,
+ const char *name);
+
+__rte_experimental
+struct rte_graph_cluster_stats *
+rte_graph_cluster_stats_create(const struct rte_graph_cluster_stats_param *prm);
+__rte_experimental
+void rte_graph_cluster_stats_destroy(struct rte_graph_cluster_stats *stat);
+__rte_experimental
+void rte_graph_cluster_stats_get(struct rte_graph_cluster_stats *stat,
+ bool skip_cb);
+__rte_experimental
+void rte_graph_cluster_stats_reset(struct rte_graph_cluster_stats *stat);
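+
+/*
+ * Illustrative sketch of cluster stats usage (the "worker_*" pattern is an
+ * example only; with fn left as NULL the default print function dumps the
+ * stats to 'f'):
+ *
+ *   static const char *patterns[] = {"worker_*"};
+ *   struct rte_graph_cluster_stats_param prm = {
+ *           .socket_id = SOCKET_ID_ANY,
+ *           .f = stdout,
+ *           .nb_graph_patterns = RTE_DIM(patterns),
+ *           .graph_patterns = patterns,
+ *   };
+ *   struct rte_graph_cluster_stats *stats =
+ *           rte_graph_cluster_stats_create(&prm);
+ *
+ *   rte_graph_cluster_stats_get(stats, false);
+ *   rte_graph_cluster_stats_destroy(stats);
+ */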
+
+/* Node creation functions */
+__rte_experimental
+rte_node_t __rte_node_register(const struct rte_node_register *node);
+
+#define RTE_NODE_REGISTER(node) \
+RTE_INIT(rte_node_register_##node) \
+{ \
+ node.parent_id = RTE_NODE_ID_INVALID; \
+ node.id = __rte_node_register(&node); \
+}
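+
+/*
+ * Illustrative sketch of node registration (the node name, the next node
+ * name and the process callback below are hypothetical; rte_node_enqueue()
+ * is declared in rte_graph_worker.h). The callback forwards all received
+ * objects to edge 0 and reports the number of objects processed:
+ *
+ *   static uint16_t
+ *   test_node_process(struct rte_graph *graph, struct rte_node *node,
+ *                     void **objs, uint16_t nb_objs)
+ *   {
+ *           rte_node_enqueue(graph, node, 0, objs, nb_objs);
+ *           return nb_objs;
+ *   }
+ *
+ *   static struct rte_node_register test_node = {
+ *           .name = "test_node",
+ *           .process = test_node_process,
+ *           .nb_edges = 1,
+ *           .next_nodes = {"test_next_node"},
+ *   };
+ *   RTE_NODE_REGISTER(test_node);
+ */
+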
+__rte_experimental
+rte_node_t rte_node_clone(rte_node_t id, const char *name);
+
+/* Lookup functions */
+__rte_experimental
+rte_node_t rte_node_from_name(const char *name);
+__rte_experimental
+char *rte_node_id_to_name(rte_node_t id);
+
+/* Edge related functions */
+__rte_experimental
+rte_edge_t rte_node_edge_count(rte_node_t id);
+
+/* If from == RTE_EDGE_ID_INVALID, the edges are appended at the end of the
+ * list. Returns the updated number of edges.
+ */
+__rte_experimental
+rte_edge_t rte_node_edge_update(rte_node_t id, rte_edge_t from,
+ const char **next_nodes, uint16_t nb_edges);
+__rte_experimental
+rte_edge_t rte_node_edge_shrink(rte_node_t id, rte_edge_t size);
+
+/* If next_nodes == NULL, return the size in bytes of the next node name
+ * array, else the number of items copied.
+ */
+__rte_experimental
+rte_node_t rte_node_edge_get(rte_node_t id, char *next_nodes[]);
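+
+/*
+ * Illustrative two-step usage (sketch only; error handling omitted): the
+ * first call returns the size in bytes needed for the next node name array,
+ * the second call fills it.
+ *
+ *   rte_node_t sz = rte_node_edge_get(id, NULL);
+ *   char **next_nodes = malloc(sz);
+ *
+ *   if (next_nodes != NULL)
+ *           rte_node_edge_get(id, next_nodes);
+ */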
+
+/* Get node max count */
+__rte_experimental
+rte_node_t rte_node_max_count(void);
+
+/* Debug functions */
+__rte_experimental
+void rte_node_dump(FILE *f, rte_node_t id);
+__rte_experimental
+void rte_node_list_dump(FILE *f);
+
+void __rte_node_stream_alloc(struct rte_graph *graph, struct rte_node *node);
+void __rte_node_stream_alloc_size(struct rte_graph *graph,
+ struct rte_node *node, uint16_t req_size);
+
+/* Helper functions */
+static __rte_always_inline int
+rte_node_is_invalid(rte_node_t id)
+{
+ return (id == RTE_NODE_ID_INVALID);
+}
+
+static __rte_always_inline int
+rte_edge_is_invalid(rte_edge_t id)
+{
+ return (id == RTE_EDGE_ID_INVALID);
+}
+
+static __rte_always_inline int
+rte_graph_is_invalid(rte_graph_t id)
+{
+ return (id == RTE_GRAPH_ID_INVALID);
+}
+
+static __rte_always_inline int
+rte_graph_has_stats_feature(void)
+{
+#ifdef RTE_LIBRTE_GRAPH_STATS
+ return RTE_LIBRTE_GRAPH_STATS;
+#else
+ return 0;
+#endif
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_GRAPH_H_ */
diff --git a/lib/librte_graph/rte_graph_version.map b/lib/librte_graph/rte_graph_version.map
new file mode 100644
index 000000000..e07763786
--- /dev/null
+++ b/lib/librte_graph/rte_graph_version.map
@@ -0,0 +1,46 @@
+EXPERIMENTAL {
+ global:
+
+ rte_graph_create;
+ rte_graph_destroy;
+ rte_graph_from_name;
+ rte_graph_id_to_name;
+ rte_graph_dump;
+ rte_graph_list_dump;
+ rte_graph_node_ctx;
+ rte_graph_node_ctx_by_name;
+ rte_graph_export;
+ rte_graph_lookup;
+ rte_graph_clone;
+ rte_graph_enqueue;
+ rte_graph_free;
+ rte_graph_walk;
+ rte_graph_logtype;
+ rte_graph_obj_dump;
+ rte_graph_max_count;
+ rte_graph_node_get;
+ rte_graph_node_get_by_name;
+
+ __rte_node_register;
+ __rte_node_stream_alloc;
+ __rte_node_stream_alloc_size;
+ rte_node_clone;
+
+ rte_node_from_name;
+ rte_node_id_to_name;
+ rte_node_edge_count;
+ rte_node_edge_update;
+ rte_node_edge_get;
+ rte_node_edge_shrink;
+ rte_node_dump;
+ rte_node_list_dump;
+ rte_node_max_count;
+
+ rte_graph_cluster_stats_create;
+ rte_graph_cluster_stats_destroy;
+ rte_graph_cluster_stats_get;
+ rte_graph_cluster_stats_reset;
+
+ local: *;
+};
diff --git a/lib/librte_graph/rte_graph_worker.h b/lib/librte_graph/rte_graph_worker.h
new file mode 100644
index 000000000..38c418e3d
--- /dev/null
+++ b/lib/librte_graph/rte_graph_worker.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _RTE_GRAPH_WORKER_H_
+#define _RTE_GRAPH_WORKER_H_
+
+/**
+ * @file rte_graph_worker.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE GRAPH worker support.
+ * This file provides the fast path graph walk and node enqueue APIs that are
+ * used by the worker lcores and by node process functions.
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_graph.h>
+#include <rte_prefetch.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct rte_graph {
+ uint32_t tail;
+ uint32_t head;
+ uint32_t cir_mask;
+ rte_node_t nb_nodes;
+ rte_graph_off_t *cir_start;
+ rte_graph_off_t nodes_start;
+ rte_graph_t id;
+ int socket;
+ char name[RTE_GRAPH_NAMESIZE]; /* Name of the graph. */
+ uint64_t fence;
+} __rte_cache_aligned;
+
+struct rte_node {
+ /* Slow path area */
+ uint64_t fence; /* Fence */
+ rte_graph_off_t next; /* Index to next node */
+ rte_node_t id; /* ID */
+ rte_node_t parent_id; /* Parent ID */
+ rte_edge_t nb_edges; /* Number of edges from this node */
+ uint32_t realloc_count; /* Number of times realloced */
+
+ char parent[RTE_NODE_NAMESIZE]; /* Parent node name. */
+ char name[RTE_NODE_NAMESIZE]; /* Name of the node. */
+ /* Fast path area */
+ #define RTE_NODE_CTX_SZ 16
+ uint8_t ctx[RTE_NODE_CTX_SZ] __rte_cache_aligned;
+ uint16_t size;
+ uint16_t idx;
+ rte_graph_off_t off;
+ uint64_t total_cycles;
+ uint64_t total_calls;
+ uint64_t total_objs;
+ RTE_STD_C11
+ union {
+ void **objs;
+ uint64_t objs_u64;
+ };
+ RTE_STD_C11
+ union {
+ rte_node_process_t process;
+ uint64_t process_u64;
+ };
+ struct rte_node *nodes[] __rte_cache_min_aligned;
+} __rte_cache_aligned;
+
+/* Fast path API for Graph walk */
+static inline void
+rte_graph_walk(struct rte_graph *graph)
+{
+ const rte_graph_off_t *cir_start = graph->cir_start;
+ const rte_node_t mask = graph->cir_mask;
+ uint32_t head = graph->head;
+ struct rte_node *node;
+ uint64_t start;
+ uint16_t rc;
+ void **objs;
+
+ /*
+ * Walk on the source node(s) ((cir_start - head) -> cir_start) and then
+ * on the pending streams (cir_start -> (cir_start + mask) -> cir_start)
+ * in a circular buffer fashion.
+ *
+ * +-----+ <= cir_start - head [number of source nodes]
+ * | |
+ * | ... | <= source nodes
+ * | |
+ * +-----+ <= cir_start [head = 0] [tail = 0]
+ * | |
+ * | ... | <= pending streams
+ * | |
+ * +-----+ <= cir_start + mask
+ */
+ while (likely(head != graph->tail)) {
+ node = RTE_PTR_ADD(graph, cir_start[(int32_t)head++]);
+ RTE_ASSERT(node->fence == RTE_GRAPH_FENCE);
+ objs = node->objs;
+ rte_prefetch0(objs);
+
+ if (rte_graph_has_stats_feature()) {
+ start = rte_rdtsc();
+ rc = node->process(graph, node, objs, node->idx);
+ node->total_cycles += rte_rdtsc() - start;
+ node->total_calls++;
+ node->total_objs += rc;
+ } else {
+ node->process(graph, node, objs, node->idx);
+ }
+ node->idx = 0;
+ head = likely((int32_t)head > 0) ? head & mask : head;
+ }
+ graph->tail = 0;
+}
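+
+/*
+ * Illustrative worker loop (sketch only; "graph-0" and the force_quit flag
+ * are hypothetical and owned by the application):
+ *
+ *   struct rte_graph *graph = rte_graph_lookup("graph-0");
+ *
+ *   while (!force_quit)
+ *           rte_graph_walk(graph);
+ */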
+
+/* Fast path helper functions */
+static __rte_always_inline void
+__rte_node_enqueue_tail_update(struct rte_graph *graph, struct rte_node *node)
+{
+ uint32_t tail;
+
+ tail = graph->tail;
+ graph->cir_start[tail++] = node->off;
+ graph->tail = tail & graph->cir_mask;
+}
+
+static __rte_always_inline void
+__rte_node_enqueue_prologue(struct rte_graph *graph, struct rte_node *node,
+ const uint16_t idx, const uint16_t space)
+{
+ /* Add to the pending stream list if the node is new */
+ if (idx == 0)
+ __rte_node_enqueue_tail_update(graph, node);
+
+ if (unlikely(node->size < (idx + space)))
+ __rte_node_stream_alloc(graph, node);
+}
+
+static __rte_always_inline struct rte_node *
+__rte_node_next_node_get(struct rte_node *node, rte_edge_t next)
+{
+ RTE_ASSERT(next < node->nb_edges);
+ RTE_ASSERT(node->fence == RTE_GRAPH_FENCE);
+ node = node->nodes[next];
+ RTE_ASSERT(node->fence == RTE_GRAPH_FENCE);
+
+ return node;
+}
+
+/* Fast path enqueue functions */
+static inline void
+rte_node_enqueue(struct rte_graph *graph, struct rte_node *node,
+ rte_edge_t next, void **objs, uint16_t nb_objs)
+{
+ node = __rte_node_next_node_get(node, next);
+ const uint16_t idx = node->idx;
+
+ __rte_node_enqueue_prologue(graph, node, idx, nb_objs);
+
+ rte_memcpy(&node->objs[idx], objs, nb_objs * sizeof(void *));
+ node->idx = idx + nb_objs;
+}
+
+static inline void
+rte_node_enqueue_x1(struct rte_graph *graph, struct rte_node *node,
+ rte_edge_t next, void *obj)
+{
+ node = __rte_node_next_node_get(node, next);
+ uint16_t idx = node->idx;
+
+ __rte_node_enqueue_prologue(graph, node, idx, 1);
+
+ node->objs[idx++] = obj;
+ node->idx = idx;
+}
+
+static inline void
+rte_node_enqueue_x2(struct rte_graph *graph, struct rte_node *node,
+ rte_edge_t next, void *obj0, void *obj1)
+{
+ node = __rte_node_next_node_get(node, next);
+ uint16_t idx = node->idx;
+
+ __rte_node_enqueue_prologue(graph, node, idx, 2);
+
+ node->objs[idx++] = obj0;
+ node->objs[idx++] = obj1;
+ node->idx = idx;
+}
+
+static inline void
+rte_node_enqueue_x4(struct rte_graph *graph, struct rte_node *node, rte_edge_t
+ next, void *obj0, void *obj1, void *obj2, void *obj3)
+{
+ node = __rte_node_next_node_get(node, next);
+ uint16_t idx = node->idx;
+
+ __rte_node_enqueue_prologue(graph, node, idx, 4);
+
+ node->objs[idx++] = obj0;
+ node->objs[idx++] = obj1;
+ node->objs[idx++] = obj2;
+ node->objs[idx++] = obj3;
+ node->idx = idx;
+}
+
+static inline void
+rte_node_enqueue_next(struct rte_graph *graph, struct rte_node *node,
+ rte_edge_t *nexts, void **objs, uint16_t nb_objs)
+{
+ uint16_t i;
+
+ /* Non-optimized implementation to get started */
+ for (i = 0; i < nb_objs; i++)
+ rte_node_enqueue_x1(graph, node, nexts[i], objs[i]);
+}
+
+static inline void **
+rte_node_next_stream_get(struct rte_graph *graph, struct rte_node *node,
+ rte_edge_t next, uint16_t nb_objs)
+{
+ node = __rte_node_next_node_get(node, next);
+ const uint16_t idx = node->idx;
+ uint16_t free_space = node->size - idx;
+
+ if (unlikely(free_space < nb_objs))
+ __rte_node_stream_alloc_size(graph, node, nb_objs);
+
+ return &node->objs[idx];
+}
+
+static inline void
+rte_node_next_stream_put(struct rte_graph *graph, struct rte_node *node,
+ rte_edge_t next, uint16_t idx)
+{
+ if (unlikely(!idx))
+ return;
+
+ node = __rte_node_next_node_get(node, next);
+ if (node->idx == 0)
+ __rte_node_enqueue_tail_update(graph, node);
+
+ node->idx += idx;
+}
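+
+/*
+ * Illustrative use of the stream get/put pair from inside a node process
+ * function (sketch only; 'next' is an application chosen edge index and
+ * 'objs'/'nb_objs' are the arguments of the process callback):
+ *
+ *   void **to = rte_node_next_stream_get(graph, node, next, nb_objs);
+ *   uint16_t i;
+ *
+ *   for (i = 0; i < nb_objs; i++)
+ *           to[i] = objs[i];
+ *
+ *   rte_node_next_stream_put(graph, node, next, nb_objs);
+ */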
+
+static inline void
+rte_node_next_stream_move(struct rte_graph *graph, struct rte_node *src,
+ rte_edge_t next)
+{
+ struct rte_node *dst = __rte_node_next_node_get(src, next);
+
+ /* Swap the pointers if dst doesn't hold valid objs */
+ if (likely(dst->idx == 0)) {
+ void **dobjs = dst->objs;
+ uint16_t dsz = dst->size;
+ dst->objs = src->objs;
+ dst->size = src->size;
+ src->objs = dobjs;
+ src->size = dsz;
+ dst->idx = src->idx;
+ __rte_node_enqueue_tail_update(graph, dst);
+ } else { /* Move the objects from src node to dst node */
+ rte_node_enqueue(graph, src, next, src->objs, src->idx);
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_GRAPH_WORKER_H_ */
diff --git a/lib/meson.build b/lib/meson.build
index 0af3efab2..4089ce0c3 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -30,7 +30,7 @@ libraries = [
# add pkt framework libs which use other libs from above
'port', 'table', 'pipeline',
# flow_classify lib depends on pkt framework table lib
- 'flow_classify', 'bpf', 'telemetry']
+ 'flow_classify', 'bpf', 'graph', 'telemetry']
if is_windows
libraries = ['kvargs','eal'] # only supported libraries for windows
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 15acf95db..e169d7a7b 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -98,6 +98,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_CMDLINE) += -lrte_cmdline
_LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER) += -lrte_reorder
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrte_sched
_LDLIBS-$(CONFIG_RTE_LIBRTE_RCU) += -lrte_rcu
+_LDLIBS-$(CONFIG_RTE_LIBRTE_GRAPH) += -lrte_graph
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUX),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_KNI) += -lrte_kni
--
2.24.1