DPDK patches and discussions
* [RFC PATCH 0/2] add feature arc in rte_graph
@ 2024-04-26 12:22 Nitin Saxena
  2024-04-26 12:22 ` [RFC PATCH 1/2] graph: add feature arc support Nitin Saxena
  2024-04-26 12:22 ` [RFC PATCH 2/2] graph: add ip4 output feature arc Nitin Saxena
  0 siblings, 2 replies; 3+ messages in thread
From: Nitin Saxena @ 2024-04-26 12:22 UTC
  To: Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram, Zhirun Yan; +Cc: dev

Feature arc represents an ordered list of features/protocols at a given
networking layer. It is a high level abstraction to connect various
rte_graph nodes, as feature nodes, and to steer packets across these
nodes in a generic manner.

Features (or feature nodes) are nodes that handle partial or complete
processing of a protocol in fast path, such as the ipv4-rewrite node,
which adds rewrite data to an outgoing IPv4 packet.

However, in the above example the outgoing interface (say "eth0") may
have an outbound IPsec policy enabled, so packets must be steered from
the ipv4-rewrite node to the ipsec-outbound-policy node for outbound
IPsec policy lookup. On the other hand, packets routed to another
interface (eth1) will not be sent to the ipsec-outbound-policy node, as
the IPsec feature is disabled on eth1. Feature arc allows rte_graph
applications to manage such constraints easily.

The feature arc abstraction allows a rte_graph based application to do
the following (a usage sketch follows the list):

1. Seamlessly steer packets across feature nodes based on whether a
feature is enabled or disabled on an interface. Features enabled on one
interface may not be enabled on another interface within the same
feature arc.

2. Enable/disable features on an interface at runtime, so that if a
feature is disabled, packets associated with that interface are not
steered to the corresponding feature node.

3. Hook custom/user-defined nodes to a feature node and steer packets
from the feature node to the custom node without changing the former's
fast path function.

4. Express features in a particular sequential order so that packets
are steered in an ordered way across nodes in fast path. For example,
if the IPsec and IPv4 features are both enabled on an ingress
interface, packets must be sent to the IPsec inbound policy node first
and then to the ipv4-lookup node.

This patch series adds a feature arc library to rte_graph and also adds
"ipv4-output" feature arc handling to the "ipv4-rewrite" node.

Nitin Saxena (2):
  graph: add feature arc support
  graph: add ip4 output feature arc

 lib/graph/graph_feature_arc.c            | 834 +++++++++++++++++++++++
 lib/graph/meson.build                    |   2 +
 lib/graph/rte_graph_feature_arc.h        | 310 +++++++++
 lib/graph/rte_graph_feature_arc_worker.h | 483 +++++++++++++
 lib/graph/version.map                    |  16 +
 lib/node/ip4_rewrite.c                   | 278 ++++++--
 lib/node/ip4_rewrite_priv.h              |  10 +-
 lib/node/node_private.h                  |  10 +-
 lib/node/rte_node_ip4_api.h              |   3 +
 9 files changed, 1878 insertions(+), 68 deletions(-)
 create mode 100644 lib/graph/graph_feature_arc.c
 create mode 100644 lib/graph/rte_graph_feature_arc.h
 create mode 100644 lib/graph/rte_graph_feature_arc_worker.h

-- 
2.25.1



* [RFC PATCH 1/2] graph: add feature arc support
  2024-04-26 12:22 [RFC PATCH 0/2] add feature arc in rte_graph Nitin Saxena
@ 2024-04-26 12:22 ` Nitin Saxena
  2024-04-26 12:22 ` [RFC PATCH 2/2] graph: add ip4 output feature arc Nitin Saxena
  1 sibling, 0 replies; 3+ messages in thread
From: Nitin Saxena @ 2024-04-26 12:22 UTC
  To: Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram, Zhirun Yan; +Cc: dev

Signed-off-by: Nitin Saxena <nsaxena@marvell.com>
---
 lib/graph/graph_feature_arc.c            | 834 +++++++++++++++++++++++
 lib/graph/meson.build                    |   2 +
 lib/graph/rte_graph_feature_arc.h        | 310 +++++++++
 lib/graph/rte_graph_feature_arc_worker.h | 483 +++++++++++++
 lib/graph/version.map                    |  16 +
 5 files changed, 1645 insertions(+)
 create mode 100644 lib/graph/graph_feature_arc.c
 create mode 100644 lib/graph/rte_graph_feature_arc.h
 create mode 100644 lib/graph/rte_graph_feature_arc_worker.h

diff --git a/lib/graph/graph_feature_arc.c b/lib/graph/graph_feature_arc.c
new file mode 100644
index 0000000000..fc3662727d
--- /dev/null
+++ b/lib/graph/graph_feature_arc.c
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell International Ltd.
+ */
+
+#include "graph_private.h"
+#include <rte_graph_feature_arc_worker.h>
+#include <rte_malloc.h>
+
+#define __RTE_GRAPH_FEATURE_ARC_MAX 32
+
+#define rte_graph_uint_cast(x) ((unsigned int)x)
+
+rte_graph_feature_arc_main_t *__feature_arc_main;
+
+static int
+feature_lookup(struct rte_graph_feature_arc *dfl, const char *feat_name,
+	       struct rte_graph_feature_node_list **ffinfo, uint32_t *slot)
+{
+	struct rte_graph_feature_node_list *finfo = NULL;
+	const char *name;
+
+	if (!feat_name)
+		return -1;
+
+	if (slot)
+		*slot = 0;
+
+	STAILQ_FOREACH(finfo, &dfl->all_features, next_feature) {
+		RTE_VERIFY(finfo->feature_arc == dfl);
+		name = rte_node_id_to_name(finfo->feature_node->id);
+		if (!strncmp(name, feat_name, RTE_GRAPH_NAMESIZE)) {
+			if (ffinfo)
+				*ffinfo = finfo;
+			return 0;
+		}
+		if (slot)
+			(*slot)++;
+	}
+	return -1;
+}
+
+static int
+feature_arc_lookup(rte_graph_feature_arc_t _dfl)
+{
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+	rte_graph_feature_arc_main_t *dm = __feature_arc_main;
+	uint32_t iter;
+
+	if (!__feature_arc_main)
+		return -1;
+
+	for (iter = 0; iter < dm->max_feature_arcs; iter++) {
+		if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
+			continue;
+
+		if (dfl == (rte_graph_feature_arc_get(dm->feature_arcs[iter])))
+			return 0;
+	}
+	return -1;
+}
+
+static int
+get_existing_edge(const char *arc_name, struct rte_node_register *parent_node,
+		  struct rte_node_register *child_node, rte_edge_t *_edge)
+{
+	char **next_edges = NULL;
+	uint32_t count, i;
+
+	RTE_SET_USED(arc_name);
+
+	count = rte_node_edge_get(parent_node->id, NULL);
+	next_edges = malloc(count);
+
+	if (!next_edges)
+		return -1;
+
+	count = rte_node_edge_get(parent_node->id, next_edges);
+	for (i = 0; i < count; i++) {
+		if (strstr(child_node->name, next_edges[i])) {
+			graph_dbg("%s: Edge exists [%s[%u]: \"%s\"]", arc_name,
+				  parent_node->name, i, child_node->name);
+			if (_edge)
+				*_edge = (rte_edge_t)i;
+
+			free(next_edges);
+			return 0;
+		}
+	}
+	free(next_edges);
+
+	return -1;
+}
+
+static int
+connect_graph_nodes(struct rte_node_register *parent_node, struct rte_node_register *child_node,
+		    rte_edge_t *_edge, char *arc_name)
+{
+	const char *next_node = NULL;
+	rte_edge_t edge;
+
+	if (!get_existing_edge(arc_name, parent_node, child_node, &edge)) {
+		graph_dbg("%s: add_feature: Edge reused [%s[%u]: \"%s\"]", arc_name,
+			parent_node->name, edge, child_node->name);
+
+		if (_edge)
+			*_edge = edge;
+
+		return 0;
+	}
+
+	/* Node to be added */
+	next_node = child_node->name;
+
+	edge = rte_node_edge_update(parent_node->id, RTE_EDGE_ID_INVALID, &next_node, 1);
+
+	if (edge == RTE_EDGE_ID_INVALID) {
+		graph_err("edge invalid");
+		return -1;
+	}
+	edge = rte_node_edge_count(parent_node->id) - 1;
+
+	graph_dbg("%s: add_feature: edge added [%s[%u]: \"%s\"]", arc_name, parent_node->name, edge,
+		child_node->name);
+
+	if (_edge)
+		*_edge = edge;
+
+	return 0;
+}
+
+static int
+feature_arc_init(rte_graph_feature_arc_main_t **pfl, uint32_t max_feature_arcs)
+{
+	rte_graph_feature_arc_main_t *pm = NULL;
+	uint32_t i;
+	size_t sz;
+
+	if (!pfl)
+		return -1;
+
+	sz = sizeof(rte_graph_feature_arc_main_t) +
+		(sizeof(pm->feature_arcs[0]) * max_feature_arcs);
+
+	pm = malloc(sz);
+	if (!pm)
+		return -1;
+
+	memset(pm, 0, sz);
+
+	for (i = 0; i < max_feature_arcs; i++)
+		pm->feature_arcs[i] = RTE_GRAPH_FEATURE_ARC_INITIALIZER;
+
+	pm->max_feature_arcs = max_feature_arcs;
+
+	*pfl = pm;
+
+	return 0;
+}
+
+int
+rte_graph_feature_arc_init(int max_feature_arcs)
+{
+	if (!max_feature_arcs)
+		return -1;
+
+	if (__feature_arc_main)
+		return -1;
+
+	return feature_arc_init(&__feature_arc_main, max_feature_arcs);
+}
+
+int
+rte_graph_feature_arc_create(const char *feature_arc_name, int max_features, int max_indexes,
+		       struct rte_node_register *start_node, rte_graph_feature_arc_t *_dfl)
+{
+	char name[2 * RTE_GRAPH_FEATURE_ARC_NAMELEN];
+	rte_graph_feature_arc_main_t *dfm = NULL;
+	struct rte_graph_feature_arc *dfl = NULL;
+	struct rte_graph_feature_data *dfd = NULL;
+	struct rte_graph_feature *df = NULL;
+	uint32_t iter, j, arc_index;
+	size_t sz;
+
+	if (!_dfl)
+		return -1;
+
+	if (max_features < 1)
+		return -1;
+
+	if (!start_node)
+		return -1;
+
+	if (!feature_arc_name)
+		return -1;
+
+	if (max_features > RTE_GRAPH_FEATURE_MAX_PER_ARC) {
+		graph_err("Invalid max features: %u", max_features);
+		return -1;
+	}
+
+	/*
+	 * Application hasn't called rte_graph_feature_arc_init(). Initialize with
+	 * default values
+	 */
+	if (!__feature_arc_main) {
+		if (rte_graph_feature_arc_init((int)__RTE_GRAPH_FEATURE_ARC_MAX) < 0) {
+			graph_err("rte_graph_feature_arc_init() failed");
+			return -1;
+		}
+	}
+
+	dfm = __feature_arc_main;
+
+	/* threshold check */
+	if (dfm->num_feature_arcs > (dfm->max_feature_arcs - 1)) {
+		graph_err("max threshold for num_feature_arcs: %d reached",
+			dfm->max_feature_arcs - 1);
+		return -1;
+	}
+	/* Find the free slot for feature arc */
+	for (iter = 0; iter < dfm->max_feature_arcs; iter++) {
+		if (dfm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
+			break;
+	}
+	arc_index = iter;
+
+	if (arc_index >= dfm->max_feature_arcs) {
+		graph_err("No free slot found for num_feature_arc");
+		return -1;
+	}
+
+	/* This should not happen */
+	RTE_VERIFY(dfm->feature_arcs[arc_index] == RTE_GRAPH_FEATURE_ARC_INITIALIZER);
+
+	sz = sizeof(*dfl) + (sizeof(uint64_t) * max_indexes);
+
+	dfl = rte_malloc(feature_arc_name, sz, RTE_CACHE_LINE_SIZE);
+
+	if (!dfl) {
+		graph_err("malloc failed for feature_arc_create()");
+		return -1;
+	}
+
+	memset(dfl, 0, sz);
+
+	snprintf(name, sizeof(name), "%s-%s", feature_arc_name, "feat");
+
+	dfl->features_by_index =
+		rte_malloc(name, sizeof(struct rte_graph_feature) * max_indexes,
+			   RTE_CACHE_LINE_SIZE);
+
+	if (!dfl->features_by_index) {
+		rte_free(dfl);
+		graph_err("rte_malloc failed for allocating features_by_index()");
+		return -ENOMEM;
+	}
+	memset(dfl->features_by_index, 0, sizeof(struct rte_graph_feature) * max_indexes);
+
+	/* Initialize feature arc fixed variables */
+	STAILQ_INIT(&dfl->all_features);
+	strncpy(dfl->feature_arc_name, feature_arc_name, RTE_GRAPH_FEATURE_ARC_NAMELEN - 1);
+	dfl->feature_arc_main = (void *)dfm;
+	dfl->start_node = start_node;
+	dfl->max_features = max_features;
+	dfl->max_indexes = max_indexes;
+
+	for (iter = 0; iter < dfl->max_indexes; iter++) {
+		df = rte_graph_feature_get(dfl, iter);
+		for (j = 0; j < dfl->max_features; j++) {
+			dfd = rte_graph_feature_data_get(df, j);
+			dfd->feature_data_index = RTE_GRAPH_FEATURE_INVALID_VALUE;
+		}
+	}
+	dfl->feature_arc_index = arc_index;
+	dfm->feature_arcs[dfl->feature_arc_index] = (rte_graph_feature_arc_t)dfl;
+	dfm->num_feature_arcs++;
+
+	if (_dfl)
+		*_dfl = (rte_graph_feature_arc_t)dfl;
+
+	return 0;
+}
+
+int
+rte_graph_feature_add(rte_graph_feature_arc_t _dfl, struct rte_node_register *feature_node,
+		const char *after_feature, const char *before_feature)
+{
+	struct rte_graph_feature_node_list *after_finfo = NULL, *before_finfo = NULL;
+	struct rte_graph_feature_node_list *temp = NULL, *finfo = NULL;
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+	uint32_t slot, add_flag;
+	rte_edge_t edge = -1;
+
+	RTE_VERIFY(dfl->feature_arc_main == __feature_arc_main);
+
+	if (feature_arc_lookup(_dfl)) {
+		graph_err("invalid feature arc: 0x%016" PRIx64, (uint64_t)_dfl);
+		return -1;
+	}
+
+	if (dfl->feature_enable_started) {
+		graph_err("adding features after enabling any one of them is not supported");
+		return -1;
+	}
+
+	if ((after_feature != NULL) && (before_feature != NULL) &&
+	    (strcmp(after_feature, before_feature) == 0)) {
+		graph_err("after_feature and before_feature are same: '%s'",
+			after_feature);
+		return -1;
+	}
+
+	if (!feature_node) {
+		graph_err("feature_node: %p invalid", feature_node);
+		return -1;
+	}
+
+	dfl = rte_graph_feature_arc_get(_dfl);
+
+	if (feature_node->id == RTE_NODE_ID_INVALID) {
+		graph_err("Invalid node: %s", feature_node->name);
+		return -1;
+	}
+
+	if (!feature_lookup(dfl, feature_node->name, &finfo, &slot)) {
+		graph_err("%s feature already added", feature_node->name);
+		return -1;
+	}
+
+	if (slot >= RTE_GRAPH_FEATURE_MAX_PER_ARC) {
+		graph_err("Max slot %u reached for feature addition", slot);
+		return -1;
+	}
+
+	if (strstr(feature_node->name, dfl->start_node->name)) {
+		graph_err("Feature %s cannot point to itself: %s", feature_node->name,
+			dfl->start_node->name);
+		return -1;
+	}
+
+	if (connect_graph_nodes(dfl->start_node, feature_node, &edge, dfl->feature_arc_name)) {
+		graph_err("unable to connect %s -> %s", dfl->start_node->name, feature_node->name);
+		return -1;
+	}
+
+	finfo = malloc(sizeof(*finfo));
+	if (!finfo)
+		return -1;
+
+	memset(finfo, 0, sizeof(*finfo));
+
+	finfo->feature_arc = (void *)dfl;
+	finfo->feature_node = feature_node;
+	finfo->edge_to_this_feature = edge;
+
+	/* Check for before and after constraints */
+	if (before_feature) {
+		/* before_feature sanity */
+		if (feature_lookup(dfl, before_feature, &before_finfo, NULL))
+			SET_ERR_JMP(EINVAL, finfo_free,
+				     "Invalid before feature name: %s", before_feature);
+
+		if (!before_finfo)
+			SET_ERR_JMP(EINVAL, finfo_free,
+				     "before_feature %s does not exist", before_feature);
+
+		/*
+		 * Starting from 0 to before_feature, continue connecting edges
+		 */
+		add_flag = 1;
+		STAILQ_FOREACH(temp, &dfl->all_features, next_feature) {
+			/*
+			 * As soon as we see before_feature, stop adding edges
+			 */
+			if (!strncmp(temp->feature_node->name, before_feature,
+				     RTE_GRAPH_NAMESIZE))
+				if (!connect_graph_nodes(finfo->feature_node, temp->feature_node,
+							 &edge, dfl->feature_arc_name))
+					add_flag = 0;
+
+			if (add_flag)
+				connect_graph_nodes(temp->feature_node, finfo->feature_node, NULL,
+						    dfl->feature_arc_name);
+		}
+	}
+
+	if (after_feature) {
+		if (feature_lookup(dfl, after_feature, &after_finfo, NULL))
+			SET_ERR_JMP(EINVAL, finfo_free,
+				     "Invalid after feature_name %s", after_feature);
+
+		if (!after_finfo)
+			SET_ERR_JMP(EINVAL, finfo_free,
+				     "after_feature %s does not exist", after_feature);
+
+		/* Starting from after_feature to end continue connecting edges */
+		add_flag = 0;
+		STAILQ_FOREACH(temp, &dfl->all_features, next_feature) {
+			/* We have already seen after_feature now */
+			if (add_flag)
+				/* Add all features as next node to current feature*/
+				connect_graph_nodes(finfo->feature_node, temp->feature_node, NULL,
+						    dfl->feature_arc_name);
+
+			/* As soon as we see after_feature, start adding edges
+			 * from the next iteration
+			 */
+			if (!strncmp(temp->feature_node->name, after_feature, RTE_GRAPH_NAMESIZE))
+				/* connect after_feature to this feature */
+				if (!connect_graph_nodes(temp->feature_node, finfo->feature_node,
+							 &edge, dfl->feature_arc_name))
+					add_flag = 1;
+		}
+
+		/* add feature next to after_feature */
+		STAILQ_INSERT_AFTER(&dfl->all_features, after_finfo, finfo, next_feature);
+	} else {
+		if (before_finfo) {
+			after_finfo = NULL;
+			STAILQ_FOREACH(temp, &dfl->all_features, next_feature) {
+				if (before_finfo == temp) {
+					if (after_finfo)
+						STAILQ_INSERT_AFTER(&dfl->all_features, after_finfo,
+								    finfo, next_feature);
+					else
+						STAILQ_INSERT_HEAD(&dfl->all_features, finfo,
+								   next_feature);
+
+					return 0;
+				}
+				after_finfo = temp;
+			}
+		} else {
+			STAILQ_INSERT_TAIL(&dfl->all_features, finfo, next_feature);
+		}
+	}
+
+	return 0;
+
+finfo_free:
+	free(finfo);
+
+	return -1;
+}
+
+int
+rte_graph_feature_destroy(rte_graph_feature_arc_t _dfl, const char *feature_name)
+{
+	RTE_SET_USED(_dfl);
+	RTE_SET_USED(feature_name);
+	return 0;
+}
+
+int
+rte_graph_feature_validate(rte_graph_feature_arc_t _dfl, uint32_t index, const char *feature_name,
+		     int is_enable_disable)
+{
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+	struct rte_graph_feature_node_list *finfo = NULL;
+	struct rte_graph_feature_data *dfd = NULL;
+	struct rte_graph_feature *df = NULL;
+	uint32_t slot;
+
+	/* validate _dfl */
+	if (dfl->feature_arc_main != __feature_arc_main) {
+		graph_err("invalid feature arc: 0x%016" PRIx64, (uint64_t)_dfl);
+		return -EINVAL;
+	}
+
+	/* validate index */
+	if (index >= dfl->max_indexes) {
+		graph_err("%s: Invalid provided index: %u >= %u configured", dfl->feature_arc_name,
+			index, dfl->max_indexes);
+		return -1;
+	}
+
+	/* validate whether feature_name has already been added */
+	if (feature_lookup(dfl, feature_name, &finfo, &slot)) {
+		graph_err("%s: No feature %s added", dfl->feature_arc_name, feature_name);
+		return -EINVAL;
+	}
+
+	if (!finfo) {
+		graph_err("%s: No feature: %s found", dfl->feature_arc_name, feature_name);
+		return -EINVAL;
+	}
+
+	/* slot should be in valid range */
+	if (slot >= dfl->max_features) {
+		graph_err("%s/%s: Invalid free slot %u(max=%u) for feature", dfl->feature_arc_name,
+			feature_name, slot, dfl->max_features);
+		return -EINVAL;
+	}
+
+	df = rte_graph_feature_get(dfl, index);
+
+	/* Exceeded all enabled features for index */
+	if (is_enable_disable && (df->num_enabled_features >= dfl->max_features)) {
+		graph_err("%s: Index: %u has already enabled all features(%d/%d)",
+			dfl->feature_arc_name, index, df->num_enabled_features, dfl->max_features);
+		return -EINVAL;
+	}
+
+	dfd = rte_graph_feature_data_get(df, slot);
+
+	/* validate via bitmask if asked feature is already enabled on index */
+	if (is_enable_disable && (dfl->feature_bit_mask_by_index[index] &
+				  RTE_BIT64(slot))) {
+		graph_err("%s: %s already enabled on index: %u",
+			  dfl->feature_arc_name, feature_name, index);
+		return -1;
+	}
+
+	if (!is_enable_disable && !(dfl->feature_bit_mask_by_index[index] & RTE_BIT64(slot))) {
+		graph_err("%s: %s not enabled in bitmask for index: %u", dfl->feature_arc_name,
+			feature_name, index);
+		return -1;
+	}
+
+	/* validate via feature data that feature_data not in use */
+	if (is_enable_disable && (dfd->feature_data_index !=
+				  RTE_GRAPH_FEATURE_INVALID_VALUE)) {
+		graph_err("%s/%s: slot: %u already in use by %s",
+			  dfl->feature_arc_name, feature_name, slot,
+			  dfd->node_info->feature_node->name);
+		return -1;
+	}
+
+	if (!is_enable_disable && (dfd->feature_data_index == RTE_GRAPH_FEATURE_INVALID_VALUE)) {
+		graph_err("%s/%s: feature data slot: %u not in use ", dfl->feature_arc_name,
+			feature_name, slot);
+		return -1;
+	}
+	return 0;
+}
+
+int
+rte_graph_feature_enable(rte_graph_feature_arc_t _dfl, uint32_t index, const
+			 char *feature_name, int64_t data)
+{
+	struct rte_graph_feature_data *dfd = NULL, *prev_dfd = NULL, *next_dfd = NULL;
+	uint64_t original_mask, lower_feature_mask, upper_feature_mask;
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+	struct rte_graph_feature_node_list *finfo = NULL;
+	uint32_t slot, prev_feature, next_feature;
+	struct rte_graph_feature *df = NULL;
+	rte_edge_t edge = 0;
+	int rc = 0;
+
+	if (rte_graph_feature_validate(_dfl, index, feature_name, 1))
+		return -1;
+
+	if (feature_lookup(dfl, feature_name, &finfo, &slot))
+		return -1;
+
+	df = rte_graph_feature_get(dfl, index);
+	dfd = rte_graph_feature_data_get(df, slot);
+
+	graph_dbg("%s: Enabling feature %s in index: %u at slot %u", dfl->feature_arc_name,
+		feature_name, index, slot);
+
+	memset(dfd, 0, sizeof(*dfd));
+
+	/* app data */
+	dfd->data = data;
+	/* First fill invalid value until everything succeeds */
+	dfd->feature_data_index = RTE_GRAPH_FEATURE_INVALID_VALUE;
+	dfd->node_info = finfo;
+
+	/* edge from base feature arc node to this feature */
+	dfd->edge_to_this_feature = finfo->edge_to_this_feature;
+	dfd->edge_to_next_feature = RTE_GRAPH_FEATURE_INVALID_VALUE;
+
+	/* This should be the case */
+	RTE_VERIFY(slot == (dfd - df->feature_data));
+
+	/* Adjust next edge for previous enabled feature and next enabled
+	 * feature for this index
+	 */
+	original_mask = dfl->feature_bit_mask_by_index[index];
+
+	/* If slot == 0, no lower feature is enabled
+	 * if slot = 1, lower_feature_mask = 0x1,
+	 * if slot = 2, lower_feature_mask = 0x3,
+	 * if slot = 3, lower_feature_mask = 0x7,
+	 */
+	lower_feature_mask = (slot) ? (RTE_BIT64(slot) - 1) : 0;
+
+	/*
+	 * If slot =0, upper_feature_mask = (0xff ff ff ff ff ff ff ff) & ~lower_feature_mask
+	 * If slot =1, upper_feature_mask = (0xff ff ff ff ff ff ff fe) & ~lower_feature_mask
+	 * If slot =2, upper_feature_mask = (0xff ff ff ff ff ff ff fc) & ~lower_feature_mask
+	 * If slot =3, upper_feature_mask = (0xff ff ff ff ff ff ff f8) & ~lower_feature_mask
+	 * If slot =4, upper_feature_mask = (0xff ff ff ff ff ff ff f0) & ~lower_feature_mask
+	 */
+	upper_feature_mask = ~(RTE_BIT64(slot)) & (~lower_feature_mask);
+
+	/* And with original bit mask */
+	upper_feature_mask &= original_mask;
+
+	/* set bits lesser than slot */
+	lower_feature_mask &= original_mask;
+
+	/* immediate lower enabled feature wrt slot is most significant bit in
+	 * lower_feature_mask
+	 */
+	prev_feature = rte_fls_u64(lower_feature_mask);
+
+	if (prev_feature) {
+		/* for us slot starts from 0 instead of 1 */
+		prev_feature--;
+		prev_dfd = rte_graph_feature_data_get(df, prev_feature);
+
+		graph_dbg("%s: enabling for index: %u, %s[] = %s", dfl->feature_arc_name, index,
+			prev_dfd->node_info->feature_node->name,
+			dfd->node_info->feature_node->name);
+		RTE_VERIFY(prev_dfd->feature_data_index != RTE_GRAPH_FEATURE_INVALID_VALUE);
+		if (get_existing_edge(dfl->feature_arc_name, prev_dfd->node_info->feature_node,
+				      dfd->node_info->feature_node, &edge)) {
+			graph_err("%s: index: %u, Could not add next edge from %s to %s",
+				dfl->feature_arc_name, index,
+				prev_dfd->node_info->feature_node->name,
+				dfd->node_info->feature_node->name);
+			rc = -1;
+		} else {
+			graph_dbg("%s: enabled for index: %u, slot %u, %s[%u] = %s",
+				dfl->feature_arc_name, index, slot,
+				prev_dfd->node_info->feature_node->name, edge,
+				dfd->node_info->feature_node->name);
+			prev_dfd->edge_to_next_feature = edge;
+		}
+		if (rc < 0)
+			return -1;
+	}
+
+	/* immediate next upper feature wrt slot is least significant bit in
+	 * upper_feature_mask
+	 */
+	rc = 0;
+	if (rte_bsf64_safe(upper_feature_mask, &next_feature)) {
+		next_dfd = rte_graph_feature_data_get(df, next_feature);
+
+		graph_dbg("%s: enabling for index: %u, %s[] = %s ", dfl->feature_arc_name, index,
+			dfd->node_info->feature_node->name,
+			next_dfd->node_info->feature_node->name);
+		RTE_VERIFY(next_dfd->feature_data_index != RTE_GRAPH_FEATURE_INVALID_VALUE);
+		if (get_existing_edge(dfl->feature_arc_name, dfd->node_info->feature_node,
+				      next_dfd->node_info->feature_node, &edge)) {
+			graph_err("%s: index: %u, Could not add next edge from %s to %s",
+				dfl->feature_arc_name, index, dfd->node_info->feature_node->name,
+				next_dfd->node_info->feature_node->name);
+			rc = -1;
+		} else {
+			graph_dbg("%s: enabled for index: %u, slot %u, %s[%u] = %s",
+				dfl->feature_arc_name, index, slot,
+				dfd->node_info->feature_node->name, edge,
+				next_dfd->node_info->feature_node->name);
+			dfd->edge_to_next_feature = edge;
+		}
+		if (rc < 0)
+			return -1;
+	}
+
+	graph_dbg("%s: enabled for index: %u, slot %u, %s[%u] = %s", dfl->feature_arc_name, index,
+		slot, dfl->start_node->name, dfd->edge_to_this_feature,
+		dfd->node_info->feature_node->name);
+
+	/* Make dfd valid now */
+	dfd->feature_data_index = dfd - df->feature_data;
+
+	/* Increase feature node info reference count */
+	finfo->ref_count++;
+
+	/* Increment number of enabled feature on this index */
+	df->num_enabled_features++;
+
+	/* Disallow rte_graph_feature_add() on this feature arc from now on */
+	dfl->feature_enable_started++;
+
+	/* Update bitmask feature arc bit mask */
+	rte_bit_relaxed_set64(rte_graph_uint_cast(slot), &dfl->feature_bit_mask_by_index[index]);
+
+	/* Make sure the changes have taken effect */
+	RTE_VERIFY(dfl->feature_bit_mask_by_index[index] & RTE_BIT64(slot));
+
+	return 0;
+}
+
+int
+rte_graph_feature_disable(rte_graph_feature_arc_t _dfl, uint32_t index, const char *feature_name)
+{
+	struct rte_graph_feature_data *dfd = NULL, *prev_dfd = NULL, *next_dfd = NULL;
+	uint64_t original_mask, lower_feature_mask, upper_feature_mask;
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+	struct rte_graph_feature_node_list *finfo = NULL;
+	uint32_t slot, prev_feature, next_feature;
+	struct rte_graph_feature *df = NULL;
+	rte_edge_t edge = 0;
+	int rc = 0;
+
+	if (rte_graph_feature_validate(_dfl, index, feature_name, 0))
+		return -1;
+
+	if (feature_lookup(dfl, feature_name, &finfo, &slot))
+		return -1;
+
+	df = rte_graph_feature_get(dfl, index);
+	dfd = rte_graph_feature_data_get(df, slot);
+
+	/* This should be the case */
+	RTE_VERIFY(slot == (dfd - df->feature_data));
+
+	graph_dbg("%s: Disbling feature %s in index: %u at slot %u", dfl->feature_arc_name,
+		feature_name, index, slot);
+
+	/* Adjust next edge for previous enabled feature and next enabled
+	 * feature for this index
+	 */
+	original_mask = dfl->feature_bit_mask_by_index[index];
+
+	lower_feature_mask = (slot) ? (RTE_BIT64(slot) - 1) : 0;
+	upper_feature_mask = ~(RTE_BIT64(slot)) & (~lower_feature_mask);
+	upper_feature_mask &= original_mask;
+	lower_feature_mask &= original_mask;
+
+	/* immediate lower enabled feature wrt slot is most significant bit in
+	 * lower_feature_mask
+	 */
+	prev_feature = rte_fls_u64(lower_feature_mask);
+
+	if (prev_feature) {
+		/* for us slot starts from 0 instead of 1 */
+		prev_feature--;
+		prev_dfd = rte_graph_feature_data_get(df, prev_feature);
+
+		/* Adjust later to next enabled feature below */
+		prev_dfd->edge_to_next_feature = RTE_GRAPH_FEATURE_INVALID_VALUE;
+
+		/* If we also have next enable feature */
+		if (rte_bsf64_safe(upper_feature_mask, &next_feature)) {
+			next_dfd = rte_graph_feature_data_get(df, next_feature);
+
+			graph_dbg("%s: index: %u updating next enabled feature for %s to %s ",
+				dfl->feature_arc_name, index,
+				prev_dfd->node_info->feature_node->name,
+				next_dfd->node_info->feature_node->name);
+			if (get_existing_edge(dfl->feature_arc_name,
+					      prev_dfd->node_info->feature_node,
+					      next_dfd->node_info->feature_node, &edge)) {
+				graph_err("%s: index: %u, Could not get next edge from %s to %s",
+					dfl->feature_arc_name, index,
+					prev_dfd->node_info->feature_node->name,
+					next_dfd->node_info->feature_node->name);
+				rc = -1;
+			} else {
+				graph_dbg("%s: index: %u updated next enable feature for %s to %s at edge %u",
+					dfl->feature_arc_name, index,
+					prev_dfd->node_info->feature_node->name,
+					next_dfd->node_info->feature_node->name, edge);
+				prev_dfd->edge_to_next_feature = edge;
+			}
+			if (rc < 0)
+				return -1;
+		}
+	}
+
+	/* First fill invalid value until everything succeeds */
+	dfd->feature_data_index = RTE_GRAPH_FEATURE_INVALID_VALUE;
+	dfd->edge_to_this_feature = RTE_GRAPH_FEATURE_INVALID_VALUE;
+	dfd->edge_to_next_feature = RTE_GRAPH_FEATURE_INVALID_VALUE;
+
+	/* Decrease feature node info reference count */
+	finfo->ref_count--;
+
+	/* Decrement number of enabled features on this index */
+	df->num_enabled_features--;
+
+	/* Update bitmask feature arc bit mask */
+	rte_bit_relaxed_clear64(rte_graph_uint_cast(slot), &dfl->feature_bit_mask_by_index[index]);
+
+	return 0;
+}
+
+int
+rte_graph_feature_arc_destroy(rte_graph_feature_arc_t epg)
+{
+	RTE_SET_USED(epg);
+	return 0;
+}
+
+int
+rte_graph_feature_arc_cleanup(void)
+{
+	rte_graph_feature_arc_main_t *dm = __feature_arc_main;
+	uint32_t iter;
+
+	if (!__feature_arc_main)
+		return -1;
+
+	for (iter = 0; iter < dm->max_feature_arcs; iter++) {
+		if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
+			continue;
+
+		rte_graph_feature_arc_destroy((rte_graph_feature_arc_t)dm->feature_arcs[iter]);
+	}
+
+	return 0;
+}
+
+int
+rte_graph_feature_arc_lookup_by_name(const char *arc_name, rte_graph_feature_arc_t *_dfl)
+{
+	rte_graph_feature_arc_main_t *dm = __feature_arc_main;
+	struct rte_graph_feature_arc *dfl = NULL;
+	uint32_t iter;
+
+	if (!__feature_arc_main)
+		return -1;
+
+	for (iter = 0; iter < dm->max_feature_arcs; iter++) {
+		if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
+			continue;
+
+		dfl = rte_graph_feature_arc_get(dm->feature_arcs[iter]);
+
+		if (strstr(arc_name, dfl->feature_arc_name)) {
+			if (_dfl)
+				*_dfl = (rte_graph_feature_arc_t)dfl;
+			return 0;
+		}
+	}
+
+	return -1;
+}
diff --git a/lib/graph/meson.build b/lib/graph/meson.build
index 0cb15442ab..d916176fb7 100644
--- a/lib/graph/meson.build
+++ b/lib/graph/meson.build
@@ -14,11 +14,13 @@ sources = files(
         'graph_debug.c',
         'graph_stats.c',
         'graph_populate.c',
+        'graph_feature_arc.c',
         'graph_pcap.c',
         'rte_graph_worker.c',
         'rte_graph_model_mcore_dispatch.c',
 )
 headers = files('rte_graph.h', 'rte_graph_worker.h')
+headers += files('rte_graph_feature_arc.h', 'rte_graph_feature_arc_worker.h')
 indirect_headers += files(
         'rte_graph_model_mcore_dispatch.h',
         'rte_graph_model_rtc.h',
diff --git a/lib/graph/rte_graph_feature_arc.h b/lib/graph/rte_graph_feature_arc.h
new file mode 100644
index 0000000000..f2b428eb1e
--- /dev/null
+++ b/lib/graph/rte_graph_feature_arc.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell International Ltd.
+ */
+
+#ifndef _RTE_GRAPH_FEATURE_ARC_H_
+#define _RTE_GRAPH_FEATURE_ARC_H_
+
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_debug.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ *
+ * rte_graph_feature_arc.h
+ *
+ * Define APIs and structures/variables with respect to
+ *
+ * - Feature arc(s)
+ * - Feature(s)
+ *
+ * A feature arc represents an ordered list of features/protocols at a given
+ * networking layer. A feature arc provides a high level abstraction to
+ * connect various rte_graph nodes, designated as *feature nodes*, and to
+ * steer packets across these feature nodes in fast path processing in a
+ * generic manner. In a typical network stack, a protocol or feature must
+ * often be enabled first on a given interface before any packet received on
+ * that interface is steered to feature processing. For example, incoming
+ * IPv4 packets are sent to the routing sub-system only after a valid IPv4
+ * address is assigned to the receiving interface. In other words, packets
+ * often need to be steered across features not based on the packet content
+ * but based on whether the feature is enabled or disabled on a given
+ * incoming/outgoing interface. Feature arc provides a mechanism to
+ * enable/disable feature(s) on each interface and allows seamless packet
+ * steering across enabled feature nodes in fast path.
+ *
+ * Feature arc also provides a way to steer packets from standard nodes to
+ * custom/user-defined *feature nodes* without any change in the standard
+ * node's fast path functions.
+ *
+ * On a given interface multiple feature(s) might be enabled in a particular
+ * feature arc. For instance, both "ipv4-output" and "IPsec policy output"
+ * features may be enabled on "eth0" interface in "L3-output" feature arc.
+ * Similarly, "ipv6-output" and "ipsec-output" may be enabled on "eth1"
+ * interface in same "L3-output" feature arc.
+ *
+ * When multiple features are present in a given feature arc, it is
+ * imperative to run each feature in a particular sequential order. For
+ * instance, in the "L3-input" feature arc it may be required to run the
+ * "IPsec input" feature first, for packet decryption, before "ip-lookup".
+ * So a sequential order must be maintained among features present in a
+ * feature arc.
+ *
+ * Features can be enabled/disabled multiple times at runtime on some or all
+ * available interfaces present in the system, even after
+ * @b rte_graph_create() is called. Enabling/disabling a feature on one
+ * interface is independent of other interfaces.
+ *
+ * A given feature might consume a packet (if it is configured to do so) or
+ * may forward it to the next enabled feature. For instance, the "IPsec
+ * input" feature may consume/drop all packets with the "Protect" policy
+ * action, while all packets with the "Bypass" policy action may be
+ * forwarded to the next enabled feature (within the same feature arc).
+ *
+ * This library enables rte_graph based applications to steer packets in
+ * fast path to different feature nodes within a feature arc and supports
+ * all the functionality described above.
+ *
+ * In order to use feature-arc APIs, applications need to do the following
+ * in control path:
+ * - Initialize feature arc library via rte_graph_feature_arc_init()
+ * - Create feature arc via rte_graph_feature_arc_create()
+ * - Before calling rte_graph_create(), features must be added to feature-arc
+ *   via rte_graph_feature_add(). rte_graph_feature_add() allows adding
+ *   features in a sequential order with "runs_after" and "runs_before"
+ *   constraints.
+ * - Post rte_graph_create(), features can be enabled/disabled at runtime on
+ *   any interface via rte_graph_feature_enable()/rte_graph_feature_disable()
+ *
+ * In fast path, nodes use
+ * - rte_graph_feature_arc_has_feature() and
+ *   rte_graph_feature_arc_feature_data_get() APIs to steer packets across
+ *   feature nodes
+ *
+ * rte_graph_feature_enable()/rte_graph_feature_disable() APIs are not
+ * thread-safe and hence must be called from a single core while other
+ * cores are not using any fast path feature arc APIs.
+ */
+
+/** Initializer value for rte_graph_feature_arc_t */
+#define RTE_GRAPH_FEATURE_ARC_INITIALIZER ((rte_graph_feature_arc_t)UINT64_MAX)
+
+/** Invalid value for rte_graph_feature_t */
+#define RTE_GRAPH_FEATURE_INVALID_VALUE UINT16_MAX
+
+/** Max number of features supported in a given feature arc */
+#define RTE_GRAPH_FEATURE_MAX_PER_ARC 64
+
+/** Length of feature arc name */
+#define RTE_GRAPH_FEATURE_ARC_NAMELEN RTE_NODE_NAMESIZE
+
+/** @internal */
+#define rte_graph_feature_cast(x) ((rte_graph_feature_t)x)
+
+/** rte_graph feature arc object */
+typedef uint64_t rte_graph_feature_arc_t;
+
+/** rte_graph feature object */
+typedef uint32_t rte_graph_feature_t;
+
+/**
+ * Initialize feature arc subsystem
+ *
+ * @param max_feature_arcs
+ *   Maximum number of feature arcs required to be supported
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_init(int max_feature_arcs);
+
+/**
+ * Create a feature arc
+ *
+ * @param feature_arc_name
+ *   Feature arc name with max length of @ref RTE_GRAPH_FEATURE_ARC_NAMELEN
+ * @param max_features
+ *   Maximum number of features to be supported in this feature arc
+ * @param max_indexes
+ *   Maximum number of interfaces/ports/indexes to be supported
+ * @param start_node
+ *   Base node where this feature arc's features are checked in fast path
+ * @param[out] _dfl
+ *  Feature arc object
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_create(const char *feature_arc_name, int max_features, int max_indexes,
+			   struct rte_node_register *start_node, rte_graph_feature_arc_t *_dfl);
+
+/**
+ * Get feature arc object with name
+ *
+ * @param arc_name
+ *   Feature arc name provided to successful @ref rte_graph_feature_arc_create
+ * @param[out] _dfl
+ *   Feature arc object returned
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_lookup_by_name(const char *arc_name, rte_graph_feature_arc_t *_dfl);
+
+/**
+ * Add a feature to already created feature arc
+ *
+ * @param _dfl
+ *   Feature arc handle returned from @ref rte_graph_feature_arc_create()
+ * @param feature_node
+ *   Graph node representing feature. On success, feature_node is next_node of
+ *   feature_arc->start_node
+ * @param runs_after
+ *   Add this feature_node after already added "runs_after". Creates
+ *   start_node -> runs_after -> this_feature sequence
+ * @param runs_before
+ *  Add this feature_node before already added "runs_before". Creates
+ *  start_node -> this_feature -> runs_before sequence
+ *
+ * <I> Must be called before rte_graph_create </I>
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_add(rte_graph_feature_arc_t _dfl, struct rte_node_register *feature_node,
+		    const char *runs_after, const char *runs_before);
+
+/**
+ * Enable feature within a feature arc
+ *
+ * Must be called after @b rte_graph_create(). API is NOT Thread-safe
+ *
+ * @param _dfl
+ *   Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ *   rte_graph_feature_arc_lookup_by_name
+ * @param index
+ *   Application specific index. Can be corresponding to interface_id/port_id etc
+ * @param feature_name
+ *   Name of the node which is already added via @ref rte_graph_feature_add
+ * @param data
+ *   Application specific data which is retrieved in fast path
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_enable(rte_graph_feature_arc_t _dfl, uint32_t index, const char *feature_name,
+		       int64_t data);
+
+/**
+ * Validate whether subsequent enable/disable feature would succeed or not
+ * API is thread-safe
+ *
+ * @param _dfl
+ *   Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ *   rte_graph_feature_arc_lookup_by_name
+ * @param index
+ *   Application specific index. Can be corresponding to interface_id/port_id etc
+ * @param feature_name
+ *   Name of the node which is already added via @ref rte_graph_feature_add
+ * @param is_enable_disable
+ *   If 1, validate whether subsequent @ref rte_graph_feature_enable would pass or not
+ *   If 0, validate whether subsequent @ref rte_graph_feature_disable would pass or not
+ *
+ * @return
+ *  0: Subsequent enable/disable API would pass
+ * <0: Subsequent enable/disable API would not pass
+ */
+__rte_experimental
+int rte_graph_feature_validate(rte_graph_feature_arc_t _dfl, uint32_t index,
+			       const char *feature_name, int is_enable_disable);
+
+/**
+ * Disable already enabled feature within a feature arc
+ *
+ * Must be called after @b rte_graph_create(). API is NOT Thread-safe
+ *
+ * @param _dfl
+ *   Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ *   rte_graph_feature_arc_lookup_by_name
+ * @param index
+ *   Application specific index. Can be corresponding to interface_id/port_id etc
+ * @param feature_name
+ *   Name of the node which is already added via @ref rte_graph_feature_add
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_disable(rte_graph_feature_arc_t _dfl, uint32_t index,
+			      const char *feature_name);
+
+/**
+ * Destroy Feature
+ *
+ * @param _dfl
+ *   Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ *   rte_graph_feature_arc_lookup_by_name
+ * @param feature_name
+ *   Feature name provided to @ref rte_graph_feature_add
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_destroy(rte_graph_feature_arc_t _dfl, const char *feature_name);
+
+/**
+ * Delete feature_arc object
+ *
+ * @param _dfl
+ *   Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ *   rte_graph_feature_arc_lookup_by_name
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_destroy(rte_graph_feature_arc_t _dfl);
+
+/**
+ * Cleanup all feature arcs
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_cleanup(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/graph/rte_graph_feature_arc_worker.h b/lib/graph/rte_graph_feature_arc_worker.h
new file mode 100644
index 0000000000..92ae7072bd
--- /dev/null
+++ b/lib/graph/rte_graph_feature_arc_worker.h
@@ -0,0 +1,483 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell International Ltd.
+ */
+
+#ifndef _RTE_GRAPH_FEATURE_ARC_WORKER_H_
+#define _RTE_GRAPH_FEATURE_ARC_WORKER_H_
+
+#include <rte_graph_feature_arc.h>
+#include <rte_bitops.h>
+
+/**
+ * @file
+ *
+ * rte_graph_feature_arc_worker.h
+ *
+ * Defines fast path structure
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @internal
+ *
+ * Slow path feature node info list
+ */
+struct __rte_cache_aligned rte_graph_feature_node_list {
+	/** Next feature */
+	STAILQ_ENTRY(rte_graph_feature_node_list) next_feature;
+
+	/** node representing feature */
+	struct rte_node_register *feature_node;
+
+	/** How many indexes/interfaces using this feature */
+	int32_t ref_count;
+
+	/** Back pointer to feature arc */
+	void *feature_arc;
+
+	/** rte_edge_t to this feature node from feature_arc->start_node */
+	rte_edge_t edge_to_this_feature;
+};
+
+/**
+ * RTE_GRAPH feature data representing a fast path feature object on an interface/index
+ */
+typedef struct rte_graph_feature_data {
+	/** Data provided by application during @ref rte_graph_feature_enable on interface */
+	int64_t data;
+
+	/** this feature data index */
+	uint32_t feature_data_index;
+
+	/** Edge to this feature node from feature_arc->start_node */
+	rte_edge_t edge_to_this_feature;
+
+	/**
+	 * Edge to next enabled feature on a given interface/index. This field
+	 * keeps on changing as @ref rte_graph_feature_enable()/@ref
+	 * rte_graph_feature_disable() are called on a given interface/index
+	 */
+	rte_edge_t edge_to_next_feature;
+
+	/** Slow path node_info object */
+	struct rte_graph_feature_node_list *node_info;
+} rte_graph_feature_data_t;
+
+/**
+ * RTE_GRAPH Feature object
+ *
+ * Holds data of all features enabled on a given interface/index
+ */
+struct __rte_cache_aligned rte_graph_feature {
+	/**
+	 * Slow path node_info
+	 * 1st DWORD
+	 */
+	struct rte_graph_feature_node_list *node_info;
+
+	/** Feature arc back pointer
+	 *  2nd DWORD
+	 */
+	void *feature_arc;
+
+	/**
+	 * Number of enabled features in this feature_arc
+	 * 3rd WORD
+	 */
+	uint32_t num_enabled_features;
+
+	/* uint32_t reserved; */
+
+	/**
+	 * Array of feature_data indexed by feature slot
+	 */
+	struct rte_graph_feature_data feature_data[RTE_GRAPH_FEATURE_MAX_PER_ARC];
+};
+
+/**
+ * RTE_GRAPH Feature arc object
+ *
+ * Representing a feature arc holding all features which are enabled/disabled on any interfaces
+ */
+struct __rte_cache_aligned rte_graph_feature_arc {
+	/** All feature lists */
+	STAILQ_HEAD(, rte_graph_feature_node_list) all_features;
+
+	/** feature arc name */
+	char feature_arc_name[RTE_GRAPH_FEATURE_ARC_NAMELEN];
+
+	/** this feature group index in feature_arc_main */
+	uint32_t feature_arc_index;
+
+	/** Back pointer to feature_arc_main */
+	void *feature_arc_main;
+
+	/**
+	 * Start_node or Base node where this feature arc is checked for any feature
+	 */
+	struct rte_node_register *start_node;
+
+	/** Max features supported in this arc */
+	uint32_t max_features;
+
+	/** Boolean indicating @ref rte_graph_feature_enable has started and not
+	 * further addition is allowed
+	 */
+	int feature_enable_started;
+
+	/* Fast path stuff*/
+	alignas(RTE_CACHE_LINE_SIZE) RTE_MARKER c0;
+
+	/** RTE_GRAPH feature by interface */
+	struct rte_graph_feature *features_by_index;
+
+	/** Max interfaces supported */
+	uint32_t max_indexes;
+
+	/** Bitmask by interface. Set bit indicates feature is enabled on interface */
+	uint64_t feature_bit_mask_by_index[];
+};
+
+/** Feature arc main */
+typedef struct feature_arc_main {
+	/** number of feature arcs created by application */
+	uint32_t num_feature_arcs;
+
+	/** max features arcs allowed */
+	uint32_t max_feature_arcs;
+
+	/** feature arcs */
+	rte_graph_feature_arc_t feature_arcs[];
+} rte_graph_feature_arc_main_t;
+
+/** @internal Get feature arc pointer from object */
+#define rte_graph_feature_arc_get(dfl) ((struct rte_graph_feature_arc *)dfl)
+
+extern rte_graph_feature_arc_main_t *__feature_arc_main;
+
+/**
+ * Get the rte_graph feature data object for a given slot in a feature
+ *
+ * @param df
+ *   Feature pointer
+ * @param feature_index
+ *  Index of feature maintained in slow path linked list
+ *
+ * @return
+ *   Valid feature data
+ */
+static inline struct rte_graph_feature_data *
+rte_graph_feature_data_get(struct rte_graph_feature *df, uint32_t feature_index)
+{
+	return (df->feature_data + feature_index);
+}
+
+/**
+ * Get rte_graph_feature object for a given interface/index from feature arc
+ *
+ * @param dfl
+ *   Feature arc pointer
+ * @param index
+ *   Interface index
+ *
+ * @return
+ *   Valid feature pointer
+ */
+static inline struct rte_graph_feature *
+rte_graph_feature_get(struct rte_graph_feature_arc *dfl, uint32_t index)
+{
+	return (dfl->features_by_index + index);
+}
+
+/**
+ * Fast path API to check if first feature enabled on a feature arc
+ *
+ * Must be called in feature_arc->start_node processing
+ *
+ * @param dfl
+ *   Feature arc object
+ * @param index
+ *   Interface/Index
+ * @param[out] feature
+ *   Pointer to rte_graph_feature_t. Valid if API returns 1
+ *
+ * @return
+ * 1: If feature is enabled
+ * 0: If feature is not enabled
+ *
+ */
+static inline int
+rte_graph_feature_arc_has_first_feature(struct rte_graph_feature_arc *dfl,
+					uint32_t index, rte_graph_feature_t *feature)
+{
+	return rte_bsf64_safe(dfl->feature_bit_mask_by_index[index], feature);
+}
+
+/**
+ * Fast path API to get the next feature when the current node is already on
+ * a feature arc and is not consuming the packet. This feature must forward
+ * the packet to the next enabled feature by passing the returned
+ * rte_graph_feature_t to rte_graph_feature_arc_next_feature_data_get()
+ *
+ * @param dfl
+ *   Feature arc object
+ * @param index
+ *   Interface/Index
+ * @param[out] feature
+ *   Pointer to rte_graph_feature_t. Valid if API returns 1
+ *
+ * @return
+ * 1: If next feature is enabled
+ * 0: If next feature is not enabled
+ */
+static inline int
+rte_graph_feature_arc_has_next_feature(struct rte_graph_feature_arc *dfl,
+				       uint32_t index, rte_graph_feature_t *feature)
+{
+	uint32_t next_feature;
+	uint64_t bitmask;
+
+#ifdef RTE_GRAPH_FEATURE_ARC_DEBUG
+	struct rte_graph_feature *df = rte_graph_feature_get(dfl, index);
+	struct rte_graph_feature_data *dfd = NULL;
+
+	dfd = rte_graph_feature_data_get(df, *feature);
+	/** Check feature sanity */
+	if (unlikely(dfd->feature_data_index != *feature))
+		return 0;
+#endif
+
+	/* Create a bitmask where the current feature and all bits below it
+	 * are cleared, so the next enabled feature bit can be found. Guard
+	 * against an undefined shift when the current feature occupies the
+	 * last (63rd) slot.
+	 */
+	next_feature = (uint32_t)*feature;
+	bitmask = (next_feature >= 63) ? 0 :
+		  (UINT64_MAX << (next_feature + 1));
+	bitmask = dfl->feature_bit_mask_by_index[index] & bitmask;
+
+	return rte_bsf64_safe(bitmask, feature);
+}
+
+/**
+ * Fast path API to check if any feature enabled on a feature arc
+ *
+ * @param _dfl
+ *   Feature arc object
+ * @param index
+ *   Interface/Index
+ * @param[out] feature
+ *   Pointer to rte_graph_feature_t. Valid if API returns 1
+ *
+ * @return
+ * 1: If feature is enabled
+ * 0: If feature is not enabled
+ *
+ */
+static inline int
+rte_graph_feature_arc_has_feature(rte_graph_feature_arc_t _dfl, uint32_t index,
+				  rte_graph_feature_t *feature)
+{
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+
+#ifdef RTE_GRAPH_FEATURE_ARC_DEBUG
+	if (unlikely(dfl->max_indexes < index))
+		return 0;
+
+	if (unlikely(!feature))
+		return 0;
+#endif
+	/* Look for first feature */
+	if (*feature == RTE_GRAPH_FEATURE_INVALID_VALUE)
+		return rte_graph_feature_arc_has_first_feature(dfl, index, feature);
+	else
+		return rte_graph_feature_arc_has_next_feature(dfl, index, feature);
+}
+
+
+/**
+ * Prefetch feature data upfront
+ *
+ * @param _dfl
+ *   RTE_GRAPH feature arc object
+ * @param index
+ *   Interface/index
+ * @param feature
+ *   Pointer to feature object returned from @ref
+ *   rte_graph_feature_arc_has_feature() or @ref
+ *   rte_graph_feature_arc_first_feature_data_get()
+ */
+static inline void
+__rte_graph_feature_data_prefetch(rte_graph_feature_arc_t _dfl, int index,
+				   rte_graph_feature_t feature)
+{
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+	struct rte_graph_feature *df = rte_graph_feature_get(dfl, index);
+
+	rte_prefetch0((void *)rte_graph_feature_data_get(df, feature));
+}
+
+/**
+ * Prefetch feature data upfront. Performs sanity checks when
+ * RTE_GRAPH_FEATURE_ARC_DEBUG is defined.
+ *
+ * @param _dfl
+ *   RTE_GRAPH feature arc object
+ * @param index
+ *   Interface/index
+ * @param feature
+ *   Pointer to feature object returned from @ref
+ *   rte_graph_feature_arc_has_feature() or @ref
+ *   rte_graph_feature_arc_first_feature_data_get()
+ */
+static inline void
+rte_graph_feature_data_prefetch(rte_graph_feature_arc_t _dfl, uint32_t index,
+				rte_graph_feature_t feature)
+{
+#ifdef RTE_GRAPH_FEATURE_ARC_DEBUG
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+
+	if (unlikely(index >= dfl->max_indexes))
+		return;
+
+	if (unlikely(feature >= rte_graph_feature_cast(dfl->max_features)))
+		return;
+#endif
+
+	if (feature != RTE_GRAPH_FEATURE_INVALID_VALUE)
+		__rte_graph_feature_data_prefetch(_dfl, index, feature);
+}
+
+/**
+ * Fast path API to get first feature data aka {edge, int64_t data}
+ *
+ * Must be called in feature_arc->start_node processing
+ *
+ * @param _dfl
+ *   Feature arc object
+ * @param feature
+ *  returned from rte_graph_feature_arc_has_feature()
+ * @param index
+ *   Interface/Index
+ * @param[out] edge
+ *   Pointer to rte_node edge. Valid if API returns Success
+ * @param[out] data
+ *   Pointer to int64_t data set via rte_graph_feature_enable(). Valid if API returns
+ *   Success
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+static inline int
+rte_graph_feature_arc_first_feature_data_get(struct rte_graph_feature_arc *dfl,
+					     rte_graph_feature_t feature,
+					     uint32_t index, rte_edge_t *edge,
+					     int64_t *data)
+{
+	struct rte_graph_feature *df = rte_graph_feature_get(dfl, index);
+	struct rte_graph_feature_data *dfd = NULL;
+
+	dfd = rte_graph_feature_data_get(df, feature);
+
+#ifdef RTE_GRAPH_FEATURE_ARC_DEBUG
+	/** Check feature sanity */
+	if (unlikely(dfd->feature_data_index != feature))
+		return -1;
+
+	if (unlikely(!edge && !data))
+		return -1;
+#endif
+
+	*edge = dfd->edge_to_this_feature;
+	*data = dfd->data;
+
+	return 0;
+}
+
+/**
+ * Fast path API to get next feature data aka {edge, int64_t data}
+ *
+ * Must NOT be called in feature_arc->start_node processing; instead it must
+ * be called in intermediate feature nodes on a feature arc.
+ *
+ * @param _dfl
+ *   Feature arc object
+ * @param feature
+ *  returned from rte_graph_feature_arc_has_next_feature()
+ * @param index
+ *   Interface/Index
+ * @param[out] edge
+ *   Pointer to rte_node edge. Valid if API returns Success
+ * @param[out] data
+ *   Pointer to int64_t data set via rte_graph_feature_enable(). Valid if API returns
+ *   Success
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+static inline int
+rte_graph_feature_arc_next_feature_data_get(struct rte_graph_feature_arc *dfl,
+					    rte_graph_feature_t feature,
+					    uint32_t index, rte_edge_t *edge,
+					    int64_t *data)
+{
+	struct rte_graph_feature *df = rte_graph_feature_get(dfl, index);
+	struct rte_graph_feature_data *dfd = NULL;
+
+	dfd = rte_graph_feature_data_get(df, feature);
+
+#ifdef RTE_GRAPH_FEATURE_ARC_DEBUG
+	/** Check feature sanity */
+	if (unlikely(dfd->feature_data_index != feature))
+		return -1;
+
+	if (unlikely(!edge && !data))
+		return -1;
+#endif
+
+	*edge = dfd->edge_to_next_feature;
+	*data = dfd->data;
+
+	return 0;
+}
+
+/**
+ * Fast path API to get next feature data aka {edge, int64_t data}
+ *
+ * @param _dfl
+ *   Feature arc object
+ * @param feature
+ *  returned from rte_graph_feature_arc_has_feature()
+ * @param index
+ *   Interface/Index
+ * @param[out] edge
+ *   Pointer to rte_node edge. Valid if API returns Success
+ * @param[out] data
+ *   Pointer to int64_t data set via rte_graph_feature_enable(). Valid if API returns
+ *   Success
+ *
+ * @return
+ *  0: Success
+ * <0: Failure
+ */
+static inline int
+rte_graph_feature_arc_feature_data_get(rte_graph_feature_arc_t _dfl,
+				       rte_graph_feature_t feature, uint32_t
+				       index, rte_edge_t *edge, int64_t *data)
+{
+	struct rte_graph_feature_arc *dfl = rte_graph_feature_arc_get(_dfl);
+
+	if (feature == RTE_GRAPH_FEATURE_INVALID_VALUE)
+		return rte_graph_feature_arc_first_feature_data_get(dfl, feature, index, edge,
+								    data);
+	else
+		return rte_graph_feature_arc_next_feature_data_get(dfl, feature, index, edge, data);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/lib/graph/version.map b/lib/graph/version.map
index c84446cdba..b409a2425f 100644
--- a/lib/graph/version.map
+++ b/lib/graph/version.map
@@ -52,3 +52,19 @@ DPDK_24 {
 
 	local: *;
 };
+
+EXPERIMENTAL {
+	global:
+
+	# added in 24.07
+	rte_graph_feature_arc_init;
+	rte_graph_feature_arc_create;
+	rte_graph_feature_arc_lookup_by_name;
+	rte_graph_feature_add;
+	rte_graph_feature_enable;
+	rte_graph_feature_validate;
+	rte_graph_feature_disable;
+	rte_graph_feature_destroy;
+	rte_graph_feature_arc_destroy;
+	rte_graph_feature_arc_cleanup;
+};
-- 
2.25.1



* [RFC PATCH 2/2] graph: add ip4 output feature arc
  2024-04-26 12:22 [RFC PATCH 0/2] add feature arc in rte_graph Nitin Saxena
  2024-04-26 12:22 ` [RFC PATCH 1/2] graph: add feature arc support Nitin Saxena
@ 2024-04-26 12:22 ` Nitin Saxena
  1 sibling, 0 replies; 3+ messages in thread
From: Nitin Saxena @ 2024-04-26 12:22 UTC
  To: Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram, Zhirun Yan; +Cc: dev

Signed-off-by: Nitin Saxena <nsaxena@marvell.com>
---
 lib/node/ip4_rewrite.c      | 278 ++++++++++++++++++++++++++++--------
 lib/node/ip4_rewrite_priv.h |  10 +-
 lib/node/node_private.h     |  10 +-
 lib/node/rte_node_ip4_api.h |   3 +
 4 files changed, 233 insertions(+), 68 deletions(-)

diff --git a/lib/node/ip4_rewrite.c b/lib/node/ip4_rewrite.c
index 34a920df5e..60efd6b171 100644
--- a/lib/node/ip4_rewrite.c
+++ b/lib/node/ip4_rewrite.c
@@ -20,6 +20,7 @@ struct ip4_rewrite_node_ctx {
 	int mbuf_priv1_off;
 	/* Cached next index */
 	uint16_t next_index;
+	rte_graph_feature_arc_t output_feature_arc;
 };
 
 static struct ip4_rewrite_node_main *ip4_rewrite_nm;
@@ -30,21 +31,34 @@ static struct ip4_rewrite_node_main *ip4_rewrite_nm;
 #define IP4_REWRITE_NODE_PRIV1_OFF(ctx) \
 	(((struct ip4_rewrite_node_ctx *)ctx)->mbuf_priv1_off)
 
+#define IP4_REWRITE_NODE_OUTPUT_FEATURE_ARC(ctx) \
+	(((struct ip4_rewrite_node_ctx *)ctx)->output_feature_arc)
+
 static uint16_t
 ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
 			 void **objs, uint16_t nb_objs)
 {
+	rte_graph_feature_arc_t out_feature_arc = IP4_REWRITE_NODE_OUTPUT_FEATURE_ARC(node->ctx);
+	uint16_t next0 = 0, next1 = 0, next2 = 0, next3 = 0, next_index;
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
 	struct ip4_rewrite_nh_header *nh = ip4_rewrite_nm->nh;
 	const int dyn = IP4_REWRITE_NODE_PRIV1_OFF(node->ctx);
-	uint16_t next0, next1, next2, next3, next_index;
-	struct rte_ipv4_hdr *ip0, *ip1, *ip2, *ip3;
 	uint16_t n_left_from, held = 0, last_spec = 0;
+	struct rte_ipv4_hdr *ip0, *ip1, *ip2, *ip3;
+	int b0_feat, b1_feat, b2_feat, b3_feat;
+	rte_graph_feature_t f0, f1, f2, f3;
+	uint16_t tx0, tx1, tx2, tx3;
+	int64_t fd0, fd1, fd2, fd3;
 	void *d0, *d1, *d2, *d3;
 	void **to_next, **from;
 	rte_xmm_t priv01;
 	rte_xmm_t priv23;
-	int i;
+	int i, has_feat;
+
+	RTE_SET_USED(fd0);
+	RTE_SET_USED(fd1);
+	RTE_SET_USED(fd2);
+	RTE_SET_USED(fd3);
 
 	/* Speculative next as last next */
 	next_index = IP4_REWRITE_NODE_LAST_NEXT(node->ctx);
@@ -83,54 +97,167 @@ ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
 		priv23.u64[0] = node_mbuf_priv1(mbuf2, dyn)->u;
 		priv23.u64[1] = node_mbuf_priv1(mbuf3, dyn)->u;
 
-		/* Increment checksum by one. */
-		priv01.u32[1] += rte_cpu_to_be_16(0x0100);
-		priv01.u32[3] += rte_cpu_to_be_16(0x0100);
-		priv23.u32[1] += rte_cpu_to_be_16(0x0100);
-		priv23.u32[3] += rte_cpu_to_be_16(0x0100);
-
-		/* Update ttl,cksum rewrite ethernet hdr on mbuf0 */
-		d0 = rte_pktmbuf_mtod(mbuf0, void *);
-		rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
-			   nh[priv01.u16[0]].rewrite_len);
-
-		next0 = nh[priv01.u16[0]].tx_node;
-		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
-					      sizeof(struct rte_ether_hdr));
-		ip0->time_to_live = priv01.u16[1] - 1;
-		ip0->hdr_checksum = priv01.u16[2] + priv01.u16[3];
-
-		/* Update ttl,cksum rewrite ethernet hdr on mbuf1 */
-		d1 = rte_pktmbuf_mtod(mbuf1, void *);
-		rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
-			   nh[priv01.u16[4]].rewrite_len);
-
-		next1 = nh[priv01.u16[4]].tx_node;
-		ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
-					      sizeof(struct rte_ether_hdr));
-		ip1->time_to_live = priv01.u16[5] - 1;
-		ip1->hdr_checksum = priv01.u16[6] + priv01.u16[7];
-
-		/* Update ttl,cksum rewrite ethernet hdr on mbuf2 */
-		d2 = rte_pktmbuf_mtod(mbuf2, void *);
-		rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
-			   nh[priv23.u16[0]].rewrite_len);
-		next2 = nh[priv23.u16[0]].tx_node;
-		ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
-					      sizeof(struct rte_ether_hdr));
-		ip2->time_to_live = priv23.u16[1] - 1;
-		ip2->hdr_checksum = priv23.u16[2] + priv23.u16[3];
-
-		/* Update ttl,cksum rewrite ethernet hdr on mbuf3 */
-		d3 = rte_pktmbuf_mtod(mbuf3, void *);
-		rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
-			   nh[priv23.u16[4]].rewrite_len);
-
-		next3 = nh[priv23.u16[4]].tx_node;
-		ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
-					      sizeof(struct rte_ether_hdr));
-		ip3->time_to_live = priv23.u16[5] - 1;
-		ip3->hdr_checksum = priv23.u16[6] + priv23.u16[7];
+		f0 = nh[priv01.u16[0]].nh_feature;
+		f1 = nh[priv01.u16[4]].nh_feature;
+		f2 = nh[priv23.u16[0]].nh_feature;
+		f3 = nh[priv23.u16[4]].nh_feature;
+
+		tx0 = nh[priv01.u16[0]].tx_node - 1;
+		tx1 = nh[priv01.u16[4]].tx_node - 1;
+		tx2 = nh[priv23.u16[0]].tx_node - 1;
+		tx3 = nh[priv23.u16[4]].tx_node - 1;
+
+		b0_feat = rte_graph_feature_arc_has_feature(out_feature_arc, tx0, &f0);
+		b1_feat = rte_graph_feature_arc_has_feature(out_feature_arc, tx1, &f1);
+		b2_feat = rte_graph_feature_arc_has_feature(out_feature_arc, tx2, &f2);
+		b3_feat = rte_graph_feature_arc_has_feature(out_feature_arc, tx3, &f3);
+
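+		/* Branchless combine: take the feature path if any of the
+		 * four packets has a feature enabled on its Tx interface
+		 */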
+		has_feat = b0_feat | b1_feat | b2_feat | b3_feat;
+
+		if (unlikely(has_feat)) {
+			/* prefetch feature data */
+			rte_graph_feature_data_prefetch(out_feature_arc, tx0, f0);
+			rte_graph_feature_data_prefetch(out_feature_arc, tx1, f1);
+			rte_graph_feature_data_prefetch(out_feature_arc, tx2, f2);
+			rte_graph_feature_data_prefetch(out_feature_arc, tx3, f3);
+
+			/* Save feature into mbuf */
+			node_mbuf_priv1(mbuf0, dyn)->current_feature = f0;
+			node_mbuf_priv1(mbuf1, dyn)->current_feature = f1;
+			node_mbuf_priv1(mbuf2, dyn)->current_feature = f2;
+			node_mbuf_priv1(mbuf3, dyn)->current_feature = f3;
+
+			/* Save index into mbuf for next feature node */
+			node_mbuf_priv1(mbuf0, dyn)->index = tx0;
+			node_mbuf_priv1(mbuf1, dyn)->index = tx1;
+			node_mbuf_priv1(mbuf2, dyn)->index = tx2;
+			node_mbuf_priv1(mbuf3, dyn)->index = tx3;
+
+			/* Do all four packets have a feature enabled? */
+			has_feat = b0_feat && b1_feat && b2_feat && b3_feat;
+			if (has_feat) {
+				rte_graph_feature_arc_feature_data_get(out_feature_arc,
+								       f0, tx0, &next0, &fd0);
+				rte_graph_feature_arc_feature_data_get(out_feature_arc,
+								       f1, tx1, &next1, &fd1);
+				rte_graph_feature_arc_feature_data_get(out_feature_arc,
+								       f2, tx2, &next2, &fd2);
+				rte_graph_feature_arc_feature_data_get(out_feature_arc,
+								       f3, tx3, &next3, &fd3);
+			} else {
+				if (b0_feat) {
+					rte_graph_feature_arc_feature_data_get(out_feature_arc, f0,
+									       tx0, &next0, &fd0);
+				} else {
+					priv01.u32[1] += rte_cpu_to_be_16(0x0100);
+					/* Update ttl,cksum rewrite ethernet hdr on mbuf0 */
+					d0 = rte_pktmbuf_mtod(mbuf0, void *);
+					rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
+						   nh[priv01.u16[0]].rewrite_len);
+
+					next0 = tx0 + 1;
+					ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
+								      sizeof(struct rte_ether_hdr));
+					ip0->time_to_live = priv01.u16[1] - 1;
+					ip0->hdr_checksum = priv01.u16[2] + priv01.u16[3];
+				}
+				if (b1_feat) {
+					rte_graph_feature_arc_feature_data_get(out_feature_arc, f1,
+									       tx1, &next1, &fd1);
+				} else {
+					priv01.u32[3] += rte_cpu_to_be_16(0x0100);
+					/* Update ttl,cksum rewrite ethernet hdr on mbuf1 */
+					d1 = rte_pktmbuf_mtod(mbuf1, void *);
+					rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
+						   nh[priv01.u16[4]].rewrite_len);
+
+					next1 = tx1 + 1;
+					ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
+								      sizeof(struct rte_ether_hdr));
+					ip1->time_to_live = priv01.u16[5] - 1;
+					ip1->hdr_checksum = priv01.u16[6] + priv01.u16[7];
+				}
+				if (b2_feat) {
+					rte_graph_feature_arc_feature_data_get(out_feature_arc, f2,
+									       tx2, &next2, &fd2);
+				} else {
+					priv23.u32[1] += rte_cpu_to_be_16(0x0100);
+					/* Update ttl,cksum rewrite ethernet hdr on mbuf2 */
+					d2 = rte_pktmbuf_mtod(mbuf2, void *);
+					rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
+						   nh[priv23.u16[0]].rewrite_len);
+					next2 = tx2 + 1;
+					ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
+								      sizeof(struct rte_ether_hdr));
+					ip2->time_to_live = priv23.u16[1] - 1;
+					ip2->hdr_checksum = priv23.u16[2] + priv23.u16[3];
+				}
+				if (b3_feat) {
+					rte_graph_feature_arc_feature_data_get(out_feature_arc, f3,
+									       tx3, &next3, &fd3);
+				} else {
+					priv23.u32[3] += rte_cpu_to_be_16(0x0100);
+					/* Update ttl,cksum rewrite ethernet hdr on mbuf3 */
+					d3 = rte_pktmbuf_mtod(mbuf3, void *);
+					rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
+						   nh[priv23.u16[4]].rewrite_len);
+					next3 = tx3 + 1;
+					ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
+								      sizeof(struct rte_ether_hdr));
+					ip3->time_to_live = priv23.u16[5] - 1;
+					ip3->hdr_checksum = priv23.u16[6] + priv23.u16[7];
+				}
+			}
+		} else {
+			/* Increment checksum by one. */
+			priv01.u32[1] += rte_cpu_to_be_16(0x0100);
+			priv01.u32[3] += rte_cpu_to_be_16(0x0100);
+			priv23.u32[1] += rte_cpu_to_be_16(0x0100);
+			priv23.u32[3] += rte_cpu_to_be_16(0x0100);
+
+			/* Update ttl,cksum rewrite ethernet hdr on mbuf0 */
+			d0 = rte_pktmbuf_mtod(mbuf0, void *);
+			rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
+				   nh[priv01.u16[0]].rewrite_len);
+
+			next0 = tx0 + 1;
+			ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
+						      sizeof(struct rte_ether_hdr));
+			ip0->time_to_live = priv01.u16[1] - 1;
+			ip0->hdr_checksum = priv01.u16[2] + priv01.u16[3];
+
+			/* Update ttl,cksum rewrite ethernet hdr on mbuf1 */
+			d1 = rte_pktmbuf_mtod(mbuf1, void *);
+			rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
+				   nh[priv01.u16[4]].rewrite_len);
+
+			next1 = tx1 + 1;
+			ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
+						      sizeof(struct rte_ether_hdr));
+			ip1->time_to_live = priv01.u16[5] - 1;
+			ip1->hdr_checksum = priv01.u16[6] + priv01.u16[7];
+
+			/* Update ttl,cksum rewrite ethernet hdr on mbuf2 */
+			d2 = rte_pktmbuf_mtod(mbuf2, void *);
+			rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
+				   nh[priv23.u16[0]].rewrite_len);
+			next2 = tx2 + 1;
+			ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
+						      sizeof(struct rte_ether_hdr));
+			ip2->time_to_live = priv23.u16[1] - 1;
+			ip2->hdr_checksum = priv23.u16[2] + priv23.u16[3];
+
+			/* Update ttl,cksum rewrite ethernet hdr on mbuf3 */
+			d3 = rte_pktmbuf_mtod(mbuf3, void *);
+			rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
+				   nh[priv23.u16[4]].rewrite_len);
+
+			next3 = tx3 + 1;
+			ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
+						      sizeof(struct rte_ether_hdr));
+			ip3->time_to_live = priv23.u16[5] - 1;
+			ip3->hdr_checksum = priv23.u16[6] + priv23.u16[7];
+		}
 
 		/* Enqueue four to next node */
 		rte_edge_t fix_spec =
@@ -212,19 +339,28 @@ ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
 		pkts += 1;
 		n_left_from -= 1;
 
-		d0 = rte_pktmbuf_mtod(mbuf0, void *);
-		rte_memcpy(d0, nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_data,
-			   nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_len);
-
-		next0 = nh[node_mbuf_priv1(mbuf0, dyn)->nh].tx_node;
-		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
-					      sizeof(struct rte_ether_hdr));
-		chksum = node_mbuf_priv1(mbuf0, dyn)->cksum +
-			 rte_cpu_to_be_16(0x0100);
-		chksum += chksum >= 0xffff;
-		ip0->hdr_checksum = chksum;
-		ip0->time_to_live = node_mbuf_priv1(mbuf0, dyn)->ttl - 1;
+		tx0 = nh[node_mbuf_priv1(mbuf0, dyn)->nh].tx_node - 1;
+		f0 = nh[node_mbuf_priv1(mbuf0, dyn)->nh].nh_feature;
 
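+	/* Steer to the feature node if a feature is enabled on the Tx
+	 * interface; otherwise rewrite and send to the Tx node
+	 */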
+		if (unlikely(rte_graph_feature_arc_has_feature(out_feature_arc, tx0, &f0))) {
+			rte_graph_feature_arc_feature_data_get(out_feature_arc, f0, tx0,
+							       &next0, &fd0);
+			node_mbuf_priv1(mbuf0, dyn)->current_feature = f0;
+			node_mbuf_priv1(mbuf0, dyn)->index = tx0;
+		} else {
+			d0 = rte_pktmbuf_mtod(mbuf0, void *);
+			rte_memcpy(d0, nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_data,
+				   nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_len);
+
+			next0 = tx0 + 1;
+			ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
+						      sizeof(struct rte_ether_hdr));
+			chksum = node_mbuf_priv1(mbuf0, dyn)->cksum +
+				 rte_cpu_to_be_16(0x0100);
+			chksum += chksum >= 0xffff;
+			ip0->hdr_checksum = chksum;
+			ip0->time_to_live = node_mbuf_priv1(mbuf0, dyn)->ttl - 1;
+		}
 		if (unlikely(next_index ^ next0)) {
 			/* Copy things successfully speculated till now */
 			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
@@ -258,19 +394,34 @@ ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
 static int
 ip4_rewrite_node_init(const struct rte_graph *graph, struct rte_node *node)
 {
+	rte_graph_feature_arc_t feature_arc = RTE_GRAPH_FEATURE_ARC_INITIALIZER;
 	static bool init_once;
 
 	RTE_SET_USED(graph);
 	RTE_BUILD_BUG_ON(sizeof(struct ip4_rewrite_node_ctx) > RTE_NODE_CTX_SZ);
+	RTE_BUILD_BUG_ON(sizeof(struct ip4_rewrite_nh_header) != RTE_CACHE_LINE_MIN_SIZE);
 
 	if (!init_once) {
 		node_mbuf_priv1_dynfield_offset = rte_mbuf_dynfield_register(
 				&node_mbuf_priv1_dynfield_desc);
 		if (node_mbuf_priv1_dynfield_offset < 0)
 			return -rte_errno;
+
+		/* Create ipv4-output feature arc, if not already created */
+		if (rte_graph_feature_arc_lookup_by_name(RTE_IP4_OUTPUT_FEATURE_ARC_NAME, NULL) &&
+		    rte_graph_feature_arc_create(RTE_IP4_OUTPUT_FEATURE_ARC_NAME,
+						 RTE_GRAPH_FEATURE_MAX_PER_ARC, /* max features */
+						 RTE_MAX_ETHPORTS + 1, /* max output interfaces */
+						 ip4_rewrite_node_get(),
+						 &feature_arc)) {
+			return -rte_errno;
+		}
+
 		init_once = true;
 	}
 	IP4_REWRITE_NODE_PRIV1_OFF(node->ctx) = node_mbuf_priv1_dynfield_offset;
+
+	/* Fetch the arc handle on every node init; init_once skips
+	 * creation for subsequent graph instances
+	 */
+	if (rte_graph_feature_arc_lookup_by_name(RTE_IP4_OUTPUT_FEATURE_ARC_NAME, &feature_arc))
+		return -rte_errno;
+	IP4_REWRITE_NODE_OUTPUT_FEATURE_ARC(node->ctx) = feature_arc;
 
 	node_dbg("ip4_rewrite", "Initialized ip4_rewrite node");
 
@@ -323,6 +474,7 @@ rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
 	nh->tx_node = ip4_rewrite_nm->next_index[dst_port];
 	nh->rewrite_len = rewrite_len;
 	nh->enabled = true;
+	nh->nh_feature = RTE_GRAPH_FEATURE_INVALID_VALUE;
 
 	return 0;
 }
diff --git a/lib/node/ip4_rewrite_priv.h b/lib/node/ip4_rewrite_priv.h
index 5105ec1d29..8b868026bf 100644
--- a/lib/node/ip4_rewrite_priv.h
+++ b/lib/node/ip4_rewrite_priv.h
@@ -5,9 +5,10 @@
 #define __INCLUDE_IP4_REWRITE_PRIV_H__
 
 #include <rte_common.h>
+#include <rte_graph_feature_arc.h>
 
 #define RTE_GRAPH_IP4_REWRITE_MAX_NH 64
-#define RTE_GRAPH_IP4_REWRITE_MAX_LEN 56
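+/* Reduced from 56 so that struct ip4_rewrite_nh_header, with the new
+ * nh_feature field, still fits in RTE_CACHE_LINE_MIN_SIZE
+ */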
+#define RTE_GRAPH_IP4_REWRITE_MAX_LEN 53
 
 /**
  * @internal
@@ -15,11 +16,10 @@
  * Ipv4 rewrite next hop header data structure. Used to store port specific
  * rewrite data.
  */
-struct ip4_rewrite_nh_header {
+struct __rte_cache_min_aligned ip4_rewrite_nh_header {
 	uint16_t rewrite_len; /**< Header rewrite length. */
 	uint16_t tx_node;     /**< Tx node next index identifier. */
-	uint16_t enabled;     /**< NH enable flag */
-	uint16_t rsvd;
+	rte_graph_feature_t nh_feature; /**< Feature associated with next hop. */
 	union {
 		struct {
 			struct rte_ether_addr dst;
@@ -30,6 +30,8 @@ struct ip4_rewrite_nh_header {
 		uint8_t rewrite_data[RTE_GRAPH_IP4_REWRITE_MAX_LEN];
 		/**< Generic rewrite data */
 	};
+	/* used in control path */
+	uint8_t enabled;     /**< NH enable flag */
 };
 
 /**
diff --git a/lib/node/node_private.h b/lib/node/node_private.h
index 1de7306792..36f6e05624 100644
--- a/lib/node/node_private.h
+++ b/lib/node/node_private.h
@@ -12,6 +12,9 @@
 #include <rte_mbuf.h>
 #include <rte_mbuf_dyn.h>
 
+#include <rte_graph_worker_common.h>
+#include <rte_graph_feature_arc_worker.h>
+
 extern int rte_node_logtype;
 #define RTE_LOGTYPE_NODE rte_node_logtype
 
@@ -35,9 +38,14 @@ struct node_mbuf_priv1 {
 			uint16_t ttl;
 			uint32_t cksum;
 		};
-
 		uint64_t u;
 	};
+	struct {
+		/** feature that current mbuf holds */
+		rte_graph_feature_t current_feature;
+		/** interface index */
+		uint32_t index;
+	};
 };
 
 static const struct rte_mbuf_dynfield node_mbuf_priv1_dynfield_desc = {
diff --git a/lib/node/rte_node_ip4_api.h b/lib/node/rte_node_ip4_api.h
index 24f8ec843a..0de06f7fc7 100644
--- a/lib/node/rte_node_ip4_api.h
+++ b/lib/node/rte_node_ip4_api.h
@@ -23,6 +23,7 @@ extern "C" {
 #include <rte_compat.h>
 
 #include <rte_graph.h>
+#include <rte_graph_feature_arc_worker.h>
 
 /**
  * IP4 lookup next nodes.
@@ -67,6 +68,8 @@ struct rte_node_ip4_reassembly_cfg {
 	/**< Node identifier to configure. */
 };
 
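+/** Name of the feature arc for IPv4 output, sourced at the ip4_rewrite node */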
+#define RTE_IP4_OUTPUT_FEATURE_ARC_NAME "ipv4-output"
+
 /**
  * Add ipv4 route to lookup table.
  *
-- 
2.25.1


^ permalink raw reply	[flat|nested] 3+ messages in thread
