DPDK patches and discussions
From: <jerinj@marvell.com>
To: <dev@dpdk.org>
Cc: <pkapoor@marvell.com>, <ndabilpuram@marvell.com>,
	<kirankumark@marvell.com>, <pbhagavatula@marvell.com>,
	<pathreya@marvell.com>, <nsaxena@marvell.com>,
	<sshankarnara@marvell.com>, <honnappa.nagarahalli@arm.com>,
	<thomas@monjalon.net>, <david.marchand@redhat.com>,
	<ferruh.yigit@intel.com>, <arybchenko@solarflare.com>,
	<ajit.khaparde@broadcom.com>, <xiaolong.ye@intel.com>,
	<rasland@mellanox.com>, <maxime.coquelin@redhat.com>,
	<akhil.goyal@nxp.com>, <cristian.dumitrescu@intel.com>,
	<john.mcnamara@intel.com>, <bruce.richardson@intel.com>,
	<anatoly.burakov@intel.com>, <gavin.hu@arm.com>,
	<drc@linux.vnet.ibm.com>, <konstantin.ananyev@intel.com>,
	<pallavi.kadam@intel.com>, <olivier.matz@6wind.com>,
	<gage.eads@intel.com>, <nikhil.rao@intel.com>,
	<erik.g.carrillo@intel.com>, <hemant.agrawal@nxp.com>,
	<artem.andreev@oktetlabs.ru>, <sthemmin@microsoft.com>,
	<shahafs@mellanox.com>, <keith.wiles@intel.com>,
	<mattias.ronnblom@ericsson.com>, <jasvinder.singh@intel.com>,
	<vladimir.medvedkin@intel.com>, <mdr@ashroe.eu>,
	<techboard@dpdk.org>, "Jerin Jacob" <jerinj@marvell.com>
Subject: [dpdk-dev] [RFC PATCH 2/5] node: add packet processing nodes
Date: Fri, 31 Jan 2020 22:31:58 +0530
Message-ID: <20200131170201.3236153-3-jerinj@marvell.com>
In-Reply-To: <20200131170201.3236153-1-jerinj@marvell.com>

From: Nithin Dabilpuram <ndabilpuram@marvell.com>

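This patch adds the initial set of packet processing nodes for the graph
subsystem: ethdev Rx and Tx nodes (cloned per port-queue pair and per
port respectively by the control API), an LPM-based IPv4 lookup node, an
IPv4 rewrite node, and pkt_drop and null nodes. Public control APIs are
added to map ethdev ports to these nodes (rte_node_eth_config()) and to
populate routes and rewrite data (rte_node_ip4_route_add(),
rte_node_ip4_rewrite_add()). Mempools passed to the config API must
reserve at least RTE_NODE_MBUF_PRIV2_SIZE bytes of mbuf private area.

A minimal usage sketch of the control API (illustrative only: mp,
nb_graphs, rewrite_data, rewrite_len and the route/port values below are
placeholders, not part of this patch):

	struct rte_node_ethdev_config cfg = {
		.port_id = 0,
		.num_rx_queues = 1,
		.num_tx_queues = nb_graphs, /* needs one Tx queue per graph */
		.mp = &mp,
		.mp_count = 1,
	};

	if (rte_node_eth_config(&cfg, 1, nb_graphs) < 0)
		rte_exit(EXIT_FAILURE, "ethdev node config failed\n");

	/* Route 10.0.2.0/24 to next hop 2 via the ip4_rewrite node */
	rte_node_ip4_route_add(RTE_IPV4(10, 0, 2, 0), 24, 2,
			       IP4_LOOKUP_NEXT_REWRITE);

	/* Rewrite data (new Ethernet header) for next hop 2, Tx port 0 */
	rte_node_ip4_rewrite_add(2, rewrite_data, rewrite_len, 0);
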
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 app/test/meson.build                 |   7 +-
 config/common_base                   |   6 +
 lib/Makefile                         |   4 +
 lib/librte_node/Makefile             |  30 ++
 lib/librte_node/ethdev_ctrl.c        | 106 +++++
 lib/librte_node/ethdev_rx.c          | 218 +++++++++
 lib/librte_node/ethdev_rx.h          |  17 +
 lib/librte_node/ethdev_rx_priv.h     |  45 ++
 lib/librte_node/ethdev_tx.c          |  74 +++
 lib/librte_node/ethdev_tx_priv.h     |  33 ++
 lib/librte_node/ip4_lookup.c         | 657 +++++++++++++++++++++++++++
 lib/librte_node/ip4_lookup_priv.h    |  17 +
 lib/librte_node/ip4_rewrite.c        | 340 ++++++++++++++
 lib/librte_node/ip4_rewrite_priv.h   |  44 ++
 lib/librte_node/log.c                |  14 +
 lib/librte_node/meson.build          |   8 +
 lib/librte_node/node_private.h       |  61 +++
 lib/librte_node/null.c               |  23 +
 lib/librte_node/pkt_drop.c           |  26 ++
 lib/librte_node/rte_node_eth_api.h   |  31 ++
 lib/librte_node/rte_node_ip4_api.h   |  33 ++
 lib/librte_node/rte_node_version.map |   9 +
 lib/meson.build                      |   5 +-
 meson.build                          |   1 +
 mk/rte.app.mk                        |   1 +
 25 files changed, 1807 insertions(+), 3 deletions(-)
 create mode 100644 lib/librte_node/Makefile
 create mode 100644 lib/librte_node/ethdev_ctrl.c
 create mode 100644 lib/librte_node/ethdev_rx.c
 create mode 100644 lib/librte_node/ethdev_rx.h
 create mode 100644 lib/librte_node/ethdev_rx_priv.h
 create mode 100644 lib/librte_node/ethdev_tx.c
 create mode 100644 lib/librte_node/ethdev_tx_priv.h
 create mode 100644 lib/librte_node/ip4_lookup.c
 create mode 100644 lib/librte_node/ip4_lookup_priv.h
 create mode 100644 lib/librte_node/ip4_rewrite.c
 create mode 100644 lib/librte_node/ip4_rewrite_priv.h
 create mode 100644 lib/librte_node/log.c
 create mode 100644 lib/librte_node/meson.build
 create mode 100644 lib/librte_node/node_private.h
 create mode 100644 lib/librte_node/null.c
 create mode 100644 lib/librte_node/pkt_drop.c
 create mode 100644 lib/librte_node/rte_node_eth_api.h
 create mode 100644 lib/librte_node/rte_node_ip4_api.h
 create mode 100644 lib/librte_node/rte_node_version.map

diff --git a/app/test/meson.build b/app/test/meson.build
index e1cdae3cb..7d761c8fa 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -159,8 +159,9 @@ test_deps = ['acl',
 	'rib',
 	'ring',
 	'stack',
-	'timer'
+	'timer',
 	'graph',
+	'node'
 ]
 
 fast_test_names = [
@@ -382,13 +383,15 @@ endforeach
 test_dep_objs += cc.find_library('execinfo', required: false)
 
 link_libs = []
+link_nodes = []
 if get_option('default_library') == 'static'
 	link_libs = dpdk_drivers
+	link_nodes = dpdk_graph_nodes
 endif
 
 dpdk_test = executable('dpdk-test',
 	test_sources,
-	link_whole: link_libs,
+	link_whole: link_libs + link_nodes,
 	dependencies: test_dep_objs,
 	c_args: [cflags, '-DALLOW_EXPERIMENTAL_API'],
 	install_rpath: driver_install_path,
diff --git a/config/common_base b/config/common_base
index badcc0be5..f7f3f2607 100644
--- a/config/common_base
+++ b/config/common_base
@@ -1077,6 +1077,12 @@ CONFIG_RTE_LIBRTE_GRAPH=y
 CONFIG_RTE_GRAPH_BURST_SIZE=256
 CONFIG_RTE_LIBRTE_GRAPH_STATS=y
 CONFIG_RTE_LIBRTE_GRAPH_DEBUG=n
+#
+# Compile librte_node
+#
+CONFIG_RTE_LIBRTE_NODE=y
+CONFIG_RTE_LIBRTE_NODE_DEBUG=n
+
 #
 # Compile the test application
 #
diff --git a/lib/Makefile b/lib/Makefile
index 495f572bf..fe3fcc70f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -121,6 +121,10 @@ DEPDIRS-librte_rcu := librte_eal
 
 DIRS-$(CONFIG_RTE_LIBRTE_GRAPH) += librte_graph
 DEPDIRS-librte_graph := librte_eal librte_mbuf
+
+DIRS-$(CONFIG_RTE_LIBRTE_NODE) += librte_node
+DEPDIRS-librte_node := librte_graph librte_lpm librte_ethdev librte_mbuf
+
 ifeq ($(CONFIG_RTE_EXEC_ENV_LINUX),y)
 DIRS-$(CONFIG_RTE_LIBRTE_KNI) += librte_kni
 endif
diff --git a/lib/librte_node/Makefile b/lib/librte_node/Makefile
new file mode 100644
index 000000000..aaf041580
--- /dev/null
+++ b/lib/librte_node/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2020 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_node.a
+
+CFLAGS += -O3 -DALLOW_EXPERIMENTAL_API
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_graph -lrte_mbuf -lrte_lpm -lrte_ethdev -lrte_mempool
+
+EXPORT_MAP := rte_node_version.map
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_NODE) += null.c
+SRCS-$(CONFIG_RTE_LIBRTE_NODE) += log.c
+SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ethdev_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ethdev_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ethdev_ctrl.c
+SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ip4_lookup.c
+SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ip4_rewrite.c
+SRCS-$(CONFIG_RTE_LIBRTE_NODE) += pkt_drop.c
+
+# install header files
+SYMLINK-$(CONFIG_RTE_LIBRTE_NODE)-include += rte_node_ip4_api.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_NODE)-include += rte_node_eth_api.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_node/ethdev_ctrl.c b/lib/librte_node/ethdev_ctrl.c
new file mode 100644
index 000000000..7fc7af7f7
--- /dev/null
+++ b/lib/librte_node/ethdev_ctrl.c
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_graph.h>
+#include <rte_node_eth_api.h>
+
+#include "ethdev_rx_priv.h"
+#include "ethdev_tx_priv.h"
+#include "ip4_rewrite_priv.h"
+#include "node_private.h"
+
+static struct ethdev_ctrl {
+	uint16_t nb_graphs;
+} ctrl;
+
+int
+rte_node_eth_config(struct rte_node_ethdev_config *conf,
+		    uint16_t nb_confs, uint16_t nb_graphs)
+{
+	uint16_t tx_q_used, rx_q_used, port_id;
+	char name[RTE_NODE_NAMESIZE];
+	const char *next_nodes = name;
+	struct rte_mempool *mp;
+	int i, j, rc;
+	uint32_t id;
+
+	for (i = 0; i < nb_confs; i++) {
+		port_id = conf[i].port_id;
+
+		if (!rte_eth_dev_is_valid_port(port_id))
+			return -EINVAL;
+
+		/* Check for mbuf minimum private size requirement */
+		for (j = 0; j < conf[i].mp_count; j++) {
+			mp = conf[i].mp[j];
+			if (!mp)
+				continue;
+			/* Check for minimum private space */
+			if (rte_pktmbuf_priv_size(mp) <
+			    RTE_NODE_MBUF_PRIV2_SIZE) {
+				node_err("ethdev",
+					 "Minimum mbuf priv size "
+					 "requirement not met by mp %s",
+					 mp->name);
+				return -EINVAL;
+			}
+		}
+
+		rx_q_used = conf[i].num_rx_queues;
+		tx_q_used = conf[i].num_tx_queues;
+		/* Check if we have a txq for each worker */
+		if (tx_q_used < nb_graphs)
+			return -EINVAL;
+
+		/* Create node for each rx port queue pair */
+		for (j = 0; j < rx_q_used; j++) {
+			ethdev_rx_node_elem_t *elem;
+
+			snprintf(name, sizeof(name), "%u-%u", port_id, j);
+			/* Clone a new rx node with same edges as parent */
+			id = rte_node_clone(ethdev_rx_node_base.id, name);
+			if (id == RTE_NODE_ID_INVALID)
+				return -EIO;
+
+			/* Add it to list of ethdev rx nodes for lookup */
+			elem = malloc(sizeof(ethdev_rx_node_elem_t));
+			memset(elem, 0, sizeof(ethdev_rx_node_elem_t));
+			elem->ctx.port_id = port_id;
+			elem->ctx.queue_id = j;
+			elem->nid = id;
+			elem->next = ethdev_rx_main.head;
+			ethdev_rx_main.head = elem;
+
+			node_dbg("ethdev", "rx node %s-%s: is at %u",
+				 ethdev_rx_node_base.name, name, id);
+		}
+
+		/* Create a per port tx node from base node */
+		snprintf(name, sizeof(name), "%u", port_id);
+		/* Clone a new node with same edges as parent */
+		id = rte_node_clone(ethdev_tx_node_base.id, name);
+		ethdev_tx_main.nodes[port_id] = id;
+
+		node_dbg("ethdev", "tx node %s-%s: is at %u",
+			 ethdev_tx_node_base.name, name, id);
+
+		/* Prepare the actual name of the cloned node */
+		snprintf(name, sizeof(name), "ethdev_tx-%u", port_id);
+
+		/* Add this tx port node as next to ip4_rewrite_node */
+		rte_node_edge_update(ip4_rewrite_node.id,
+				     RTE_EDGE_ID_INVALID, &next_nodes, 1);
+		/* Assuming the edge id is the last one allocated */
+		rc = ip4_rewrite_set_next(port_id,
+				rte_node_edge_count(ip4_rewrite_node.id) - 1);
+		if (rc < 0)
+			return rc;
+	}
+
+	ctrl.nb_graphs = nb_graphs;
+	return 0;
+}
diff --git a/lib/librte_node/ethdev_rx.c b/lib/librte_node/ethdev_rx.c
new file mode 100644
index 000000000..48cbc5692
--- /dev/null
+++ b/lib/librte_node/ethdev_rx.c
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_mbuf.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+
+#include "ethdev_rx.h"
+#include "ethdev_rx_priv.h"
+#include "node_private.h"
+
+struct ethdev_rx_node_main ethdev_rx_main;
+
+static __rte_always_inline uint16_t
+ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
+			      uint16_t port, uint16_t queue)
+{
+	uint16_t count, next_index = ETHDEV_RX_NEXT_IP4_LOOKUP;
+
+	/* Get pkts from port */
+	count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
+				 RTE_GRAPH_BURST_SIZE);
+
+	if (!count)
+		return 0;
+	node->idx = count;
+	/* Enqueue to next node */
+	rte_node_next_stream_move(graph, node, next_index);
+
+	return count;
+}
+
+static __rte_always_inline uint16_t
+ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node,
+		       void **objs, uint16_t cnt)
+{
+	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
+	uint16_t n_pkts = 0;
+
+	RTE_SET_USED(objs);
+	RTE_SET_USED(cnt);
+
+	n_pkts = ethdev_rx_node_process_inline(graph, node, ctx->port_id,
+					       ctx->queue_id);
+	return n_pkts;
+}
+
+static inline uint32_t
+l3_ptype(uint16_t etype, uint32_t ptype)
+{
+	ptype = ptype & ~RTE_PTYPE_L3_MASK;
+	if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
+		ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+	else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
+		ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+	return ptype;
+}
+
+/* Callback for soft ptype parsing */
+static uint16_t
+eth_pkt_parse_cb(uint16_t port, uint16_t queue,
+		 struct rte_mbuf **mbufs, uint16_t nb_pkts,
+		 uint16_t max_pkts, void *user_param)
+{
+	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
+	struct rte_ether_hdr *eth_hdr;
+	uint16_t etype, n_left;
+	struct rte_mbuf **pkts;
+
+	RTE_SET_USED(port);
+	RTE_SET_USED(queue);
+	RTE_SET_USED(max_pkts);
+	RTE_SET_USED(user_param);
+
+	pkts = mbufs;
+	n_left = nb_pkts;
+	while (n_left >= 12) {
+
+		/* Prefetch next-next mbufs */
+		rte_prefetch0(pkts[8]);
+		rte_prefetch0(pkts[9]);
+		rte_prefetch0(pkts[10]);
+		rte_prefetch0(pkts[11]);
+
+		/* Prefetch next mbuf data */
+		rte_prefetch0(rte_pktmbuf_mtod(pkts[4],
+					       struct rte_ether_hdr *));
+		rte_prefetch0(rte_pktmbuf_mtod(pkts[5],
+					       struct rte_ether_hdr *));
+		rte_prefetch0(rte_pktmbuf_mtod(pkts[6],
+					       struct rte_ether_hdr *));
+		rte_prefetch0(rte_pktmbuf_mtod(pkts[7],
+					       struct rte_ether_hdr *));
+
+		mbuf0 = pkts[0];
+		mbuf1 = pkts[1];
+		mbuf2 = pkts[2];
+		mbuf3 = pkts[3];
+		pkts += 4;
+		n_left -= 4;
+
+		/* Extract ptype of mbuf0 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf0,
+					   struct rte_ether_hdr *);
+		etype = eth_hdr->ether_type;
+		mbuf0->packet_type = l3_ptype(etype, 0);
+
+		/* Extract ptype of mbuf1 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf1,
+					   struct rte_ether_hdr *);
+		etype = eth_hdr->ether_type;
+		mbuf1->packet_type = l3_ptype(etype, 0);
+
+		/* Extract ptype of mbuf2 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf2,
+					   struct rte_ether_hdr *);
+		etype = eth_hdr->ether_type;
+		mbuf2->packet_type = l3_ptype(etype, 0);
+
+		/* Extract ptype of mbuf3 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf3,
+					   struct rte_ether_hdr *);
+		etype = eth_hdr->ether_type;
+		mbuf3->packet_type = l3_ptype(etype, 0);
+
+	}
+
+	while (n_left > 0) {
+		mbuf0 = pkts[0];
+
+		pkts += 1;
+		n_left -= 1;
+
+		/* Extract ptype of mbuf0 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf0,
+					   struct rte_ether_hdr *);
+		etype = eth_hdr->ether_type;
+		mbuf0->packet_type = l3_ptype(etype, 0);
+	}
+
+	return nb_pkts;
+}
+
+#define MAX_PTYPES  16
+static int
+ethdev_ptype_setup(uint16_t port, uint16_t queue)
+{
+	uint8_t l3_ipv4 = 0, l3_ipv6 = 0;
+	uint32_t ptypes[MAX_PTYPES];
+	int i, rc;
+
+	/* Check IPv4 & IPv6 ptype support */
+	rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK,
+					      ptypes, MAX_PTYPES);
+	for (i = 0; i < rc; i++) {
+		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+			l3_ipv4 = 1;
+		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+			l3_ipv6 = 1;
+	}
+
+	if (!l3_ipv4 || !l3_ipv6) {
+		node_info("ethdev_rx",
+			  "Enabling ptype callback for required ptypes "
+			  "on port %u", port);
+
+		if (!rte_eth_add_rx_callback(port, queue,
+					     eth_pkt_parse_cb, NULL)) {
+			node_err("ethdev_rx",
+				 "Failed to add rx ptype cb: port=%d, queue=%d",
+				 port, queue);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int
+ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
+{
+	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
+	ethdev_rx_node_elem_t *elem = ethdev_rx_main.head;
+
+	RTE_SET_USED(graph);
+
+	while (elem) {
+		if (elem->nid == node->id) {
+			/* Update node specific context */
+			memcpy(ctx, &elem->ctx, sizeof(ethdev_rx_node_ctx_t));
+			break;
+		}
+		elem = elem->next;
+	}
+
+	RTE_VERIFY(elem != NULL);
+
+	/* Check and setup ptype */
+	return ethdev_ptype_setup(ctx->port_id, ctx->queue_id);
+}
+
+struct rte_node_register ethdev_rx_node_base = {
+	.process = ethdev_rx_node_process,
+	.flags = RTE_NODE_SOURCE_F,
+	.name = "ethdev_rx",
+
+	.init = ethdev_rx_node_init,
+
+	.nb_edges = ETHDEV_RX_NEXT_MAX,
+	.next_nodes = {
+		[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup"
+	},
+};
+
+RTE_NODE_REGISTER(ethdev_rx_node_base);
diff --git a/lib/librte_node/ethdev_rx.h b/lib/librte_node/ethdev_rx.h
new file mode 100644
index 000000000..7ae2923ea
--- /dev/null
+++ b/lib/librte_node/ethdev_rx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+#ifndef __INCLUDE_ETHDEV_RX_H__
+#define __INCLUDE_ETHDEV_RX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_ETHDEV_RX_H__ */
diff --git a/lib/librte_node/ethdev_rx_priv.h b/lib/librte_node/ethdev_rx_priv.h
new file mode 100644
index 000000000..beb54e739
--- /dev/null
+++ b/lib/librte_node/ethdev_rx_priv.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+#ifndef __INCLUDE_ETHDEV_RX_PRIV_H__
+#define __INCLUDE_ETHDEV_RX_PRIV_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+struct ethdev_rx_node_elem;
+struct ethdev_rx_node_ctx;
+typedef struct ethdev_rx_node_elem ethdev_rx_node_elem_t;
+typedef struct ethdev_rx_node_ctx ethdev_rx_node_ctx_t;
+
+struct ethdev_rx_node_ctx {
+	uint16_t port_id;
+	uint16_t queue_id;
+};
+
+struct ethdev_rx_node_elem {
+	struct ethdev_rx_node_elem *next;
+	struct ethdev_rx_node_ctx ctx;
+	rte_node_t nid;
+};
+
+enum ethdev_rx_next_nodes {
+	ETHDEV_RX_NEXT_IP4_LOOKUP,
+	ETHDEV_RX_NEXT_MAX,
+};
+
+struct ethdev_rx_node_main {
+	ethdev_rx_node_elem_t *head;
+};
+
+extern struct ethdev_rx_node_main ethdev_rx_main;
+extern struct rte_node_register ethdev_rx_node_base;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_ETHDEV_RX_PRIV_H__ */
diff --git a/lib/librte_node/ethdev_tx.c b/lib/librte_node/ethdev_tx.c
new file mode 100644
index 000000000..06f944d4c
--- /dev/null
+++ b/lib/librte_node/ethdev_tx.c
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_mbuf.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+
+#include "ethdev_tx_priv.h"
+
+struct ethdev_tx_node_main ethdev_tx_main;
+
+static uint16_t
+ethdev_tx_node_process(struct rte_graph *graph, struct rte_node *node,
+		       void **objs, uint16_t nb_objs)
+{
+	ethdev_tx_node_ctx_t *ctx = (ethdev_tx_node_ctx_t *)node->ctx;
+	uint16_t port, queue;
+	uint16_t count;
+
+	/* Get Tx port id */
+	port = ctx->port;
+	queue = ctx->queue;
+
+	count = rte_eth_tx_burst(port, queue,
+				 (struct rte_mbuf **)objs, nb_objs);
+
+	/* Redirect unsent pkts to drop node */
+	if (count < nb_objs) {
+		rte_node_enqueue(graph, node, ETHDEV_TX_NEXT_PKT_DROP,
+				 &objs[count], nb_objs - count);
+	}
+
+	return count;
+}
+
+static int
+ethdev_tx_node_init(const struct rte_graph *graph, struct rte_node *node)
+{
+	ethdev_tx_node_ctx_t *ctx = (ethdev_tx_node_ctx_t *)node->ctx;
+	uint64_t port_id = RTE_MAX_ETHPORTS;
+	int i;
+
+	/* Find our port id */
+	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+		if (ethdev_tx_main.nodes[i] == node->id) {
+			port_id = i;
+			break;
+		}
+	}
+	RTE_VERIFY(port_id < RTE_MAX_ETHPORTS);
+
+	/* Update port and queue */
+	ctx->port = port_id;
+	ctx->queue = graph->id;
+	return 0;
+}
+
+
+struct rte_node_register ethdev_tx_node_base = {
+	.process = ethdev_tx_node_process,
+	.name = "ethdev_tx",
+
+	.init = ethdev_tx_node_init,
+
+	.nb_edges = ETHDEV_TX_NEXT_MAX,
+	.next_nodes = {
+		[ETHDEV_TX_NEXT_PKT_DROP] = "pkt_drop",
+	},
+};
+
+RTE_NODE_REGISTER(ethdev_tx_node_base);
diff --git a/lib/librte_node/ethdev_tx_priv.h b/lib/librte_node/ethdev_tx_priv.h
new file mode 100644
index 000000000..394cccbb1
--- /dev/null
+++ b/lib/librte_node/ethdev_tx_priv.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+#ifndef __INCLUDE_ETHDEV_TX_PRIV_H__
+#define __INCLUDE_ETHDEV_TX_PRIV_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum ethdev_tx_next_nodes {
+	ETHDEV_TX_NEXT_PKT_DROP,
+	ETHDEV_TX_NEXT_MAX,
+};
+
+typedef struct ethdev_tx_node_ctx {
+	uint16_t port;
+	uint16_t queue;
+} ethdev_tx_node_ctx_t;
+
+struct ethdev_tx_node_main {
+	/* Tx nodes for each ethdev port */
+	uint32_t nodes[RTE_MAX_ETHPORTS];
+};
+
+extern struct ethdev_tx_node_main ethdev_tx_main;
+extern struct rte_node_register ethdev_tx_node_base;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_ETHDEV_TX_PRIV_H__ */
diff --git a/lib/librte_node/ip4_lookup.c b/lib/librte_node/ip4_lookup.c
new file mode 100644
index 000000000..c7bccad93
--- /dev/null
+++ b/lib/librte_node/ip4_lookup.c
@@ -0,0 +1,657 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_lpm.h>
+#include <rte_mbuf.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#include <arpa/inet.h>
+#include <rte_node_ip4_api.h>
+#include "ip4_lookup_priv.h"
+#include "node_private.h"
+
+#define IPV4_L3FWD_LPM_MAX_RULES	1024
+#define IPV4_L3FWD_LPM_NUMBER_TBL8S	(1 << 8)
+
+/* IP4 Lookup global data struct */
+struct ip4_lookup_node_main {
+	struct rte_lpm *lpm_tbl[RTE_MAX_NUMA_NODES];
+};
+
+static struct ip4_lookup_node_main ip4_lookup_nm;
+
+#if defined(RTE_MACHINE_CPUFLAG_NEON)
+/* ARM64 NEON */
+static uint16_t
+ip4_lookup_node_process(struct rte_graph *graph,
+			struct rte_node *node,
+			void **objs, uint16_t nb_objs)
+{
+	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ether_hdr *eth_hdr;
+	void **to_next, **from;
+	uint16_t last_spec = 0;
+	rte_edge_t next_index;
+	uint16_t n_left_from;
+	struct rte_lpm *lpm;
+	uint16_t held = 0;
+	uint32_t drop_nh;
+	rte_xmm_t result;
+	rte_xmm_t priv01;
+	rte_xmm_t priv23;
+	int32x4_t dip;
+	int rc, i;
+
+	/* Speculative next */
+	next_index = IP4_LOOKUP_NEXT_REWRITE;
+	drop_nh = ((uint32_t)IP4_LOOKUP_NEXT_PKT_DROP) << 16; /* Drop node */
+
+	/* Get socket specific lpm from ctx */
+	lpm = *((struct rte_lpm **)node->ctx);
+
+	pkts = (struct rte_mbuf **)objs;
+	from = objs;
+	n_left_from = nb_objs;
+
+#define OBJS_PER_CLINE	(RTE_CACHE_LINE_SIZE / sizeof(void *))
+	for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE)
+		rte_prefetch0(&objs[i]);
+
+	for (i = 0; i < 4 && i < n_left_from; i++) {
+		rte_prefetch0(rte_pktmbuf_mtod(pkts[i],
+					struct rte_ether_hdr *) + 1);
+	}
+
+	/* Get stream for the speculated next node */
+	to_next = rte_node_next_stream_get(graph, node,
+			next_index, nb_objs);
+	while (n_left_from >= 4) {
+
+		/* Prefetch next-next mbufs */
+		if (likely(n_left_from >= 11)) {
+			rte_prefetch0(pkts[8]);
+			rte_prefetch0(pkts[9]);
+			rte_prefetch0(pkts[10]);
+			rte_prefetch0(pkts[11]);
+		}
+
+		/* Prefetch next mbuf data */
+		if (likely(n_left_from >= 7)) {
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[4],
+						struct rte_ether_hdr *) + 1);
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[5],
+						struct rte_ether_hdr *) + 1);
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[6],
+						struct rte_ether_hdr *) + 1);
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[7],
+						struct rte_ether_hdr *) + 1);
+		}
+
+		mbuf0 = pkts[0];
+		mbuf1 = pkts[1];
+		mbuf2 = pkts[2];
+		mbuf3 = pkts[3];
+
+		pkts += 4;
+		n_left_from -= 4;
+
+		/* Extract DIP of mbuf0 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf0,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 0);
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		priv01.u16[1] = ipv4_hdr->time_to_live;
+		priv01.u32[1] = ipv4_hdr->hdr_checksum;
+
+		/* Extract DIP of mbuf1 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf1,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 1);
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		priv01.u16[5] = ipv4_hdr->time_to_live;
+		priv01.u32[3] = ipv4_hdr->hdr_checksum;
+
+		/* Extract DIP of mbuf2 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf2,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 2);
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		priv23.u16[1] = ipv4_hdr->time_to_live;
+		priv23.u32[1] = ipv4_hdr->hdr_checksum;
+
+		/* Extract DIP of mbuf3 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf3,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 3);
+
+		dip = vreinterpretq_s32_u8(vrev32q_u8(
+					vreinterpretq_u8_s32(dip)));
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		priv23.u16[5] = ipv4_hdr->time_to_live;
+		priv23.u32[3] = ipv4_hdr->hdr_checksum;
+
+		/* Perform LPM lookup to get NH and next node */
+		rte_lpm_lookupx4(lpm, dip, result.u32, drop_nh);
+		priv01.u16[0] = result.u16[0];
+		priv01.u16[4] = result.u16[2];
+		priv23.u16[0] = result.u16[4];
+		priv23.u16[4] = result.u16[6];
+
+		rte_node_mbuf_priv1(mbuf0)->u = priv01.u64[0];
+		rte_node_mbuf_priv1(mbuf1)->u = priv01.u64[1];
+		rte_node_mbuf_priv1(mbuf2)->u = priv23.u64[0];
+		rte_node_mbuf_priv1(mbuf3)->u = priv23.u64[1];
+
+		/* Enqueue four to next node */
+		/* TODO: Do we need a macro for this? */
+		rte_edge_t fix_spec = (next_index ^ result.u16[1]) |
+			(next_index ^ result.u16[3]) |
+			(next_index ^ result.u16[5]) |
+			(next_index ^ result.u16[7]);
+
+		if (unlikely(fix_spec)) {
+
+			/* Copy things successfully speculated till now */
+			rte_memcpy(to_next, from,
+					last_spec * sizeof(from[0]));
+			from += last_spec;
+			to_next += last_spec;
+			held += last_spec;
+			last_spec = 0;
+
+			/* next0 */
+			if (next_index == result.u16[1]) {
+				to_next[0] = from[0];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						result.u16[1],
+						from[0]);
+			}
+
+			/* next1 */
+			if (next_index == result.u16[3]) {
+				to_next[0] = from[1];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						result.u16[3],
+						from[1]);
+			}
+
+			/* next2 */
+			if (next_index == result.u16[5]) {
+				to_next[0] = from[2];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						result.u16[5],
+						from[2]);
+			}
+
+			/* next3 */
+			if (next_index == result.u16[7]) {
+				to_next[0] = from[3];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						result.u16[7],
+						from[3]);
+			}
+
+			from += 4;
+		} else {
+			last_spec += 4;
+		}
+	}
+
+	while (n_left_from > 0) {
+		uint32_t next_hop;
+		uint16_t next0;
+
+		mbuf0 = pkts[0];
+
+		pkts += 1;
+		n_left_from -= 1;
+
+		/* Extract DIP of mbuf0 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf0,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		rte_node_mbuf_priv1(mbuf0)->cksum =
+			ipv4_hdr->hdr_checksum;
+		rte_node_mbuf_priv1(mbuf0)->ttl =
+			ipv4_hdr->time_to_live;
+
+		rc = rte_lpm_lookup(lpm,
+				rte_be_to_cpu_32(ipv4_hdr->dst_addr),
+				&next_hop);
+		next_hop = (rc == 0) ? next_hop : drop_nh;
+
+		rte_node_mbuf_priv1(mbuf0)->nh = (uint16_t)next_hop;
+		next_hop = next_hop >> 16;
+		next0 = (uint16_t)next_hop;
+
+		if (unlikely(next_index ^ next0)) {
+			/* Copy things successfully speculated till now */
+			rte_memcpy(to_next, from,
+					last_spec * sizeof(from[0]));
+			from += last_spec;
+			to_next += last_spec;
+			held += last_spec;
+			last_spec = 0;
+
+			rte_node_enqueue_x1(graph, node,
+					next0, from[0]);
+			from += 1;
+		} else {
+			last_spec += 1;
+		}
+	}
+
+	/* !!! Home run !!! */
+	if (likely(last_spec == nb_objs)) {
+		rte_node_next_stream_move(graph, node, next_index);
+		return nb_objs;
+	}
+	held += last_spec;
+	rte_memcpy(to_next, from,
+			last_spec * sizeof(from[0]));
+	rte_node_next_stream_put(graph, node, next_index, held);
+
+	return nb_objs;
+}
+
+#elif defined(RTE_ARCH_X86)
+/* X86 SSE */
+static uint16_t
+ip4_lookup_node_process(struct rte_graph *graph,
+			struct rte_node *node,
+			void **objs, uint16_t nb_objs)
+{
+	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
+	rte_edge_t next0, next1, next2, next3, next_index;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ether_hdr *eth_hdr;
+	uint32_t ip0, ip1, ip2, ip3;
+	void **to_next, **from;
+	uint16_t last_spec = 0;
+	uint16_t n_left_from;
+	struct rte_lpm *lpm;
+	uint16_t held = 0;
+	uint32_t drop_nh;
+	rte_xmm_t dst;
+	__m128i dip; /* SSE register */
+	int rc, i;
+
+	/* Speculative next */
+	next_index = IP4_LOOKUP_NEXT_REWRITE;
+	drop_nh = ((uint32_t)IP4_LOOKUP_NEXT_PKT_DROP) << 16; /* Drop node */
+
+	/* Get socket specific lpm from ctx */
+	lpm = *((struct rte_lpm **)node->ctx);
+
+	pkts = (struct rte_mbuf **)objs;
+	from = objs;
+	n_left_from = nb_objs;
+
+	if (n_left_from >= 4) {
+		for (i = 0; i < 4; i++) {
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[i],
+					       struct rte_ether_hdr *) + 1);
+		}
+	}
+
+	/* Get stream for the speculated next node */
+	to_next = rte_node_next_stream_get(graph, node,
+			next_index, nb_objs);
+	while (n_left_from >= 4) {
+
+		/* Prefetch next-next mbufs */
+		if (likely(n_left_from >= 11)) {
+			rte_prefetch0(pkts[8]);
+			rte_prefetch0(pkts[9]);
+			rte_prefetch0(pkts[10]);
+			rte_prefetch0(pkts[11]);
+		}
+
+		/* Prefetch next mbuf data */
+		if (likely(n_left_from >= 7)) {
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[4],
+						struct rte_ether_hdr *) + 1);
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[5],
+						struct rte_ether_hdr *) + 1);
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[6],
+						struct rte_ether_hdr *) + 1);
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[7],
+						struct rte_ether_hdr *) + 1);
+		}
+
+		mbuf0 = pkts[0];
+		mbuf1 = pkts[1];
+		mbuf2 = pkts[2];
+		mbuf3 = pkts[3];
+
+		pkts += 4;
+		n_left_from -= 4;
+
+		/* Extract DIP of mbuf0 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf0,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		ip0 = ipv4_hdr->dst_addr;
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		rte_node_mbuf_priv1(mbuf0)->cksum =
+			ipv4_hdr->hdr_checksum;
+		rte_node_mbuf_priv1(mbuf0)->ttl =
+			ipv4_hdr->time_to_live;
+
+		/* Extract DIP of mbuf1 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf1,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		ip1 = ipv4_hdr->dst_addr;
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		rte_node_mbuf_priv1(mbuf1)->cksum =
+			ipv4_hdr->hdr_checksum;
+		rte_node_mbuf_priv1(mbuf1)->ttl =
+			ipv4_hdr->time_to_live;
+
+		/* Extract DIP of mbuf2 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf2,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		ip2 = ipv4_hdr->dst_addr;
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		rte_node_mbuf_priv1(mbuf2)->cksum =
+			ipv4_hdr->hdr_checksum;
+		rte_node_mbuf_priv1(mbuf2)->ttl =
+			ipv4_hdr->time_to_live;
+
+		/* Extract DIP of mbuf3 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf3,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		ip3 = ipv4_hdr->dst_addr;
+
+		/* Prepare for lookup x4 */
+		dip = _mm_set_epi32(ip3, ip2, ip1, ip0);
+
+		/* Byte swap 4 IPV4 addresses. */
+		const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15,
+				8, 9, 10, 11, 4, 5, 6, 7,
+				0, 1, 2, 3);
+		dip = _mm_shuffle_epi8(dip, bswap_mask);
+
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		rte_node_mbuf_priv1(mbuf3)->cksum =
+			ipv4_hdr->hdr_checksum;
+		rte_node_mbuf_priv1(mbuf3)->ttl =
+			ipv4_hdr->time_to_live;
+
+		/* Perform LPM lookup to get NH and next node */
+		rte_lpm_lookupx4(lpm, dip, dst.u32, drop_nh);
+
+		/* Extract next node id and NH */
+		rte_node_mbuf_priv1(mbuf0)->nh = dst.u32[0] & 0xFFFF;
+		next0 = (dst.u32[0] >> 16);
+
+		rte_node_mbuf_priv1(mbuf1)->nh = dst.u32[1] & 0xFFFF;
+		next1 = (dst.u32[1] >> 16);
+
+		rte_node_mbuf_priv1(mbuf2)->nh = dst.u32[2] & 0xFFFF;
+		next2 = (dst.u32[2] >> 16);
+
+		rte_node_mbuf_priv1(mbuf3)->nh = dst.u32[3] & 0xFFFF;
+		next3 = (dst.u32[3] >> 16);
+
+		/* Enqueue four to next node */
+		/* TODO: Do we need a macro for this? */
+		rte_edge_t fix_spec = (next_index ^ next0) |
+			(next_index ^ next1) | (next_index ^ next2) |
+			(next_index ^ next3);
+
+		if (unlikely(fix_spec)) {
+
+			/* Copy things successfully speculated till now */
+			rte_memcpy(to_next, from,
+					last_spec * sizeof(from[0]));
+			from += last_spec;
+			to_next += last_spec;
+			held += last_spec;
+			last_spec = 0;
+
+			/* next0 */
+			if (next_index == next0) {
+				to_next[0] = from[0];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						next0, from[0]);
+			}
+
+			/* next1 */
+			if (next_index == next1) {
+				to_next[0] = from[1];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						next1, from[1]);
+			}
+
+			/* next2 */
+			if (next_index == next2) {
+				to_next[0] = from[2];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						next2, from[2]);
+			}
+
+			/* next3 */
+			if (next_index == next3) {
+				to_next[0] = from[3];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						next3, from[3]);
+			}
+
+			from += 4;
+
+		} else {
+			last_spec += 4;
+		}
+	}
+
+	while (n_left_from > 0) {
+		uint32_t next_hop;
+
+		mbuf0 = pkts[0];
+
+		pkts += 1;
+		n_left_from -= 1;
+
+		/* Extract DIP of mbuf0 */
+		eth_hdr = rte_pktmbuf_mtod(mbuf0,
+				struct rte_ether_hdr *);
+		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+		/* Extract cksum, ttl as ipv4 hdr is in cache */
+		rte_node_mbuf_priv1(mbuf0)->cksum =
+			ipv4_hdr->hdr_checksum;
+		rte_node_mbuf_priv1(mbuf0)->ttl =
+			ipv4_hdr->time_to_live;
+
+		rc = rte_lpm_lookup(lpm,
+				rte_be_to_cpu_32(ipv4_hdr->dst_addr),
+				&next_hop);
+		next_hop = (rc == 0) ? next_hop : drop_nh;
+
+		rte_node_mbuf_priv1(mbuf0)->nh = next_hop & 0xFFFF;
+		next0 = (next_hop >> 16);
+
+		if (unlikely(next_index ^ next0)) {
+			/* Copy things successfully speculated till now */
+			rte_memcpy(to_next, from,
+					last_spec * sizeof(from[0]));
+			from += last_spec;
+			to_next += last_spec;
+			held += last_spec;
+			last_spec = 0;
+
+			rte_node_enqueue_x1(graph, node,
+					next0, from[0]);
+			from += 1;
+		} else {
+			last_spec += 1;
+		}
+	}
+
+	/* !!! Home run !!! */
+	if (likely(last_spec == nb_objs)) {
+		rte_node_next_stream_move(graph, node, next_index);
+		return nb_objs;
+	}
+
+	held += last_spec;
+	/* Copy things successfully speculated till now */
+	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
+	rte_node_next_stream_put(graph, node, next_index, held);
+
+	return nb_objs;
+}
+#else
+static uint16_t
+ip4_lookup_node_process(struct rte_graph *graph, struct rte_node *node,
+			void **objs, uint16_t nb_objs)
+{
+	RTE_SET_USED(graph);
+	RTE_SET_USED(node);
+	RTE_SET_USED(objs);
+	RTE_SET_USED(nb_objs);
+	return nb_objs;
+}
+#endif
+
+int rte_node_ip4_route_add(uint32_t ip, uint8_t depth, uint16_t next_hop,
+			   enum ip4_lookup_next_nodes next_node)
+{
+	char abuf[INET6_ADDRSTRLEN];
+	struct in_addr in;
+	uint8_t socket;
+	uint32_t val;
+	int ret;
+
+	in.s_addr = htonl(ip);
+	inet_ntop(AF_INET, &in, abuf, sizeof(abuf));
+	/* Embedded next node id in next hop */
+	val = (next_node << 16) | next_hop;
+	node_dbg("ip4_lookup", "LPM: Adding route %s / %d nh (0x%x)",
+		     abuf, depth, val);
+
+	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
+
+		if (!ip4_lookup_nm.lpm_tbl[socket])
+			continue;
+
+		ret = rte_lpm_add(ip4_lookup_nm.lpm_tbl[socket], ip, depth, val);
+
+		if (ret < 0)
+			node_err("ip4_lookup", "Unable to add entry %s / %d nh (%x) "
+				 "to LPM table on sock %d, rc=%d",
+				 abuf, depth, val, socket, ret);
+	}
+
+	return 0;
+}
+
+static int
+setup_lpm(struct ip4_lookup_node_main *nm, int socket)
+{
+	struct rte_lpm_config config_ipv4;
+	char s[64];
+
+	/* One LPM table per socket */
+	if (nm->lpm_tbl[socket])
+		return 0;
+
+	/* create the LPM table */
+	config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
+	config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
+	config_ipv4.flags = 0;
+	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socket);
+	nm->lpm_tbl[socket] = rte_lpm_create(s, socket, &config_ipv4);
+	if (nm->lpm_tbl[socket] == NULL)
+		rte_panic("ip4_lookup: Unable to create LPM table on socket %d\n",
+			   socket);
+
+
+	return 0;
+}
+
+static int
+ip4_lookup_node_init(const struct rte_graph *graph, struct rte_node *node)
+{
+	static uint8_t init_once;
+	uint16_t socket, lcore_id;
+	struct rte_lpm **lpm_p = (struct rte_lpm **)&node->ctx;
+	int rc;
+
+	RTE_SET_USED(graph);
+	RTE_SET_USED(node);
+
+	if (!init_once) {
+		/* Setup LPM tables for all sockets */
+		RTE_LCORE_FOREACH(lcore_id) {
+			socket = rte_lcore_to_socket_id(lcore_id);
+			rc = setup_lpm(&ip4_lookup_nm, socket);
+			if (rc)
+				node_err("ip4_lookup",
+					 "Failed to setup lpm tbl for sock %u, rc=%d",
+					 socket, rc);
+		}
+		init_once = 1;
+	}
+
+	*lpm_p = ip4_lookup_nm.lpm_tbl[graph->socket];
+	node_dbg("ip4_lookup", "Initialized ip4_lookup node");
+	return 0;
+}
+
+
+static struct rte_node_register ip4_lookup_node = {
+	.process = ip4_lookup_node_process,
+	.name = "ip4_lookup",
+
+	.init = ip4_lookup_node_init,
+
+	.nb_edges = IP4_LOOKUP_NEXT_MAX,
+	.next_nodes = {
+		[IP4_LOOKUP_NEXT_REWRITE] = "ip4_rewrite",
+		[IP4_LOOKUP_NEXT_PKT_DROP] = "pkt_drop",
+	},
+};
+
+RTE_NODE_REGISTER(ip4_lookup_node);
diff --git a/lib/librte_node/ip4_lookup_priv.h b/lib/librte_node/ip4_lookup_priv.h
new file mode 100644
index 000000000..898eba301
--- /dev/null
+++ b/lib/librte_node/ip4_lookup_priv.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+#ifndef __INCLUDE_IP4_LOOKUP_PRIV_H__
+#define __INCLUDE_IP4_LOOKUP_PRIV_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_IP4_LOOKUP_PRIV_H__ */
diff --git a/lib/librte_node/ip4_rewrite.c b/lib/librte_node/ip4_rewrite.c
new file mode 100644
index 000000000..f0f1d599e
--- /dev/null
+++ b/lib/librte_node/ip4_rewrite.c
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_vect.h>
+
+#include <rte_node_ip4_api.h>
+#include "ip4_rewrite_priv.h"
+#include "node_private.h"
+
+static struct ip4_rewrite_node_main *ip4_rewrite_nm;
+
+static uint16_t
+ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
+			void **objs, uint16_t nb_objs)
+{
+	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
+	struct ip4_rewrite_nh_header *nh = ip4_rewrite_nm->nh;
+	uint16_t next0, next1, next2, next3, next_index;
+	struct rte_ipv4_hdr *ip0, *ip1, *ip2, *ip3;
+	uint16_t n_left_from, held = 0, last_spec = 0;
+	void *d0, *d1, *d2, *d3;
+	void **to_next, **from;
+	rte_xmm_t priv01;
+	rte_xmm_t priv23;
+	int i;
+
+	/* Speculative next as last next */
+	next_index = *(uint16_t *)node->ctx;
+	rte_prefetch0(nh);
+
+	pkts = (struct rte_mbuf **)objs;
+	from = objs;
+	n_left_from = nb_objs;
+
+	for (i = 0; i < 4 && i < n_left_from; i++)
+		rte_prefetch0(pkts[i]);
+
+	/* Get stream for the speculated next node */
+	to_next = rte_node_next_stream_get(graph, node,
+			next_index, nb_objs);
+	/* Update ethernet header of pkts */
+	while (n_left_from >= 4) {
+
+		if (likely(n_left_from >= 7)) {
+			/* Prefetch only next-mbuf struct and priv area.
+			 * Data need not be prefetched as we only write.
+			 */
+			rte_prefetch0(pkts[4]);
+			rte_prefetch0(pkts[5]);
+			rte_prefetch0(pkts[6]);
+			rte_prefetch0(pkts[7]);
+		}
+
+		mbuf0 = pkts[0];
+		mbuf1 = pkts[1];
+		mbuf2 = pkts[2];
+		mbuf3 = pkts[3];
+
+		pkts += 4;
+		n_left_from -= 4;
+		priv01.u64[0] = rte_node_mbuf_priv1(mbuf0)->u;
+		priv01.u64[1] = rte_node_mbuf_priv1(mbuf1)->u;
+		priv23.u64[0] = rte_node_mbuf_priv1(mbuf2)->u;
+		priv23.u64[1] = rte_node_mbuf_priv1(mbuf3)->u;
+
+		priv01.u16[2]++;
+		priv01.u16[6]++;
+		priv23.u16[2]++;
+		priv23.u16[6]++;
+
+		priv01.u16[1]--;
+		priv01.u16[5]--;
+		priv23.u16[1]--;
+		priv23.u16[5]--;
+
+		/* Update ttl, rewrite ethernet hdr on mbuf0 */
+		d0 = rte_pktmbuf_mtod(mbuf0, void *);
+		rte_memcpy(d0,
+				nh[priv01.u16[0]].rewrite_data,
+				nh[priv01.u16[0]].rewrite_len);
+
+		next0 = nh[priv01.u16[0]].tx_node;
+		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
+				sizeof(struct rte_ether_hdr));
+		ip0->time_to_live = priv01.u16[1];
+		ip0->hdr_checksum = priv01.u16[2];
+
+
+		/* Update ttl, rewrite ethernet hdr on mbuf1 */
+		d1 = rte_pktmbuf_mtod(mbuf1, void *);
+		rte_memcpy(d1,
+				nh[priv01.u16[4]].rewrite_data,
+				nh[priv01.u16[4]].rewrite_len);
+
+		next1 = nh[priv01.u16[4]].tx_node;
+		ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
+				sizeof(struct rte_ether_hdr));
+		ip1->time_to_live = priv01.u16[5];
+		ip1->hdr_checksum = priv01.u16[6];
+
+
+		/* Update ttl, rewrite ethernet hdr on mbuf2 */
+		d2 = rte_pktmbuf_mtod(mbuf2, void *);
+		rte_memcpy(d2,
+				nh[priv23.u16[0]].rewrite_data,
+				nh[priv23.u16[0]].rewrite_len);
+		next2 = nh[priv23.u16[0]].tx_node;
+		ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
+				sizeof(struct rte_ether_hdr));
+		ip2->time_to_live = priv23.u16[1];
+		ip2->hdr_checksum = priv23.u16[2];
+
+
+		/* Update ttl, rewrite ethernet hdr on mbuf3 */
+		d3 = rte_pktmbuf_mtod(mbuf3, void *);
+		rte_memcpy(d3,
+				nh[priv23.u16[4]].rewrite_data,
+				nh[priv23.u16[4]].rewrite_len);
+
+		next3 = nh[priv23.u16[4]].tx_node;
+		ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
+				sizeof(struct rte_ether_hdr));
+		ip3->time_to_live = priv23.u16[5];
+		ip3->hdr_checksum = priv23.u16[6];
+
+		/* Enqueue four to next node */
+		/* TODO: Do we need a macro for this? */
+		rte_edge_t fix_spec = (next_index ^ next0) |
+			(next_index ^ next1) | (next_index ^ next2) |
+			(next_index ^ next3);
+
+		if (unlikely(fix_spec)) {
+			/* Copy things successfully speculated till now */
+			rte_memcpy(to_next, from,
+					last_spec * sizeof(from[0]));
+			from += last_spec;
+			to_next += last_spec;
+			held += last_spec;
+			last_spec = 0;
+
+			/* next0 */
+			if (next_index == next0) {
+				to_next[0] = from[0];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						next0, from[0]);
+			}
+
+			/* next1 */
+			if (next_index == next1) {
+				to_next[0] = from[1];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						next1, from[1]);
+			}
+
+			/* next2 */
+			if (next_index == next2) {
+				to_next[0] = from[2];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						next2, from[2]);
+			}
+
+			/* next3 */
+			if (next_index == next3) {
+				to_next[0] = from[3];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node,
+						next3, from[3]);
+			}
+
+			from += 4;
+
+			/* Change speculation if last two are same */
+			if ((next_index != next3) &&
+					(next2 == next3)) {
+				/* Put the current speculated node */
+				rte_node_next_stream_put(graph,
+						node, next_index,
+						held);
+				held = 0;
+
+				/* Get next speculated stream */
+				next_index = next3;
+				to_next = rte_node_next_stream_get(
+						graph, node,
+						next_index, nb_objs);
+			}
+		} else {
+			last_spec += 4;
+		}
+	}
+
+	while (n_left_from > 0) {
+		uint16_t chksum;
+		mbuf0 = pkts[0];
+
+		pkts += 1;
+		n_left_from -= 1;
+
+		d0 = rte_pktmbuf_mtod(mbuf0, void *);
+		rte_memcpy(d0,
+				nh[rte_node_mbuf_priv1(mbuf0)->nh].rewrite_data,
+				nh[rte_node_mbuf_priv1(mbuf0)->nh].rewrite_len);
+
+		next0 = nh[rte_node_mbuf_priv1(mbuf0)->nh].tx_node;
+		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
+				sizeof(struct rte_ether_hdr));
+		chksum = rte_node_mbuf_priv1(mbuf0)->cksum +
+			rte_cpu_to_be_16(0x0100);
+		chksum += chksum >= 0xffff;
+		ip0->hdr_checksum = chksum;
+		ip0->time_to_live = rte_node_mbuf_priv1(mbuf0)->ttl - 1;
+
+		if (unlikely(next_index ^ next0)) {
+			/* Copy things successfully speculated till now */
+			rte_memcpy(to_next, from,
+					last_spec * sizeof(from[0]));
+			from += last_spec;
+			to_next += last_spec;
+			held += last_spec;
+			last_spec = 0;
+
+			rte_node_enqueue_x1(graph, node,
+					next0, from[0]);
+			from += 1;
+		} else {
+			last_spec += 1;
+		}
+	}
+
+	/* !!! Home run !!! */
+	if (likely(last_spec == nb_objs)) {
+		rte_node_next_stream_move(graph, node, next_index);
+		return nb_objs;
+	}
+
+	held += last_spec;
+	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
+	rte_node_next_stream_put(graph, node, next_index, held);
+	/* Save the last next used */
+	*(uint16_t *)node->ctx = next_index;
+
+	return nb_objs;
+}
+
+static int
+ip4_rewrite_node_init(const struct rte_graph *graph, struct rte_node *node)
+{
+
+	RTE_SET_USED(graph);
+	RTE_SET_USED(node);
+
+	node_dbg("ip4_rewrite", "Initialized ip4_rewrite node");
+	return 0;
+}
+
+
+int
+ip4_rewrite_set_next(uint16_t port_id, uint16_t next_index)
+{
+	if (ip4_rewrite_nm == NULL) {
+		ip4_rewrite_nm = rte_zmalloc("ip4_rewrite",
+					sizeof(struct ip4_rewrite_node_main),
+					RTE_CACHE_LINE_SIZE);
+		if (ip4_rewrite_nm == NULL)
+			return -ENOMEM;
+	}
+	ip4_rewrite_nm->next_index[port_id] = next_index;
+
+	return 0;
+}
+
+int
+rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
+			 uint8_t rewrite_len, uint16_t dst_port)
+{
+	struct ip4_rewrite_nh_header *nh;
+
+	if (next_hop >= RTE_GRAPH_IP4_REWRITE_MAX_NH)
+		return -EINVAL;
+
+	if (rewrite_len > RTE_GRAPH_IP4_REWRITE_MAX_LEN)
+		return -EINVAL;
+
+	if (ip4_rewrite_nm == NULL) {
+		ip4_rewrite_nm = rte_zmalloc("ip4_rewrite",
+					sizeof(struct ip4_rewrite_node_main),
+					RTE_CACHE_LINE_SIZE);
+		if (ip4_rewrite_nm == NULL)
+			return -ENOMEM;
+	}
+
+	/* Check if dst port doesn't exist as edge */
+	if (!ip4_rewrite_nm->next_index[dst_port])
+		return -EINVAL;
+
+	/* Update next hop */
+	nh = &ip4_rewrite_nm->nh[next_hop];
+
+	memcpy(nh->rewrite_data, rewrite_data, rewrite_len);
+	nh->tx_node = ip4_rewrite_nm->next_index[dst_port];
+	nh->rewrite_len = rewrite_len;
+	nh->enabled = true;
+
+	return 0;
+}
+
+struct rte_node_register ip4_rewrite_node = {
+	.process = ip4_rewrite_node_process,
+	.name = "ip4_rewrite",
+	/* Default edge, i.e. '0', is pkt drop */
+	.nb_edges = 1,
+	.next_nodes = {
+		[0] = "pkt_drop",
+	},
+	.init = ip4_rewrite_node_init,
+};
+
+RTE_NODE_REGISTER(ip4_rewrite_node);
diff --git a/lib/librte_node/ip4_rewrite_priv.h b/lib/librte_node/ip4_rewrite_priv.h
new file mode 100644
index 000000000..128148faa
--- /dev/null
+++ b/lib/librte_node/ip4_rewrite_priv.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+#ifndef __INCLUDE_IP4_REWRITE_PRIV_H__
+#define __INCLUDE_IP4_REWRITE_PRIV_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#define RTE_GRAPH_IP4_REWRITE_MAX_NH  64
+#define RTE_GRAPH_IP4_REWRITE_MAX_LEN 56
+
+struct ip4_rewrite_nh_header {
+	uint16_t rewrite_len;
+	uint16_t tx_node;
+	uint16_t enabled;
+	uint16_t rsvd;
+	union {
+		struct {
+			struct rte_ether_addr dst;
+			struct rte_ether_addr src;
+		};
+		uint8_t rewrite_data[RTE_GRAPH_IP4_REWRITE_MAX_LEN];
+	};
+};
+
+struct ip4_rewrite_node_main {
+	struct ip4_rewrite_nh_header nh[RTE_GRAPH_IP4_REWRITE_MAX_NH];
+	/* Next index of each configured port */
+	uint16_t next_index[RTE_MAX_ETHPORTS];
+};
+
+extern struct rte_node_register ip4_rewrite_node;
+
+int ip4_rewrite_set_next(uint16_t port_id, uint16_t next_index);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_IP4_REWRITE_PRIV_H__ */
diff --git a/lib/librte_node/log.c b/lib/librte_node/log.c
new file mode 100644
index 000000000..f035f91e8
--- /dev/null
+++ b/lib/librte_node/log.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include "node_private.h"
+
+int rte_node_logtype;
+
+RTE_INIT(rte_node_init_log)
+{
+	rte_node_logtype = rte_log_register("lib.node");
+	if (rte_node_logtype >= 0)
+		rte_log_set_level(rte_node_logtype, RTE_LOG_INFO);
+}
diff --git a/lib/librte_node/meson.build b/lib/librte_node/meson.build
new file mode 100644
index 000000000..59e11e5b4
--- /dev/null
+++ b/lib/librte_node/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2020 Marvell International Ltd.
+
+sources = files('null.c', 'log.c', 'ethdev_rx.c', 'ethdev_tx.c', 'ip4_lookup.c',
+		'ip4_rewrite.c', 'pkt_drop.c', 'ethdev_ctrl.c')
+headers = files('rte_node_ip4_api.h', 'rte_node_eth_api.h')
+allow_experimental_apis = true
+deps += ['graph', 'mbuf', 'lpm', 'ethdev', 'mempool', 'cryptodev']
diff --git a/lib/librte_node/node_private.h b/lib/librte_node/node_private.h
new file mode 100644
index 000000000..9f40ff222
--- /dev/null
+++ b/lib/librte_node/node_private.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __NODE_PRIVATE_H__
+#define __NODE_PRIVATE_H__
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_crypto.h>
+#include <rte_log.h>
+
+extern int rte_node_logtype;
+#define NODE_LOG(level, node_name, ...) \
+	rte_log(RTE_LOG_ ## level, rte_node_logtype,			       \
+		RTE_FMT("NODE %s: %s():%u " RTE_FMT_HEAD(__VA_ARGS__,)\
+			"\n", node_name, __func__, __LINE__,	       \
+			RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define node_err(node_name, ...)	NODE_LOG(ERR, node_name, __VA_ARGS__)
+#define node_info(node_name, ...)	NODE_LOG(INFO, node_name, __VA_ARGS__)
+#define node_dbg(node_name, ...)	NODE_LOG(DEBUG, node_name, __VA_ARGS__)
+
+struct rte_node_mbuf_priv1 {
+	union {
+		/* IP4 rewrite */
+		struct {
+			uint16_t nh;
+			uint16_t ttl;
+			uint32_t cksum;
+		};
+
+		uint64_t u;
+	};
+};
+
+struct rte_node_mbuf_priv2 {
+	union {
+		/* Sym crypto */
+		struct {
+			struct rte_crypto_op op;
+		};
+	};
+} __rte_cache_aligned;
+
+#define RTE_NODE_MBUF_PRIV2_SIZE sizeof(struct rte_node_mbuf_priv2)
+
+static __rte_always_inline struct rte_node_mbuf_priv1 *
+rte_node_mbuf_priv1(struct rte_mbuf *m)
+{
+	return (struct rte_node_mbuf_priv1 *)&m->udata64;
+}
+
+static __rte_always_inline struct rte_node_mbuf_priv2 *
+rte_node_mbuf_priv2(struct rte_mbuf *m)
+{
+	return (struct rte_node_mbuf_priv2 *)rte_mbuf_to_priv(m);
+}
+
+
+#endif /* __NODE_PRIVATE_H__ */
diff --git a/lib/librte_node/null.c b/lib/librte_node/null.c
new file mode 100644
index 000000000..5359f958f
--- /dev/null
+++ b/lib/librte_node/null.c
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_graph.h>
+
+static uint16_t
+null(struct rte_graph *graph, struct rte_node *node, void **objs,
+	uint16_t nb_objs)
+{
+	RTE_SET_USED(node);
+	RTE_SET_USED(objs);
+	RTE_SET_USED(graph);
+
+	return nb_objs;
+}
+
+static struct rte_node_register null_node = {
+	.name	 = "null",
+	.process = null,
+};
+
+RTE_NODE_REGISTER(null_node);
diff --git a/lib/librte_node/pkt_drop.c b/lib/librte_node/pkt_drop.c
new file mode 100644
index 000000000..643af6d75
--- /dev/null
+++ b/lib/librte_node/pkt_drop.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_debug.h>
+#include <rte_mbuf.h>
+#include <rte_graph.h>
+
+static uint16_t
+pkt_drop_process(struct rte_graph *graph, struct rte_node *node, void **objs,
+		 uint16_t nb_objs)
+{
+	RTE_SET_USED(node);
+	RTE_SET_USED(graph);
+
+	rte_pktmbuf_free_bulk((struct rte_mbuf **)objs, nb_objs);
+
+	return nb_objs;
+}
+
+static struct rte_node_register pkt_drop_node = {
+	.process = pkt_drop_process,
+	.name = "pkt_drop",
+};
+
+RTE_NODE_REGISTER(pkt_drop_node);
diff --git a/lib/librte_node/rte_node_eth_api.h b/lib/librte_node/rte_node_eth_api.h
new file mode 100644
index 000000000..80c69fa66
--- /dev/null
+++ b/lib/librte_node/rte_node_eth_api.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+#ifndef __INCLUDE_RTE_NODE_ETH_API_H__
+#define __INCLUDE_RTE_NODE_ETH_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_graph.h>
+
+struct rte_node_ethdev_config {
+	uint16_t port_id;
+	uint16_t num_rx_queues;
+	uint16_t num_tx_queues;
+	struct rte_mempool **mp;
+	uint16_t mp_count;
+};
+
+__rte_experimental int
+rte_node_eth_config(struct rte_node_ethdev_config *cfg,
+		   uint16_t cnt, uint16_t nb_graphs);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_NODE_ETH_API_H__ */
diff --git a/lib/librte_node/rte_node_ip4_api.h b/lib/librte_node/rte_node_ip4_api.h
new file mode 100644
index 000000000..7cbb018c2
--- /dev/null
+++ b/lib/librte_node/rte_node_ip4_api.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+#ifndef __INCLUDE_RTE_NODE_IP4_API_H__
+#define __INCLUDE_RTE_NODE_IP4_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* IP4_LOOKUP node defines */
+enum ip4_lookup_next_nodes {
+	IP4_LOOKUP_NEXT_REWRITE,
+	IP4_LOOKUP_NEXT_PKT_DROP,
+	IP4_LOOKUP_NEXT_MAX,
+};
+
+__rte_experimental
+int rte_node_ip4_route_add(uint32_t ip, uint8_t depth,
+			   uint16_t next_hop,
+			   enum ip4_lookup_next_nodes next_node);
+
+__rte_experimental
+int rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
+			     uint8_t rewrite_len, uint16_t dst_port);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_NODE_IP4_API_H__ */
diff --git a/lib/librte_node/rte_node_version.map b/lib/librte_node/rte_node_version.map
new file mode 100644
index 000000000..a799b0d38
--- /dev/null
+++ b/lib/librte_node/rte_node_version.map
@@ -0,0 +1,9 @@
+EXPERIMENTAL {
+	global:
+
+	rte_node_eth_config;
+	rte_node_ip4_route_add;
+	rte_node_ip4_rewrite_add;
+	rte_node_logtype;
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 4089ce0c3..60d7e5560 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -30,7 +30,7 @@ libraries = [
 	# add pkt framework libs which use other libs from above
 	'port', 'table', 'pipeline',
 	# flow_classify lib depends on pkt framework table lib
-	'flow_classify', 'bpf', 'graph', 'telemetry']
+	'flow_classify', 'bpf', 'graph', 'node', 'telemetry']
 
 if is_windows
 	libraries = ['kvargs','eal'] # only supported libraries for windows
@@ -182,6 +182,9 @@ foreach l:libraries
 
 			dpdk_libraries = [shared_lib] + dpdk_libraries
 			dpdk_static_libraries = [static_lib] + dpdk_static_libraries
+			if libname == 'rte_node'
+				dpdk_graph_nodes = [static_lib]
+			endif
 		endif # sources.length() > 0
 
 		set_variable('shared_rte_' + name, shared_dep)
diff --git a/meson.build b/meson.build
index b7ae9c8d9..811c96421 100644
--- a/meson.build
+++ b/meson.build
@@ -16,6 +16,7 @@ cc = meson.get_compiler('c')
 dpdk_conf = configuration_data()
 dpdk_libraries = []
 dpdk_static_libraries = []
+dpdk_graph_nodes = []
 dpdk_driver_classes = []
 dpdk_drivers = []
 dpdk_extra_ldflags = []
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index e169d7a7b..72e8f81cc 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -99,6 +99,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER)        += -lrte_reorder
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrte_sched
 _LDLIBS-$(CONFIG_RTE_LIBRTE_RCU)            += -lrte_rcu
 _LDLIBS-$(CONFIG_RTE_LIBRTE_GRAPH)          += -lrte_graph
+_LDLIBS-$(CONFIG_RTE_LIBRTE_NODE)           += -lrte_node
 
 ifeq ($(CONFIG_RTE_EXEC_ENV_LINUX),y)
 _LDLIBS-$(CONFIG_RTE_LIBRTE_KNI)            += -lrte_kni
-- 
2.24.1



Thread overview: 31+ messages
2020-01-31 17:01 [dpdk-dev] [RFC PATCH 0/5] graph: introduce graph subsystem jerinj
2020-01-31 17:01 ` [dpdk-dev] [RFC PATCH 1/5] " jerinj
2020-02-02 10:34   ` Stephen Hemminger
2020-02-02 10:35   ` Stephen Hemminger
2020-02-02 11:08     ` Jerin Jacob
2020-02-02 10:38   ` Stephen Hemminger
2020-02-02 11:21     ` Jerin Jacob
2020-02-03  9:14       ` Gaetan Rivet
2020-02-03  9:49         ` Jerin Jacob
2020-01-31 17:01 ` jerinj [this message]
2020-01-31 17:01 ` [dpdk-dev] [RFC PATCH 3/5] test: add graph functional tests jerinj
2020-01-31 17:02 ` [dpdk-dev] [RFC PATCH 4/5] test: add graph performance test cases jerinj
2020-01-31 17:02 ` [dpdk-dev] [RFC PATCH 5/5] example/l3fwd_graph: l3fwd using graph architecture jerinj
2020-01-31 18:34 ` [dpdk-dev] [RFC PATCH 0/5] graph: introduce graph subsystem Ray Kinsella
2020-02-01  5:44   ` Jerin Jacob
2020-02-17  7:19     ` Jerin Jacob
2020-02-17  8:38       ` Thomas Monjalon
2020-02-17 10:58         ` Jerin Jacob
2020-02-21 10:30           ` Jerin Jacob
2020-02-21 11:10             ` Thomas Monjalon
2020-02-21 15:38               ` Mattias Rönnblom
2020-02-21 15:53                 ` dave
2020-02-21 16:04                   ` Thomas Monjalon
2020-02-21 15:56               ` Jerin Jacob
2020-02-21 16:14                 ` Thomas Monjalon
2020-02-22  9:05                   ` Jerin Jacob
2020-02-22  9:52                     ` Thomas Monjalon
2020-02-22 10:24                       ` Jerin Jacob
2020-02-24 10:59                         ` Ray Kinsella
2020-02-25  5:22 ` Honnappa Nagarahalli
2020-02-25  6:14   ` Jerin Jacob
