DPDK patches and discussions
From: Sunil Kumar Kori <sunil.kori@nxp.com>
To: "dev@dpdk.org" <dev@dpdk.org>
Cc: Hemant Agrawal <hemant.agrawal@nxp.com>,
	Sunil Kumar Kori <sunil.kori@nxp.com>
Subject: Re: [dpdk-dev] [PATCH] examples/l3fwd: adding event queue support
Date: Thu, 12 Apr 2018 06:09:04 +0000	[thread overview]
Message-ID: <AM5PR0401MB266025290EF389AC41C96C9E8FBC0@AM5PR0401MB2660.eurprd04.prod.outlook.com> (raw)
In-Reply-To: <20180319134520.28155-1-sunil.kori@nxp.com>

Gentle reminder to review the RFC.

Regards
Sunil Kumar

-----Original Message-----
From: Sunil Kumar Kori [mailto:sunil.kori@nxp.com] 
Sent: Monday, March 19, 2018 7:15 PM
To: dev@dpdk.org
Cc: Sunil Kumar Kori <sunil.kori@nxp.com>; Hemant Agrawal <hemant.agrawal@nxp.com>
Subject: [PATCH] examples/l3fwd: adding event queue support

This patch set adds eventdev based queue mode support to the l3fwd application:
1. Eventdev support with parallel queues
2. Eventdev support with atomic queues

This patch adds:
- A new command line parameter named "dequeue-mode", which selects the
  dequeue method, i.e. dequeue via eventdev or via polling
  (default is polling). Depending on the dequeue mode:
         a. eventdev: new parameters -e, -a and -l are added to provide
            the eventdev config, adapter config and link configuration
            respectively. The "--config" option is invalid in this case.
         b. poll mode: the application works the same way as today, and
            the eventdev parameters (-e, -a, -l) are invalid.

- Functions are added in l3fwd_em.c and l3fwd_lpm.c for packet I/O
  operations.

The main purpose of this RFC is to get comments on the approach.
This code is *not tested*.
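
For illustration only, a hypothetical invocation in each mode could look as
follows. The values below are made-up examples that merely follow the option
formats described above; since this is an untested RFC they are not verified
command lines:

  # poll mode (existing behaviour), Rx queues given via --config
  ./l3fwd -l 1-2 -n 4 -- -p 0x3 --dequeue-mode 0 --config="(0,0,1),(1,0,2)"

  # eventdev mode: -e (eventdev,event queues,event ports),
  # -a (port,rx queue,queue mode,event queue,priority,eventdev),
  # -l (event port,event queue,eventdev,lcore); --config is not used here
  ./l3fwd -l 1-2 -n 4 -- -p 0x3 --dequeue-mode 1 -e "(0,1,2)" \
          -a "(0,0,1,0,0,0),(1,0,1,0,0,0)" -l "(0,0,0,1),(1,0,0,2)"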

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 examples/l3fwd/Makefile         |   2 +-
 examples/l3fwd/l3fwd.h          |  21 ++
 examples/l3fwd/l3fwd_em.c       | 100 ++++++++
 examples/l3fwd/l3fwd_eventdev.c | 541 ++++++++++++++++++++++++++++++++++++++++
 examples/l3fwd/l3fwd_eventdev.h |  85 +++++++
 examples/l3fwd/l3fwd_lpm.c      | 100 ++++++++
 examples/l3fwd/main.c           | 318 +++++++++++++++++++----
 examples/l3fwd/meson.build      |   2 +-
 8 files changed, 1120 insertions(+), 49 deletions(-)
 create mode 100644 examples/l3fwd/l3fwd_eventdev.c
 create mode 100644 examples/l3fwd/l3fwd_eventdev.h

diff --git a/examples/l3fwd/Makefile b/examples/l3fwd/Makefile
index cccdd9d..94ba537 100644
--- a/examples/l3fwd/Makefile
+++ b/examples/l3fwd/Makefile
@@ -5,7 +5,7 @@
 APP = l3fwd
 
 # all source are stored in SRCS-y
-SRCS-y := main.c l3fwd_lpm.c l3fwd_em.c
+SRCS-y := main.c l3fwd_lpm.c l3fwd_em.c l3fwd_eventdev.c
 
 # Build using pkg-config variables if possible
 $(shell pkg-config --exists libdpdk)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index c962dea..675abd1 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -69,6 +69,17 @@ struct lcore_conf {
 	void *ipv6_lookup_struct;
 } __rte_cache_aligned;
 
+struct l3fwd_lkp_mode {
+	void  (*setup)(int);
+	int   (*check_ptype)(int);
+	rte_rx_callback_fn cb_parse_ptype;
+	int   (*main_loop)(void *);
+	void* (*get_ipv4_lookup_struct)(int);
+	void* (*get_ipv6_lookup_struct)(int);
+};
+
+extern struct l3fwd_lkp_mode l3fwd_lkp;
+
 extern volatile bool force_quit;
 
 /* ethernet addresses of ports */
@@ -81,11 +92,16 @@ extern uint32_t enabled_port_mask;
 /* Used only in exact match mode. */
 extern int ipv6; /**< ipv6 is false by default. */
 extern uint32_t hash_entry_number;
+extern int promiscuous_on;
+extern int numa_on; /**< NUMA is enabled by default. */
+extern uint16_t nb_rxd;
 
 extern xmm_t val_eth[RTE_MAX_ETHPORTS];
 
 extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
+extern struct rte_mempool *pktmbuf_pool[];
+
 /* Send burst of packets on an output interface */
 static inline int
 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
@@ -208,5 +224,10 @@ lpm_get_ipv4_l3fwd_lookup_struct(const int socketid);
 
 void *
 lpm_get_ipv6_l3fwd_lookup_struct(const int socketid);
+int
+prepare_ptype_parser(uint16_t portid, uint16_t queueid);
+
+void
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask);
 
 #endif  /* __L3_FWD_H__ */
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 9dc3b8c..199ff75 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -21,12 +21,14 @@
 #include <rte_mempool.h>
 #include <rte_cycles.h>
 #include <rte_mbuf.h>
+#include <rte_eventdev.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
 #include <rte_udp.h>
 #include <rte_hash.h>
 
 #include "l3fwd.h"
+#include "l3fwd_eventdev.h"
 
 #if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_CRC32)
 #define EM_HASH_CRC 1
@@ -612,6 +614,101 @@ em_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
 	return nb_pkts;
 }
 
+/* main EM processing loop for eventdev*/
+int
+em_eventdev_main_loop(__attribute__((unused)) void *dummy)
+{
+	struct rte_event ev[MAX_PKT_BURST];
+	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	struct rte_event_port_conf event_port_conf;
+	unsigned int lcore_id;
+	uint64_t prev_tsc, diff_tsc, cur_tsc;
+	int i, nb_rx;
+	uint8_t queueid;
+	uint16_t portid, dequeue_len;
+	uint8_t event_port_id = INVALID_EVENDEV_ID;
+	struct lcore_conf *qconf;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+		US_PER_S * BURST_TX_DRAIN_US;
+
+	prev_tsc = 0;
+
+	lcore_id = rte_lcore_id();
+	qconf = &lcore_conf[lcore_id];
+
+	if (qconf->n_rx_queue == 0) {
+		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
+		return 0;
+	}
+
+	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);
+
+	for (i = 0; i < qconf->n_rx_queue; i++) {
+
+		portid = qconf->rx_queue_list[i].port_id;
+		queueid = qconf->rx_queue_list[i].queue_id;
+		RTE_LOG(INFO, L3FWD,
+			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+			lcore_id, portid, queueid);
+	}
+
+	for (i = 0; i < link_config.nb_links; i++) {
+		if (link_config.links[i].lcore_id == lcore_id)
+			event_port_id = link_config.links[i].event_portid;
+	}
+
+	rte_event_port_default_conf_get(event_devices[0].dev_id, event_port_id,
+					&event_port_conf);
+	dequeue_len = event_port_conf.dequeue_depth;
+
+	while (!force_quit) {
+
+		cur_tsc = rte_rdtsc();
+
+		/*
+		 * TX burst queue drain
+		 */
+		diff_tsc = cur_tsc - prev_tsc;
+		if (unlikely(diff_tsc > drain_tsc)) {
+
+			for (i = 0; i < qconf->n_tx_port; ++i) {
+				portid = qconf->tx_port_id[i];
+				if (qconf->tx_mbufs[portid].len == 0)
+					continue;
+				send_burst(qconf,
+					qconf->tx_mbufs[portid].len,
+					portid);
+				qconf->tx_mbufs[portid].len = 0;
+			}
+
+			prev_tsc = cur_tsc;
+		}
+
+		/*
+		 * Read packet from event ports
+		 */
+
+		nb_rx = rte_event_dequeue_burst(event_devices[0].dev_id,
+						event_port_id,
+						ev, dequeue_len, 0);
+		if (nb_rx == 0)
+			continue;
+
+		for (i = 0; i < nb_rx; ++i) {
+			pkts_burst[0] = ev[i].mbuf;
+			portid = ev[i].flow_id;
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
+			l3fwd_em_send_packets(1, pkts_burst, portid, qconf);
+#else
+			l3fwd_em_no_opt_send_packets(1, pkts_burst,
+						     portid, qconf);
+#endif
+		}
+	}
+
+	return 0;
+}
+
 /* main processing loop */
 int
 em_main_loop(__attribute__((unused)) void *dummy)
@@ -631,6 +728,9 @@ em_main_loop(__attribute__((unused)) void *dummy)
 	lcore_id = rte_lcore_id();
 	qconf = &lcore_conf[lcore_id];
 
+	if (lcore_dequeue_mode[lcore_id] == EVENTDEV_DEQUEUE)
+		return em_eventdev_main_loop(dummy);
+
 	if (qconf->n_rx_queue == 0) {
 		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
 		return 0;
diff --git a/examples/l3fwd/l3fwd_eventdev.c b/examples/l3fwd/l3fwd_eventdev.c
new file mode 100644
index 0000000..f7d9b4c
--- /dev/null
+++ b/examples/l3fwd/l3fwd_eventdev.c
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_vect.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
+
+
+#include "l3fwd.h"
+#include "l3fwd_eventdev.h"
+
+
+static struct eventdev_params eventdev_config[RTE_MAX_EVENTDEV_COUNT];
+static uint16_t nb_eventdev_params;
+struct eventdev_info *event_devices;
+static struct adapter_params rx_adapter_config;
+struct link_params link_config;
+enum dequeue_mode lcore_dequeue_mode[RTE_MAX_LCORE];
+
+
+
+int
+parse_eventdev_config(const char *evq_arg)
+{
+	char s[256];
+	const char *p, *p0 = evq_arg;
+	char *end;
+	enum fieldnames {
+		FLD_EVENTDEV_ID = 0,
+		FLD_EVENT_QUEUE,
+		FLD_EVENT_PORT,
+		FLD_COUNT
+	};
+	unsigned long int_fld[FLD_COUNT];
+	char *str_fld[FLD_COUNT];
+	int i;
+	unsigned int size;
+
+	/*First set all eventdev_config to default*/
+	for (i = 0; i < RTE_MAX_EVENTDEV_COUNT; i++) {
+		eventdev_config[i].num_eventqueue = 1;
+		eventdev_config[i].num_eventport = RTE_MAX_LCORE;
+	}
+
+	nb_eventdev_params = 0;
+
+	while ((p = strchr(p0, '(')) != NULL) {
+		++p;
+		if ((p0 = strchr(p, ')')) == NULL)
+			return -1;
+
+		size = p0 - p;
+		if (size >= sizeof(s))
+			return -1;
+
+		snprintf(s, sizeof(s), "%.*s", size, p);
+		if (rte_strsplit(s, sizeof(s), str_fld, FLD_COUNT, ',') !=
+								FLD_COUNT)
+			return -1;
+
+		for (i = 0; i < FLD_COUNT; i++) {
+			errno = 0;
+			int_fld[i] = strtoul(str_fld[i], &end, 0);
+			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+				return -1;
+		}
+
+		if (nb_eventdev_params >= RTE_MAX_EVENTDEV_COUNT) {
+			printf("exceeded max number of eventdev params: %hu\n",
+				nb_eventdev_params);
+			return -1;
+		}
+
+		eventdev_config[nb_eventdev_params].num_eventqueue =
+					(uint8_t)int_fld[FLD_EVENT_QUEUE];
+		eventdev_config[nb_eventdev_params].num_eventport =
+					(uint8_t)int_fld[FLD_EVENT_PORT];
+		eventdev_config[nb_eventdev_params].eventdev_id =
+					(uint8_t)int_fld[FLD_EVENTDEV_ID];
+		++nb_eventdev_params;
+	}
+
+	return 0;
+}
+
+int
+parse_adapter_config(const char *evq_arg)
+{
+	char s[256];
+	const char *p, *p0 = evq_arg;
+	char *end;
+	enum fieldnames {
+		FLD_ETHDEV_ID = 0,
+		FLD_ETHDEV_QID,
+		FLD_EVENT_QID_MODE,
+		FLD_EVENTQ_ID,
+		FLD_EVENT_PRIO,
+		FLD_EVENT_DEVID,
+		FLD_COUNT
+	};
+	unsigned long int_fld[FLD_COUNT];
+	char *str_fld[FLD_COUNT];
+	int i, index = 0, j = 0;
+	unsigned int size;
+
+	index = rx_adapter_config.nb_rx_adapter;
+
+	while ((p = strchr(p0, '(')) != NULL) {
+		j = rx_adapter_config.config[index].nb_connections;
+		++p;
+		if ((p0 = strchr(p, ')')) == NULL)
+			return -1;
+
+		size = p0 - p;
+		if (size >= sizeof(s))
+			return -1;
+
+		snprintf(s, sizeof(s), "%.*s", size, p);
+		if (rte_strsplit(s, sizeof(s), str_fld, FLD_COUNT, ',') !=
+								FLD_COUNT)
+			return -1;
+
+		for (i = 0; i < FLD_COUNT; i++) {
+			errno = 0;
+			int_fld[i] = strtoul(str_fld[i], &end, 0);
+			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+				return -1;
+		}
+
+		if (index >= RTE_MAX_EVENTDEV_COUNT) {
+			printf("exceeded max number of eventdev params: %hu\n",
+				rx_adapter_config.nb_rx_adapter);
+			return -1;
+		}
+
+		rx_adapter_config.config[index].connections[j].ethdev_id =
+					(uint8_t)int_fld[FLD_ETHDEV_ID];
+		rx_adapter_config.config[index].connections[j].ethdev_rx_qid =
+					(uint8_t)int_fld[FLD_ETHDEV_QID];
+		rx_adapter_config.config[index].connections[j].ethdev_rx_qid_mode =
+					(uint8_t)int_fld[FLD_EVENT_QID_MODE];
+		rx_adapter_config.config[index].connections[j].eventq_id =
+					(uint8_t)int_fld[FLD_EVENTQ_ID];
+		rx_adapter_config.config[index].connections[j].event_prio =
+					(uint8_t)int_fld[FLD_EVENT_PRIO];
+		rx_adapter_config.config[index].connections[j].eventdev_id =
+					(uint8_t)int_fld[FLD_EVENT_DEVID];
+		rx_adapter_config.config[index].nb_connections++;
+	}
+
+	rx_adapter_config.nb_rx_adapter++;
+	return 0;
+}
+
+int
+parse_link_config(const char *evq_arg)
+{
+	char s[256];
+	const char *p, *p0 = evq_arg;
+	char *end;
+	enum fieldnames {
+		FLD_EVENT_PORTID = 0,
+		FLD_EVENT_QID,
+		FLD_EVENT_DEVID,
+		FLD_LCORE_ID,
+		FLD_COUNT
+	};
+	unsigned long int_fld[FLD_COUNT];
+	char *str_fld[FLD_COUNT];
+	int i, index = 0;
+	unsigned int size;
+
+	/*First set all adapter_config to default*/
+	memset(&link_config, 0, sizeof(struct link_params));
+	while ((p = strchr(p0, '(')) != NULL) {
+		index = link_config.nb_links;
+		++p;
+		if ((p0 = strchr(p, ')')) == NULL)
+			return -1;
+
+		size = p0 - p;
+		if (size >= sizeof(s))
+			return -1;
+
+		snprintf(s, sizeof(s), "%.*s", size, p);
+		if (rte_strsplit(s, sizeof(s), str_fld, FLD_COUNT, ',') !=
+								FLD_COUNT)
+			return -1;
+
+		for (i = 0; i < FLD_COUNT; i++) {
+			errno = 0;
+			int_fld[i] = strtoul(str_fld[i], &end, 0);
+			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+				return -1;
+		}
+
+		if (index >= RTE_MAX_EVENTDEV_COUNT) {
+			printf("exceeded max number of eventdev params: %hu\n",
+				link_config.nb_links);
+			return -1;
+		}
+
+		link_config.links[index].event_portid =
+					(uint8_t)int_fld[FLD_EVENT_PORTID];
+		link_config.links[index].eventq_id =
+					(uint8_t)int_fld[FLD_EVENT_QID];
+		link_config.links[index].eventdev_id =
+					(uint8_t)int_fld[FLD_EVENT_DEVID];
+		link_config.links[index].lcore_id =
+					(uint8_t)int_fld[FLD_LCORE_ID];
+		lcore_dequeue_mode[link_config.links[index].lcore_id] =
+					EVENTDEV_DEQUEUE;
+		link_config.nb_links++;
+	}
+
+	return 0;
+}
+
+static int
+eventdev_configure(void)
+{
+	int ret = -1;
+	uint8_t i, j;
+	void *ports, *queues;
+	struct rte_event_dev_config eventdev_conf = {0};
+	struct rte_event_dev_info eventdev_def_conf = {0};
+	struct rte_event_queue_conf eventq_conf = {0};
+	struct rte_event_port_conf port_conf = {0};
+	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
+
+	/*First allocate space for event device information*/
+	event_devices = rte_zmalloc("event-dev",
+				sizeof(struct eventdev_info) * nb_eventdev_params, 0);
+	if (event_devices == NULL) {
+		printf("Error in allocating memory for event devices\n");
+		return ret;
+	}
+
+	for (i = 0; i < nb_eventdev_params; i++) {
+		/*Now allocate space for event ports request from user*/
+		ports = rte_zmalloc("event-ports",
+				sizeof(uint8_t) * eventdev_config[i].num_eventport, 0);
+		if (ports == NULL) {
+			printf("Error in allocating memory for event ports\n");
+			rte_free(event_devices);
+			return ret;
+		}
+
+		event_devices[i].port = ports;
+
+		/*Now allocate space for event queues request from user*/
+		queues = rte_zmalloc("event-queues",
+				sizeof(uint8_t) * eventdev_config[i].num_eventqueue, 0);
+		if (queues == NULL) {
+			printf("Error in allocating memory for event queues\n");
+			rte_free(event_devices[i].port);
+			rte_free(event_devices);
+			return ret;
+		}
+
+		event_devices[i].queue = queues;
+		event_devices[i].dev_id = eventdev_config[i].eventdev_id;
+
+		/* get default values of eventdev*/
+		memset(&eventdev_def_conf, 0,
+		       sizeof(struct rte_event_dev_info));
+		ret = rte_event_dev_info_get(event_devices[i].dev_id,
+				       &eventdev_def_conf);
+		if (ret < 0) {
+			printf("Error in getting event device info, devid: %d\n",
+				event_devices[i].dev_id);
+			return ret;
+		}
+
+		memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
+		eventdev_conf.nb_events_limit = -1;
+		eventdev_conf.nb_event_queues =
+					eventdev_config[i].num_eventqueue;
+		eventdev_conf.nb_event_ports =
+					eventdev_config[i].num_eventport;
+		eventdev_conf.nb_event_queue_flows =
+				eventdev_def_conf.max_event_queue_flows;
+		eventdev_conf.nb_event_port_dequeue_depth =
+				eventdev_def_conf.max_event_port_dequeue_depth;
+		eventdev_conf.nb_event_port_enqueue_depth =
+				eventdev_def_conf.max_event_port_enqueue_depth;
+
+		ret = rte_event_dev_configure(event_devices[i].dev_id,
+					&eventdev_conf);
+		if (ret < 0) {
+			printf("Error in configuring event device\n");
+			return ret;
+		}
+
+		memset(&eventq_conf, 0, sizeof(struct rte_event_queue_conf));
+		eventq_conf.nb_atomic_flows = 1;
+		eventq_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+		for (j = 0; j < eventdev_config[i].num_eventqueue; j++) {
+			ret = rte_event_queue_setup(event_devices[i].dev_id, j,
+					      &eventq_conf);
+			if (ret < 0) {
+				printf("Error in event queue setup\n");
+				return ret;
+			}
+			event_devices[i].queue[j] = j;
+		}
+
+		for (j = 0; j <  eventdev_config[i].num_eventport; j++) {
+			ret = rte_event_port_setup(event_devices[i].dev_id, j, NULL);
+			if (ret < 0) {
+				printf("Error in event port setup\n");
+				return ret;
+			}
+			event_devices[i].port[j] = j;
+		}
+	}
+
+	for (i = 0; i < rx_adapter_config.nb_rx_adapter; i++) {
+		for (j = 0; j < rx_adapter_config.config[i].nb_connections; j++) {
+			ret = rte_event_eth_rx_adapter_create(j,
+					rx_adapter_config.config[i].connections[j].eventdev_id,
+					&port_conf);
+			if (ret < 0) {
+				printf("Error in event eth adapter creation\n");
+				return ret;
+			}
+			rx_adapter_config.config[i].connections[j].adapter_id =
+					j;
+		}
+	}
+
+	for (j = 0; j <  link_config.nb_links; j++) {
+		ret = rte_event_port_link(link_config.links[j].eventdev_id,
+				    link_config.links[j].event_portid,
+				    &link_config.links[j].eventq_id, NULL, 1);
+		if (ret < 0) {
+			printf("Error in event port linking\n");
+			return ret;
+		}
+	}
+
+	queue_conf.rx_queue_flags =
+				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+
+	for (i = 0; i <  rx_adapter_config.nb_rx_adapter; i++) {
+		for (j = 0; j < rx_adapter_config.config[i].nb_connections; j++) {
+			queue_conf.ev.queue_id =
+				rx_adapter_config.config[i].connections[j].eventq_id;
+			queue_conf.ev.priority =
+				rx_adapter_config.config[i].connections[j].event_prio;
+			queue_conf.ev.flow_id =
+				rx_adapter_config.config[i].connections[j].ethdev_id;
+			queue_conf.ev.sched_type =
+				rx_adapter_config.config[i].connections[j].ethdev_rx_qid_mode;
+			ret = rte_event_eth_rx_adapter_queue_add(
+				rx_adapter_config.config[i].connections[j].adapter_id,
+				rx_adapter_config.config[i].connections[j].ethdev_id,
+				rx_adapter_config.config[i].connections[j].ethdev_rx_qid,
+				&queue_conf);
+			if (ret < 0) {
+				printf("Error in adding eth queue in event adapter\n");
+				return ret;
+			}
+		}
+	}
+
+	for (i = 0; i < nb_eventdev_params; i++) {
+		ret = rte_event_dev_start(event_devices[i].dev_id);
+		if (ret < 0) {
+			printf("Error in starting event device, devid: %d\n",
+				event_devices[i].dev_id);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+
+int
+config_eventdev(void)
+{
+	struct lcore_conf *qconf;
+	struct rte_eth_dev_info dev_info;
+	int i, j, ret;
+	unsigned nb_ports;
+	uint16_t queueid, portid;
+	unsigned lcore_id;
+	uint8_t queue, socketid;
+
+	nb_ports = rte_eth_dev_count();
+
+	/* Rx queue configuration is to be done */
+	for (i = 0; i < rx_adapter_config.nb_rx_adapter; i++) {
+		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
+		fflush(stdout);
+		/* init RX queues */
+		for (j = 0; j < rx_adapter_config.config[i].nb_connections; j++) {
+			struct rte_eth_dev *dev;
+			struct rte_eth_conf *conf;
+			struct rte_eth_rxconf rxq_conf;
+
+			portid = rx_adapter_config.config[i].connections[j].ethdev_id;
+			queueid = rx_adapter_config.config[i].connections[j].ethdev_rx_qid;
+			dev = &rte_eth_devices[portid];
+			conf = &dev->data->dev_conf;
+
+			if (numa_on)
+				socketid =
+				(uint8_t)rte_lcore_to_socket_id(lcore_id);
+			else
+				socketid = 0;
+
+			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
+			fflush(stdout);
+
+			rte_eth_dev_info_get(portid, &dev_info);
+			rxq_conf = dev_info.default_rxconf;
+			rxq_conf.offloads = conf->rxmode.offloads;
+			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
+					socketid,
+					&rxq_conf,
+					pktmbuf_pool[socketid]);
+			if (ret < 0)
+				rte_exit(EXIT_FAILURE,
+				"rte_eth_rx_queue_setup: err=%d, port=%d\n",
+				ret, portid);
+		}
+	}
+
+	printf("\n");
+
+	ret = eventdev_configure();
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE,
+			"event dev configure: err=%d\n", ret);
+
+	/* start ports */
+	for (portid = 0; portid < nb_ports; portid++) {
+		if ((enabled_port_mask & (1 << portid)) == 0) {
+			continue;
+		}
+		/* Start device */
+		ret = rte_eth_dev_start(portid);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				"rte_eth_dev_start: err=%d, port=%d\n",
+				ret, portid);
+
+		/*
+		 * If enabled, put device in promiscuous mode.
+		 * This allows IO forwarding mode to forward packets
+		 * to itself through 2 cross-connected  ports of the
+		 * target machine.
+		 */
+		if (promiscuous_on)
+			rte_eth_promiscuous_enable(portid);
+	}
+
+	printf("\n");
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_is_enabled(lcore_id) == 0)
+			continue;
+		qconf = &lcore_conf[lcore_id];
+		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
+			portid = qconf->rx_queue_list[queue].port_id;
+			queueid = qconf->rx_queue_list[queue].queue_id;
+			if (prepare_ptype_parser(portid, queueid) == 0)
+				rte_exit(EXIT_FAILURE, "ptype check fails\n");
+		}
+	}
+
+
+	check_all_ports_link_status(nb_ports, enabled_port_mask);
+
+	ret = 0;
+	/* launch per-lcore init on every lcore */
+	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MASTER);
+	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		if (rte_eal_wait_lcore(lcore_id) < 0) {
+			ret = -1;
+			break;
+		}
+	}
+
+	/* stop ports */
+	for (portid = 0; portid < nb_ports; portid++) {
+		if ((enabled_port_mask & (1 << portid)) == 0)
+			continue;
+		printf("Closing port %d...", portid);
+		rte_eth_dev_stop(portid);
+		rte_eth_dev_close(portid);
+		printf(" Done\n");
+	}
+	printf("Bye...\n");
+
+	return ret;
+}
diff --git a/examples/l3fwd/l3fwd_eventdev.h b/examples/l3fwd/l3fwd_eventdev.h
new file mode 100644
index 0000000..6a778b0
--- /dev/null
+++ b/examples/l3fwd/l3fwd_eventdev.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __L3_FWD_EVENTDEV_H__
+#define __L3_FWD_EVENTDEV_H__
+
+#define INVALID_EVENDEV_ID 0xFF
+
+#define RTE_MAX_EVENTDEV_COUNT	RTE_MAX_LCORE
+
+/*
+ * Default RX Dequeue size
+ */
+#define MAX_EVENTDEV_PKT_BURST     8
+
+struct eventdev_params {
+	uint8_t num_eventqueue;
+	uint8_t num_eventport;
+	uint8_t eventdev_id;
+};
+
+struct connection_info {
+	uint8_t ethdev_id;
+	uint8_t eventq_id;
+	uint8_t event_prio;
+	uint8_t ethdev_rx_qid;
+	int32_t ethdev_rx_qid_mode;
+	int32_t eventdev_id;
+	int32_t adapter_id;
+};
+
+struct adapter_config {
+	struct connection_info connections[RTE_MAX_EVENTDEV_COUNT];
+	uint8_t nb_connections;
+};
+
+struct adapter_params {
+	struct adapter_config config[RTE_MAX_EVENTDEV_COUNT];
+	uint8_t nb_rx_adapter;
+};
+
+struct eventdev_info {
+	uint8_t dev_id;
+	uint8_t *port;
+	uint8_t *queue;
+};
+
+struct link_info {
+	uint8_t event_portid;
+	uint8_t eventq_id;
+	uint8_t eventdev_id;
+	uint8_t lcore_id;
+};
+struct link_params {
+	struct link_info links[RTE_MAX_EVENTDEV_COUNT];
+	uint8_t nb_links;
+};
+
+enum dequeue_mode {
+	POLL_DEQUEUE = 0,
+	EVENTDEV_DEQUEUE,
+};
+
+extern struct link_params link_config;
+extern struct eventdev_info *event_devices;
+extern enum dequeue_mode lcore_dequeue_mode[RTE_MAX_LCORE];
+
+int
+em_eventdev_main_loop(__attribute__((unused)) void *dummy);
+
+int
+lpm_eventdev_main_loop(__attribute__((unused)) void *dummy);
+
+int
+parse_eventdev_config(const char *evq_arg);
+int
+parse_adapter_config(const char *evq_arg);
+int
+parse_link_config(const char *evq_arg);
+
+int
+config_eventdev(void);
+
+#endif  /* __L3_FWD_EVENTDEV_H__ */
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index a747126..b2a3c1d 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -25,8 +25,10 @@
 #include <rte_udp.h>
 #include <rte_lpm.h>
 #include <rte_lpm6.h>
+#include <rte_eventdev.h>
 
 #include "l3fwd.h"
+#include "l3fwd_eventdev.h"
 
 struct ipv4_l3fwd_lpm_route {
 	uint32_t ip;
@@ -168,6 +170,101 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 #include "l3fwd_lpm.h"
 #endif
 
+/* main LPM processing loop for eventdev*/
+int
+lpm_eventdev_main_loop(__attribute__((unused)) void *dummy)
+{
+	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	struct rte_event ev[MAX_PKT_BURST];
+	struct rte_event_port_conf event_port_conf;
+	unsigned int lcore_id;
+	uint64_t prev_tsc, diff_tsc, cur_tsc;
+	int i, nb_rx;
+	uint16_t portid, dequeue_len;
+	uint8_t event_port_id = INVALID_EVENDEV_ID;
+	uint8_t queueid;
+	struct lcore_conf *qconf;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+		US_PER_S * BURST_TX_DRAIN_US;
+
+	prev_tsc = 0;
+
+	lcore_id = rte_lcore_id();
+	qconf = &lcore_conf[lcore_id];
+
+	if (qconf->n_rx_queue == 0) {
+		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
+		return 0;
+	}
+
+	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);
+
+	for (i = 0; i < qconf->n_rx_queue; i++) {
+
+		portid = qconf->rx_queue_list[i].port_id;
+		queueid = qconf->rx_queue_list[i].queue_id;
+		RTE_LOG(INFO, L3FWD,
+			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+			lcore_id, portid, queueid);
+	}
+
+	for (i = 0; i < link_config.nb_links; i++) {
+		if (link_config.links[i].lcore_id == lcore_id)
+			event_port_id = link_config.links[i].event_portid;
+	}
+
+	rte_event_port_default_conf_get(event_devices[0].dev_id, event_port_id,
+					&event_port_conf);
+	dequeue_len = event_port_conf.dequeue_depth;
+
+	while (!force_quit) {
+
+		cur_tsc = rte_rdtsc();
+
+		/*
+		 * TX burst queue drain
+		 */
+		diff_tsc = cur_tsc - prev_tsc;
+		if (unlikely(diff_tsc > drain_tsc)) {
+
+			for (i = 0; i < qconf->n_tx_port; ++i) {
+				portid = qconf->tx_port_id[i];
+				if (qconf->tx_mbufs[portid].len == 0)
+					continue;
+				send_burst(qconf,
+					qconf->tx_mbufs[portid].len,
+					portid);
+				qconf->tx_mbufs[portid].len = 0;
+			}
+
+			prev_tsc = cur_tsc;
+		}
+
+		/*
+		 * Read packet from event ports
+		 */
+		nb_rx = rte_event_dequeue_burst(event_devices[0].dev_id,
+						event_port_id,
+						ev, dequeue_len, 0);
+		if (nb_rx == 0)
+			continue;
+
+		for (i = 0; i < nb_rx; i++) {
+			pkts_burst[0] = ev[i].mbuf;
+			portid = ev[i].flow_id;
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \
+			 || defined RTE_ARCH_PPC_64
+			l3fwd_lpm_send_packets(1, pkts_burst, portid, qconf);
+#else
+			l3fwd_lpm_no_opt_send_packets(1, pkts_burst,
+						      portid, qconf);
+#endif /* X86 */
+		}
+	}
+
+	return 0;
+}
+
 /* main processing loop */
 int
 lpm_main_loop(__attribute__((unused)) void *dummy)
@@ -187,6 +284,9 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
 	lcore_id = rte_lcore_id();
 	qconf = &lcore_conf[lcore_id];
 
+	if (lcore_dequeue_mode[lcore_id] == EVENTDEV_DEQUEUE)
+		return lpm_eventdev_main_loop(dummy);
+
 	if (qconf->n_rx_queue == 0) {
 		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
 		return 0;
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index e7111fa..eff1ac5 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -46,6 +46,7 @@
 #include <cmdline_parse_etheraddr.h>
 
 #include "l3fwd.h"
+#include "l3fwd_eventdev.h"
 
 /*
  * Configurable number of RX/TX ring descriptors
@@ -59,17 +60,17 @@
 #define MAX_LCORE_PARAMS 1024
 
 /* Static global variables used within this file. */
-static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
 
 /**< Ports set in promiscuous mode off by default. */
-static int promiscuous_on;
+int promiscuous_on;
 
 /* Select Longest-Prefix or Exact match. */
 static int l3fwd_lpm_on;
 static int l3fwd_em_on;
 
-static int numa_on = 1; /**< NUMA is enabled by default. */
+int numa_on = 1; /**< NUMA is enabled by default. */
 static int parse_ptype; /**< Parse packet type using rx callback, and */
 			/**< disabled by default */
 
@@ -90,6 +91,9 @@ uint32_t enabled_port_mask;
 int ipv6; /**< ipv6 is false by default. */
 uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
 
+/** <Application dq mode. Default is Poll. */
+static int dq_mode;
+
 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
 struct lcore_params {
@@ -135,18 +139,9 @@ static struct rte_eth_conf port_conf = {
 	},
 };
 
-static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
-
-struct l3fwd_lkp_mode {
-	void  (*setup)(int);
-	int   (*check_ptype)(int);
-	rte_rx_callback_fn cb_parse_ptype;
-	int   (*main_loop)(void *);
-	void* (*get_ipv4_lookup_struct)(int);
-	void* (*get_ipv6_lookup_struct)(int);
-};
+struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
 
-static struct l3fwd_lkp_mode l3fwd_lkp;
+struct l3fwd_lkp_mode l3fwd_lkp;
 
 static struct l3fwd_lkp_mode l3fwd_em_lkp = {
 	.setup                  = setup_hash,
@@ -281,6 +276,14 @@ print_usage(const char *prgname)
 		" [-P]"
 		" [-E]"
 		" [-L]"
+		" [-e] eventdev config (eventdev, No. of event queues, No. of event ports)"
+		"		[,(eventdev,No. of event queues,No. of event ports)]"
+		" [-a] adapter config (port, queue, queue mode, event queue, event priority,"
+		"		eventdev)[,(port, queue, queue mode, event queue,"
+		"		event priority,eventdev)]"
+		" [-l] port link config (event port, event queue,eventdev,lcore)"
+		"		[,(event port,event queue,eventdev,lcore)]"
+		" --dequeue-mode 0 or 1"
 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
 		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
 		" [--enable-jumbo [--max-pkt-len PKTLEN]]"
@@ -293,7 +296,21 @@ print_usage(const char *prgname)
 		"  -P : Enable promiscuous mode\n"
 		"  -E : Enable exact match\n"
 		"  -L : Enable longest prefix match (default)\n"
-		"  --config (port,queue,lcore): Rx queue configuration\n"
+		"  -e : Event dev configuration\n"
+		"  (Eventdev ID,Number of event queues,Number of event ports)\n"
+		"		Valid only for dequeue-mode = 1\n"
+		"  -a : Adapter configuration\n"
+		"	(Ethdev Port ID,Ethdev Rx Queue ID,Ethdev Rx"
+		"	QueueID mode, Event Queue ID,"
+		"	Event Priority,Eventdev ID)\n"
+		"		Valid only for dequeue-mode = 1\n"
+		"  -l : Event port and Event Queue link configuration\n"
+		"	(Event Port ID,Event Queue ID,Eventdev ID,lcore)\n"
+		"		Valid only for dequeue-mode = 1\n"
+		"  --dequeue-mode 0: Dequeue packet via polling (default)\n"
+		"  			1: Dequeue packet via eventdev\n"
+		"  --config (port,queue,lcore): Rx queue configuration."
+		"				  Valid only for dequeue-mode = 0\n"
 		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
 		"  --enable-jumbo: Enable jumbo frames\n"
 		"  --max-pkt-len: Under the premise of enabling jumbo,\n"
@@ -445,6 +462,9 @@ static const char short_options[] =
 	"P"   /* promiscuous */
 	"L"   /* enable long prefix match */
 	"E"   /* enable exact match */
+	"e:"  /* Event Device configuration */
+	"a:"  /* Rx Adapter configuration */
+	"l:"  /* Event Queue and Adapter link configuration */
 	;
 
 #define CMD_LINE_OPT_CONFIG "config"
@@ -454,6 +474,7 @@ static const char short_options[] =
 #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
+#define CMD_LINE_OPT_DEQUEUE_MODE "dequeue-mode"
 enum {
 	/* long options mapped to a short option */
 
@@ -467,6 +488,7 @@ enum {
 	CMD_LINE_OPT_ENABLE_JUMBO_NUM,
 	CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
 	CMD_LINE_OPT_PARSE_PTYPE_NUM,
+	CMD_LINE_OPT_DEQUEUE_MODE_NUM,
 };
 
 static const struct option lgopts[] = {
@@ -477,6 +499,7 @@ static const struct option lgopts[] = {
 	{CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
 	{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
 	{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
+	{CMD_LINE_OPT_DEQUEUE_MODE, 1, 0, CMD_LINE_OPT_DEQUEUE_MODE_NUM},
 	{NULL, 0, 0, 0}
 };
 
@@ -496,7 +519,7 @@ static const struct option lgopts[] = {
 
 /* Parse the argument given in the command line of the application */
 static int
-parse_args(int argc, char **argv)
+parse_args_poll(int argc, char **argv)
 {
 	int opt, ret;
 	char **argvopt;
@@ -635,6 +658,194 @@ parse_args(int argc, char **argv)
 	return ret;
 }
 
+/* Parse the argument given in the command line of the application */
+static int
+parse_args_eventdev(int argc, char **argv)
+{
+	int opt, ret;
+	char **argvopt;
+	int option_index;
+	char *prgname = argv[0];
+
+	argvopt = argv;
+
+	/* Error or normal output strings. */
+	while ((opt = getopt_long(argc, argvopt, short_options,
+				lgopts, &option_index)) != EOF) {
+
+		switch (opt) {
+		/* portmask */
+		case 'p':
+			enabled_port_mask = parse_portmask(optarg);
+			if (enabled_port_mask == 0) {
+				fprintf(stderr, "Invalid portmask\n");
+				print_usage(prgname);
+				return -1;
+			}
+			break;
+
+		case 'P':
+			promiscuous_on = 1;
+			break;
+
+		case 'E':
+			l3fwd_em_on = 1;
+			break;
+
+		case 'L':
+			l3fwd_lpm_on = 1;
+			break;
+
+		/*Event device configuration*/
+		case 'e':
+			ret = parse_eventdev_config(optarg);
+			if (ret < 0) {
+				printf("invalid event device configuration\n");
+				print_usage(prgname);
+				return -1;
+			}
+			break;
+
+		/*Rx adapter configuration*/
+		case 'a':
+			ret = parse_adapter_config(optarg);
+			if (ret < 0) {
+				printf("invalid Rx adapter configuration\n");
+				print_usage(prgname);
+				return -1;
+			}
+			break;
+
+		/*Event Queue and Adapter Link configuration*/
+		case 'l':
+			ret = parse_link_config(optarg);
+			if (ret < 0) {
+				printf("invalid Link configuration\n");
+				print_usage(prgname);
+				return -1;
+			}
+			break;
+
+		case CMD_LINE_OPT_ETH_DEST_NUM:
+			parse_eth_dest(optarg);
+			break;
+
+		case CMD_LINE_OPT_NO_NUMA_NUM:
+			numa_on = 0;
+			break;
+
+		case CMD_LINE_OPT_IPV6_NUM:
+			ipv6 = 1;
+			break;
+
+		case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
+			const struct option lenopts = {
+				"max-pkt-len", required_argument, 0, 0
+			};
+
+			port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+			port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+			/*
+			 * if no max-pkt-len set, use the default
+			 * value ETHER_MAX_LEN.
+			 */
+			if (getopt_long(argc, argvopt, "",
+					&lenopts, &option_index) == 0) {
+				ret = parse_max_pkt_len(optarg);
+				if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
+					fprintf(stderr,
+						"invalid maximum packet length\n");
+					print_usage(prgname);
+					return -1;
+				}
+				port_conf.rxmode.max_rx_pkt_len = ret;
+			}
+			break;
+		}
+
+		case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
+			ret = parse_hash_entry_number(optarg);
+			if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
+				hash_entry_number = ret;
+			} else {
+				fprintf(stderr, "invalid hash entry number\n");
+				print_usage(prgname);
+				return -1;
+			}
+			break;
+
+		case CMD_LINE_OPT_PARSE_PTYPE_NUM:
+			printf("soft parse-ptype is enabled\n");
+			parse_ptype = 1;
+			break;
+
+		default:
+			print_usage(prgname);
+			return -1;
+		}
+	}
+
+	/* If both LPM and EM are selected, return error. */
+	if (l3fwd_lpm_on && l3fwd_em_on) {
+		fprintf(stderr, "LPM and EM are mutually exclusive, select only one\n");
+		return -1;
+	}
+
+	/*
+	 * Nothing is selected, pick longest-prefix match
+	 * as default match.
+	 */
+	if (!l3fwd_lpm_on && !l3fwd_em_on) {
+		fprintf(stderr, "LPM or EM none selected, default LPM on\n");
+		l3fwd_lpm_on = 1;
+	}
+
+	/*
+	 * ipv6 and hash flags are valid only for
+	 * exact match, reset them to default for
+	 * longest-prefix match.
+	 */
+	if (l3fwd_lpm_on) {
+		ipv6 = 0;
+		hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
+	}
+
+	if (optind >= 0)
+		argv[optind-1] = prgname;
+
+	ret = optind-1;
+	optind = 1; /* reset getopt lib */
+	return ret;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+parse_args(int argc, char **argv)
+{
+	int opt;
+	char **argvopt;
+	int option_index;
+	char *end = NULL;
+
+	argvopt = argv;
+
+	/* Error or normal output strings. */
+	while ((opt = getopt_long(argc, argvopt, short_options,
+				lgopts, &option_index)) != EOF) {
+		/* long options */
+		if (opt == CMD_LINE_OPT_DEQUEUE_MODE_NUM)  {
+				dq_mode = strtoul(optarg, &end, 10);
+				break;
+		}
+	}
+
+	if (dq_mode == EVENTDEV_DEQUEUE)
+		return parse_args_eventdev(argc, argv);
+
+	return parse_args_poll(argc, argv);
+}
+
 static void
 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
 {
@@ -693,7 +904,7 @@ init_mem(unsigned nb_mbuf)
 }
 
 /* Check the link status of all ports in up to 9s, and print them finally */
-static void
+void
 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 {
 #define CHECK_INTERVAL 100 /* 100ms */
@@ -761,7 +972,7 @@ signal_handler(int signum)
 	}
 }
 
-static int
+int
 prepare_ptype_parser(uint16_t portid, uint16_t queueid)
 {
 	if (parse_ptype) {
@@ -783,8 +994,8 @@ prepare_ptype_parser(uint16_t portid, uint16_t queueid)
 	return 0;
 }
 
-int
-main(int argc, char **argv)
+static int
+config_poll(void)
 {
 	struct lcore_conf *qconf;
 	struct rte_eth_dev_info dev_info;
@@ -796,36 +1007,15 @@ main(int argc, char **argv)
 	uint32_t n_tx_queue, nb_lcores;
 	uint8_t nb_rx_queue, queue, socketid;
 
-	/* init EAL */
-	ret = rte_eal_init(argc, argv);
-	if (ret < 0)
-		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
-	argc -= ret;
-	argv += ret;
+	if (dq_mode != EVENTDEV_DEQUEUE) {
 
-	force_quit = false;
-	signal(SIGINT, signal_handler);
-	signal(SIGTERM, signal_handler);
+		if (check_lcore_params() < 0)
+			rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
 
-	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
-	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
-		dest_eth_addr[portid] =
-			ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
-		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
+		ret = init_lcore_rx_queues();
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
 	}
-
-	/* parse application arguments (after the EAL ones) */
-	ret = parse_args(argc, argv);
-	if (ret < 0)
-		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
-
-	if (check_lcore_params() < 0)
-		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
-
-	ret = init_lcore_rx_queues();
-	if (ret < 0)
-		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
-
 	nb_ports = rte_eth_dev_count();
 
 	if (check_port_config(nb_ports) < 0)
@@ -927,6 +1117,8 @@ main(int argc, char **argv)
 		}
 		printf("\n");
 	}
+	if (dq_mode == EVENTDEV_DEQUEUE)
+		return config_eventdev();
 
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 		if (rte_lcore_is_enabled(lcore_id) == 0)
@@ -1032,3 +1224,35 @@ main(int argc, char **argv)
 
 	return ret;
 }
+
+int
+main(int argc, char **argv)
+{
+	int ret;
+	uint16_t portid;
+
+	/* init EAL */
+	ret = rte_eal_init(argc, argv);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+	argc -= ret;
+	argv += ret;
+
+	force_quit = false;
+	signal(SIGINT, signal_handler);
+	signal(SIGTERM, signal_handler);
+
+	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
+	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+		dest_eth_addr[portid] =
+			ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
+		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
+	}
+
+	/* parse application arguments (after the EAL ones) */
+	ret = parse_args(argc, argv);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
+
+	return config_poll();
+}
diff --git a/examples/l3fwd/meson.build b/examples/l3fwd/meson.build
index 6dd4b90..e6fe32b 100644
--- a/examples/l3fwd/meson.build
+++ b/examples/l3fwd/meson.build
@@ -8,5 +8,5 @@
 
 deps += ['hash', 'lpm']
 sources = files(
-	'l3fwd_em.c', 'l3fwd_lpm.c', 'main.c'
+	'l3fwd_em.c', 'l3fwd_lpm.c', 'l3fwd_eventdev.c', 'main.c'
 )
--
2.9.3


Thread overview: 4+ messages
2018-03-19 13:45 Sunil Kumar Kori
2018-04-12  6:09 ` Sunil Kumar Kori [this message]
2018-04-12 14:03   ` Bruce Richardson
2018-04-12 14:20     ` Jerin Jacob
