DPDK patches and discussions
From: Anoob Joseph <anoob.joseph@caviumnetworks.com>
To: Bruce Richardson <bruce.richardson@intel.com>,
	Jerin Jacob <jerin.jacob@caviumnetworks.com>,
	Pablo de Lara <pablo.de.lara.guarch@intel.com>
Cc: Anoob Joseph <anoob.joseph@caviumnetworks.com>,
	Hemant Agrawal <hemant.agrawal@nxp.com>,
	Narayana Prasad <narayanaprasad.athreya@caviumnetworks.com>,
	Nikhil Rao <nikhil.rao@intel.com>,
	Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>,
	Sunil Kumar Kori <sunil.kori@nxp.com>,
	dev@dpdk.org
Subject: [dpdk-dev] [PATCH 20/20] examples/l2fwd: add eventmode for l2fwd
Date: Fri,  8 Jun 2018 22:54:19 +0530
Message-ID: <1528478659-15859-21-git-send-email-anoob.joseph@caviumnetworks.com>
In-Reply-To: <1528478659-15859-1-git-send-email-anoob.joseph@caviumnetworks.com>

Add eventmode support to l2fwd. This uses the rte_eventmode_helper APIs
to set up and use the eventmode capabilities.

Signed-off-by: Anoob Joseph <anoob.joseph@caviumnetworks.com>
---
 examples/l2fwd/l2fwd_worker.c | 815 +++++++++++++++++++++++++++++++++++++++++-
 examples/l2fwd/main.c         |  64 +++-
 2 files changed, 864 insertions(+), 15 deletions(-)
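
Notes:

For reviewers, the sketch below condenses the control flow this patch
wires into l2fwd. It is a simplified outline of the code in the diff
(names match the patch; error handling and the poll-mode loop body are
elided), not a separate implementation:

	/* main(): parse args (including eventmode options), initialize
	 * the devices, then launch workers on all lcores */
	struct rte_eventmode_helper_conf *mode_conf = NULL;

	ret = l2fwd_parse_args(argc, argv, &mode_conf);
	/* ... ethdev port & queue setup as before ... */
	mode_conf->eth_portmask = l2fwd_enabled_port_mask;
	rte_eventmode_helper_initialize_devs(mode_conf);
	rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)mode_conf,
			CALL_MASTER);

	/* l2fwd_launch_one_lcore(): each lcore picks its mode */
	if (mode_conf->mode == RTE_EVENTMODE_HELPER_PKT_TRANSFER_MODE_POLL)
		l2fwd_poll_mode_worker();
	else if (mode_conf->mode ==
			RTE_EVENTMODE_HELPER_PKT_TRANSFER_MODE_EVENT)
		l2fwd_eventmode_worker(mode_conf); /* helper launches the
						    * registered worker that
						    * matches the eventdev
						    * capabilities */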

diff --git a/examples/l2fwd/l2fwd_worker.c b/examples/l2fwd/l2fwd_worker.c
index 56e0bdb..bc63b31 100644
--- a/examples/l2fwd/l2fwd_worker.c
+++ b/examples/l2fwd/l2fwd_worker.c
@@ -25,6 +25,9 @@
 #include <rte_branch_prediction.h>
 #include <rte_ether.h>
 #include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_eventmode_helper.h>
+#include <rte_malloc.h>
 #include <rte_mbuf.h>
 
 #include "l2fwd_common.h"
@@ -138,6 +141,16 @@ l2fwd_periodic_drain_stats_monitor(struct lcore_queue_conf *qconf,
 	}
 }
 
+static inline void
+l2fwd_drain_loop(struct lcore_queue_conf *qconf, struct tsc_tracker *t,
+		int is_master_core)
+{
+	while (!force_quit) {
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, t, is_master_core);
+	}
+}
+
 static void
 l2fwd_mac_updating(struct rte_mbuf *m, unsigned dest_portid)
 {
@@ -180,9 +193,45 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
 	l2fwd_send_pkt(m, dst_port);
 }
 
-/* main processing loop */
+static inline void
+l2fwd_send_single_pkt(struct rte_mbuf *m)
+{
+	l2fwd_send_pkt(m, m->port);
+}
+
+static inline void
+l2fwd_event_pre_forward(struct rte_event *ev, unsigned portid)
+{
+	unsigned dst_port;
+	struct rte_mbuf *m;
+
+	/* Get the mbuf */
+	m = ev->mbuf;
+
+	/* Get the destination port from the tables */
+	dst_port = l2fwd_dst_ports[portid];
+
+	/* Save the destination port in the mbuf */
+	m->port = dst_port;
+
+	/* Perform work */
+	if (mac_updating)
+		l2fwd_mac_updating(m, dst_port);
+}
+
+static inline void
+l2fwd_event_switch_to_atomic(struct rte_event *ev, uint8_t atomic_queue_id)
+{
+	ev->event_type = RTE_EVENT_TYPE_CPU;
+	ev->op = RTE_EVENT_OP_FORWARD;
+	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+	ev->queue_id = atomic_queue_id;
+}
+
+/* poll mode processing loop */
 static void
-l2fwd_main_loop(void)
+l2fwd_poll_mode_worker(void)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	struct rte_mbuf *m;
@@ -241,9 +290,767 @@ l2fwd_main_loop(void)
 	}
 }
 
+/*
+ * Event mode exposes several worker variants; the one launched is
+ * chosen based on the capabilities of the event device and the
+ * scheduling type selected.
+ */
+
+/* Workers registered */
+#define L2FWD_EVENTMODE_WORKERS		4
+
+/*
+ * Event mode worker
+ * Operating mode : Single stage non-burst with atomic scheduling
+ */
+static void
+l2fwd_eventmode_non_burst_atomic_worker(void *args)
+{
+	struct rte_event ev;
+	struct rte_mbuf *pkt;
+	struct rte_eventmode_helper_conf *mode_conf;
+	struct rte_eventmode_helper_event_link_info *links = NULL;
+	unsigned lcore_nb_link = 0;
+	uint32_t lcore_id;
+	unsigned i, nb_rx = 0;
+	unsigned portid;
+	struct lcore_queue_conf *qconf;
+	int is_master_core;
+	struct tsc_tracker tsc = {0};
+
+	/* Get core ID */
+	lcore_id = rte_lcore_id();
+
+	RTE_LOG(INFO, L2FWD,
+		"Launching event mode single stage non-burst woker with "
+		"atomic scheduling on lcore %d\n", lcore_id);
+
+	/* Set the flag if master core */
+	is_master_core = (lcore_id == rte_get_master_lcore()) ? 1 : 0;
+
+	/* Get qconf for this core */
+	qconf = &lcore_queue_conf[lcore_id];
+
+	/* Set drain tsc */
+	tsc.drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+			US_PER_S * BURST_TX_DRAIN_US;
+
+	/* Mode conf will be passed as args */
+	mode_conf = (struct rte_eventmode_helper_conf *)args;
+
+	/* Get the links configured for this lcore */
+	lcore_nb_link = rte_eventmode_helper_get_event_lcore_links(lcore_id,
+			mode_conf, &links);
+
+	/* Check if we have links registered for this lcore */
+	if (lcore_nb_link == 0) {
+		/* No links registered. The core could do periodic drains */
+		l2fwd_drain_loop(qconf, &tsc, is_master_core);
+		goto clean_and_exit;
+	}
+
+	/* We have valid links */
+
+	/* See if it's single link */
+	if (lcore_nb_link == 1)
+		goto single_link_loop;
+	else
+		goto multi_link_loop;
+
+single_link_loop:
+
+	RTE_LOG(INFO, L2FWD, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
+			links[0].event_portid);
+
+	while (!force_quit) {
+
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);
+
+		/* Read packet from event queues */
+		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
+				links[0].event_portid,
+				&ev,     /* events */
+				1,       /* nb_events */
+				0        /* timeout_ticks */);
+
+		if (nb_rx == 0)
+			continue;
+
+		portid = ev.queue_id;
+		port_statistics[portid].rx++;
+		pkt = ev.mbuf;
+
+		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+		l2fwd_simple_forward(pkt, portid);
+	}
+	goto clean_and_exit;
+
+multi_link_loop:
+
+	for (i = 0; i < lcore_nb_link; i++) {
+		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u event_port_id=%u\n",
+				lcore_id, links[i].event_portid);
+	}
+
+	while (!force_quit) {
+
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);
+
+		for (i = 0; i < lcore_nb_link; i++) {
+			/* Read packet from event queues */
+			nb_rx = rte_event_dequeue_burst(links[i].eventdev_id,
+					links[i].event_portid,
+					&ev,     /* events */
+					1,       /* nb_events */
+					0        /* timeout_ticks */);
+
+			if (nb_rx == 0)
+				continue;
+
+			portid = ev.queue_id;
+			port_statistics[portid].rx++;
+			pkt = ev.mbuf;
+
+			rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+			l2fwd_simple_forward(pkt, portid);
+		}
+	}
+	goto clean_and_exit;
+
+clean_and_exit:
+	if (links != NULL)
+		rte_free(links);
+}
+
+/*
+ * Event mode worker
+ * Operating mode : Single stage burst with atomic scheduling
+ */
+static void
+l2fwd_eventmode_burst_atomic_worker(void *args)
+{
+	struct rte_event ev[MAX_PKT_BURST];
+	struct rte_mbuf *pkt;
+	struct rte_eventmode_helper_conf *mode_conf;
+	struct rte_eventmode_helper_event_link_info *links = NULL;
+	unsigned lcore_nb_link = 0;
+	uint32_t lcore_id;
+	unsigned i, j, nb_rx = 0;
+	unsigned portid;
+	struct lcore_queue_conf *qconf;
+	int is_master_core;
+	struct rte_event_port_conf event_port_conf;
+	uint16_t dequeue_len = 0;
+	struct tsc_tracker tsc = {0};
+
+	/* Get core ID */
+	lcore_id = rte_lcore_id();
+
+	RTE_LOG(INFO, L2FWD,
+		"Launching event mode single stage burst woker with "
+		"atomic scheduling on lcore %d\n", lcore_id);
+
+	/* Set the flag if master core */
+	is_master_core = (lcore_id == rte_get_master_lcore()) ? 1 : 0;
+
+	/* Get qconf for this core */
+	qconf = &lcore_queue_conf[lcore_id];
+
+	/* Set drain tsc */
+	tsc.drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+			US_PER_S * BURST_TX_DRAIN_US;
+
+	/* Mode conf will be passed as args */
+	mode_conf = (struct rte_eventmode_helper_conf *)args;
+
+	/* Get the links configured for this lcore */
+	lcore_nb_link = rte_eventmode_helper_get_event_lcore_links(lcore_id,
+			mode_conf, &links);
+
+	/* Check if we have links registered for this lcore */
+	if (lcore_nb_link == 0) {
+		/* No links registered. The core could do periodic drains */
+		l2fwd_drain_loop(qconf, &tsc, is_master_core);
+		goto clean_and_exit;
+	}
+
+	/* We have valid links */
+
+	/* Get the burst size of the event device */
+
+	/* Get the default conf of the first link */
+	rte_event_port_default_conf_get(links[0].eventdev_id,
+			links[0].event_portid,
+			&event_port_conf);
+
+	/* Save the burst size */
+	dequeue_len = event_port_conf.dequeue_depth;
+
+	/* Dequeue len should not exceed MAX_PKT_BURST */
+	if (dequeue_len > MAX_PKT_BURST)
+		dequeue_len = MAX_PKT_BURST;
+
+	/* See if it's single link */
+	if (lcore_nb_link == 1)
+		goto single_link_loop;
+	else
+		goto multi_link_loop;
+
+single_link_loop:
+
+	RTE_LOG(INFO, L2FWD, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
+			links[0].event_portid);
+
+	while (!force_quit) {
+
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);
+
+		/* Read packet from event queues */
+		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
+				links[0].event_portid,
+				ev,             /* events */
+				dequeue_len,    /* nb_events */
+				0               /* timeout_ticks */);
+
+		if (nb_rx == 0)
+			continue;
+
+		for (j = 0; j < nb_rx; j++) {
+			portid = ev[j].queue_id;
+			port_statistics[portid].rx++;
+			pkt = ev[j].mbuf;
+
+			rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+			l2fwd_simple_forward(pkt, portid);
+		}
+	}
+	goto clean_and_exit;
+
+multi_link_loop:
+
+	for (i = 0; i < lcore_nb_link; i++) {
+		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u event_port_id=%u\n",
+				lcore_id, links[i].event_portid);
+	}
+
+	while (!force_quit) {
+
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);
+
+		for (i = 0; i < lcore_nb_link; i++) {
+			/* Read packet from event queues */
+			nb_rx = rte_event_dequeue_burst(links[i].eventdev_id,
+					links[i].event_portid,
+					ev,             /* events */
+					dequeue_len,    /* nb_events */
+					0               /* timeout_ticks */);
+
+			if (nb_rx == 0)
+				continue;
+
+			for (j = 0; j < nb_rx; j++) {
+				portid = ev[j].queue_id;
+				port_statistics[portid].rx++;
+				pkt = ev[j].mbuf;
+
+				rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+				l2fwd_simple_forward(pkt, portid);
+			}
+		}
+	}
+	goto clean_and_exit;
+
+clean_and_exit:
+	if (links != NULL)
+		rte_free(links);
+}
+
+/*
+ * Event mode worker
+ * Operating mode : Single stage non-burst with ordered scheduling
+ */
+static void
+l2fwd_eventmode_non_burst_ordered_worker(void *args)
+{
+	struct rte_event ev;
+	struct rte_mbuf *pkt;
+	struct rte_eventmode_helper_conf *mode_conf;
+	struct rte_eventmode_helper_event_link_info *links = NULL;
+	unsigned lcore_nb_link = 0;
+	uint32_t lcore_id;
+	unsigned i, nb_rx = 0;
+	unsigned portid;
+	struct lcore_queue_conf *qconf;
+	int is_master_core;
+	uint8_t tx_queue;
+	uint8_t eventdev_id;
+	struct tsc_tracker tsc = {0};
+
+	/* Get core ID */
+	lcore_id = rte_lcore_id();
+
+	RTE_LOG(INFO, L2FWD,
+		"Launching event mode single stage non-burst woker with "
+		"ordered scheduling on lcore %d\n", lcore_id);
+
+	/* Set the flag if master core */
+	is_master_core = (lcore_id == rte_get_master_lcore()) ? 1 : 0;
+
+	/* Get qconf for this core */
+	qconf = &lcore_queue_conf[lcore_id];
+
+	/* Set drain tsc */
+	tsc.drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+			US_PER_S * BURST_TX_DRAIN_US;
+
+	/* Mode conf will be passed as args */
+	mode_conf = (struct rte_eventmode_helper_conf *)args;
+
+	/* Get the links configured for this lcore */
+	lcore_nb_link = rte_eventmode_helper_get_event_lcore_links(lcore_id,
+			mode_conf, &links);
+
+	/* Check if we have links registered for this lcore */
+	if (lcore_nb_link == 0) {
+		/* No links registered. The core could do periodic drains */
+		l2fwd_drain_loop(qconf, &tsc, is_master_core);
+		goto clean_and_exit;
+	}
+
+	/* We have valid links */
+
+	/*
+	 * When stage 1 is set to ORDERED scheduling, the event's scheduling
+	 * type needs to be changed to ATOMIC before it can be sent out.
+	 * This ensures that packets are sent out in the same order in which
+	 * they arrived.
+	 */
+
+	/*
+	 * The helper function creates a queue with ATOMIC scheduling for
+	 * this purpose. The worker submits packets to that queue if the
+	 * event did not come from an ATOMIC queue.
+	 */
+
+	/* Get event dev ID from the first link */
+	eventdev_id = links[0].eventdev_id;
+
+	/*
+	 * One queue is reserved for use as the atomic queue for the last
+	 * stage (eth packet tx stage).
+	 */
+	tx_queue = rte_eventmode_helper_get_tx_queue(mode_conf, eventdev_id);
+
+	/* See if it's single link */
+	if (lcore_nb_link == 1)
+		goto single_link_loop;
+	else
+		goto multi_link_loop;
+
+single_link_loop:
+
+	RTE_LOG(INFO, L2FWD, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
+			links[0].event_portid);
+
+	while (!force_quit) {
+
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);
+
+		/* Read packet from event queues */
+		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
+				links[0].event_portid,
+				&ev,     /* events */
+				1,       /* nb_events */
+				0        /* timeout_ticks */);
+
+		if (nb_rx == 0)
+			continue;
+
+		/*
+		 * Check if this event came from the atomic queue.
+		 * If yes, do eth tx.
+		 */
+		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+			l2fwd_send_single_pkt(ev.mbuf);
+			continue;
+		}
+
+		/* Else, we have a fresh packet */
+		portid = ev.queue_id;
+		port_statistics[portid].rx++;
+		pkt = ev.mbuf;
+
+		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+
+		/* Process packet */
+		l2fwd_event_pre_forward(&ev, portid);
+
+		/* Update the scheduling type for tx stage */
+		l2fwd_event_switch_to_atomic(&ev, tx_queue);
+
+		/* Submit the updated event for tx stage */
+		rte_event_enqueue_burst(links[0].eventdev_id,
+				links[0].event_portid,
+				&ev,    /* events */
+				1       /* nb_events */);
+	}
+	goto clean_and_exit;
+
+multi_link_loop:
+
+	for (i = 0; i < lcore_nb_link; i++) {
+		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u event_port_id=%u\n",
+				lcore_id, links[i].event_portid);
+	}
+
+	while (!force_quit) {
+
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);
+
+		for (i = 0; i < lcore_nb_link; i++) {
+			/* Read packet from event queues */
+			nb_rx = rte_event_dequeue_burst(links[i].eventdev_id,
+					links[i].event_portid,
+					&ev,     /* events */
+					1,       /* nb_events */
+					0        /* timeout_ticks */);
+
+			if (nb_rx == 0)
+				continue;
+
+			/*
+			 * Check if this event came from the atomic queue.
+			 * If yes, do eth tx.
+			 */
+			if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+				l2fwd_send_single_pkt(ev.mbuf);
+				continue;
+			}
+
+			/* Else, we have a fresh packet */
+			portid = ev.queue_id;
+			port_statistics[portid].rx++;
+			pkt = ev.mbuf;
+
+			rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+
+			/* Process packet */
+			l2fwd_event_pre_forward(&ev, portid);
+
+			/* Update the scheduling type for tx stage */
+			l2fwd_event_switch_to_atomic(&ev, tx_queue);
+
+			/* Submit the updated event for tx stage */
+			rte_event_enqueue_burst(links[i].eventdev_id,
+					links[i].event_portid,
+					&ev,    /* events */
+					1       /* nb_events */);
+		}
+	}
+	goto clean_and_exit;
+
+clean_and_exit:
+	if (links != NULL)
+		rte_free(links);
+}
+
+/*
+ * Event mode worker
+ * Operating mode : Single stage burst with ordered scheduling
+ */
+static void
+l2fwd_eventmode_burst_ordered_worker(void *args)
+{
+	struct rte_event ev[MAX_PKT_BURST];
+	struct rte_mbuf *pkt;
+	struct rte_eventmode_helper_conf *mode_conf;
+	struct rte_eventmode_helper_event_link_info *links = NULL;
+	unsigned lcore_nb_link = 0;
+	uint32_t lcore_id;
+	unsigned i, j, nb_rx = 0;
+	unsigned portid;
+	struct lcore_queue_conf *qconf;
+	int is_master_core;
+	struct rte_event_port_conf event_port_conf;
+	uint16_t dequeue_len = 0;
+	uint8_t tx_queue;
+	uint8_t eventdev_id;
+	struct tsc_tracker tsc = {0};
+
+	/* Get core ID */
+	lcore_id = rte_lcore_id();
+
+	RTE_LOG(INFO, L2FWD,
+		"Launching event mode single stage burst woker with "
+		"ordered scheduling on lcore %d\n", lcore_id);
+
+	/* Set the flag if master core */
+	is_master_core = (lcore_id == rte_get_master_lcore()) ? 1 : 0;
+
+	/* Get qconf for this core */
+	qconf = &lcore_queue_conf[lcore_id];
+
+	/* Set drain tsc */
+	tsc.drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+			US_PER_S * BURST_TX_DRAIN_US;
+
+	/* Mode conf will be passed as args */
+	mode_conf = (struct rte_eventmode_helper_conf *)args;
+
+	/* Get the links configured for this lcore */
+	lcore_nb_link = rte_eventmode_helper_get_event_lcore_links(lcore_id,
+			mode_conf, &links);
+
+	/* Check if we have links registered for this lcore */
+	if (lcore_nb_link == 0) {
+		/* No links registered. The core could do periodic drains */
+		l2fwd_drain_loop(qconf, &tsc, is_master_core);
+		goto clean_and_exit;
+	}
+
+	/* We have valid links */
+
+	/*
+	 * When stage 1 is set to ORDERED scheduling, the event's scheduling
+	 * type needs to be changed to ATOMIC before it can be sent out.
+	 * This ensures that packets are sent out in the same order in which
+	 * they arrived.
+	 */
+
+	/*
+	 * The helper function creates a queue with ATOMIC scheduling for
+	 * this purpose. The worker submits packets to that queue if the
+	 * event did not come from an ATOMIC queue.
+	 */
+
+	/* Get event dev ID from the first link */
+	eventdev_id = links[0].eventdev_id;
+
+	/*
+	 * One queue is reserved for use as the atomic queue for the last
+	 * stage (eth packet tx stage).
+	 */
+	tx_queue = rte_eventmode_helper_get_tx_queue(mode_conf, eventdev_id);
+
+	/* Get the burst size of the event device */
+
+	/* Get the default conf of the first link */
+	rte_event_port_default_conf_get(links[0].eventdev_id,
+			links[0].event_portid,
+			&event_port_conf);
+
+	/* Save the burst size */
+	dequeue_len = event_port_conf.dequeue_depth;
+
+	/* Dequeue len should not exceed MAX_PKT_BURST */
+	if (dequeue_len > MAX_PKT_BURST)
+		dequeue_len = MAX_PKT_BURST;
+
+	/* See if it's single link */
+	if (lcore_nb_link == 1)
+		goto single_link_loop;
+	else
+		goto multi_link_loop;
+
+single_link_loop:
+
+	RTE_LOG(INFO, L2FWD, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
+			links[0].event_portid);
+
+	while (!force_quit) {
+
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);
+
+		/* Read packet from event queues */
+		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
+				links[0].event_portid,
+				ev,             /* events */
+				dequeue_len,    /* nb_events */
+				0               /* timeout_ticks */);
+
+		if (nb_rx == 0)
+			continue;
+
+		for (j = 0; j < nb_rx; j++) {
+			/*
+			 * Check if this event came from the atomic queue.
+			 * If yes, do eth tx.
+			 */
+			if (ev[j].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+				l2fwd_send_single_pkt(ev[j].mbuf);
+				continue;
+			}
+
+			/* Else, we have a fresh packet */
+			portid = ev[j].queue_id;
+			port_statistics[portid].rx++;
+			pkt = ev[j].mbuf;
+
+			rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+
+			/* Process packet */
+			l2fwd_event_pre_forward(&(ev[j]), portid);
+
+			/* Update the scheduling type for tx stage */
+			l2fwd_event_switch_to_atomic(&(ev[j]), tx_queue);
+
+			/* Submit the updated event for tx stage */
+			rte_event_enqueue_burst(links[0].eventdev_id,
+					links[0].event_portid,
+					&(ev[j]),       /* events */
+					1               /* nb_events */);
+		}
+	}
+	goto clean_and_exit;
+
+multi_link_loop:
+
+	for (i = 0; i < lcore_nb_link; i++) {
+		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u event_port_id=%u\n",
+				lcore_id, links[i].event_portid);
+	}
+
+	while (!force_quit) {
+
+		/* Do periodic operations (buffer drain & stats monitor) */
+		l2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);
+
+		for (i = 0; i < lcore_nb_link; i++) {
+			/* Read packet from event queues */
+			nb_rx = rte_event_dequeue_burst(links[i].eventdev_id,
+					links[i].event_portid,
+					ev,             /* events */
+					dequeue_len,    /* nb_events */
+					0               /* timeout_ticks */);
+
+			if (nb_rx == 0)
+				continue;
+
+			for (j = 0; j < nb_rx; j++) {
+				/*
+				 * Check if this event came from the atomic
+				 * queue. If yes, do eth tx.
+				 */
+				if (ev[j].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+					l2fwd_send_single_pkt(ev[j].mbuf);
+					continue;
+				}
+
+				/* Else, we have a fresh packet */
+				portid = ev[j].queue_id;
+				port_statistics[portid].rx++;
+				pkt = ev[j].mbuf;
+
+				rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+
+				/* Process packet */
+				l2fwd_event_pre_forward(&(ev[j]), portid);
+
+				/* Update the scheduling type for tx stage */
+				l2fwd_event_switch_to_atomic(&(ev[j]),
+						tx_queue);
+
+				/* Submit the updated event for tx stage */
+				rte_event_enqueue_burst(links[i].eventdev_id,
+						links[i].event_portid,
+						&(ev[j]), /* events */
+						1         /* nb_events */);
+			}
+		}
+	}
+	goto clean_and_exit;
+
+clean_and_exit:
+	if (links != NULL)
+		rte_free(links);
+}
+
+static uint8_t
+l2fwd_eventmode_populate_wrkr_params(
+		struct rte_eventmode_helper_app_worker_params *wrkrs)
+{
+	uint8_t nb_wrkr_param = 0;
+	struct rte_eventmode_helper_app_worker_params *wrkr;
+
+	/* Save workers */
+
+	wrkr = wrkrs;
+
+	/* Single stage non-burst with atomic scheduling */
+	wrkr->cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_NON_BURST;
+	wrkr->cap.s1_sched_type = RTE_SCHED_TYPE_ATOMIC;
+	wrkr->nb_stage = 1;
+	wrkr->s1_worker_thread = l2fwd_eventmode_non_burst_atomic_worker;
+
+	nb_wrkr_param++;
+	wrkr++;
+
+	/* Single stage burst with atomic scheduling */
+	wrkr->cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_BURST;
+	wrkr->cap.s1_sched_type = RTE_SCHED_TYPE_ATOMIC;
+	wrkr->nb_stage = 1;
+	wrkr->s1_worker_thread = l2fwd_eventmode_burst_atomic_worker;
+
+	nb_wrkr_param++;
+	wrkr++;
+
+	/* Single stage non-burst with ordered scheduling */
+	wrkr->cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_NON_BURST;
+	wrkr->cap.s1_sched_type = RTE_SCHED_TYPE_ORDERED;
+	wrkr->nb_stage = 1;
+	wrkr->s1_worker_thread = l2fwd_eventmode_non_burst_ordered_worker;
+
+	nb_wrkr_param++;
+	wrkr++;
+
+	/* Single stage burst with ordered scheduling */
+	wrkr->cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_BURST;
+	wrkr->cap.s1_sched_type = RTE_SCHED_TYPE_ORDERED;
+	wrkr->nb_stage = 1;
+	wrkr->s1_worker_thread = l2fwd_eventmode_burst_ordered_worker;
+
+	nb_wrkr_param++;
+	return nb_wrkr_param;
+}
+
+static void
+l2fwd_eventmode_worker(struct rte_eventmode_helper_conf *mode_conf)
+{
+	struct rte_eventmode_helper_app_worker_params
+			l2fwd_wrkr[L2FWD_EVENTMODE_WORKERS] = {0};
+	uint8_t nb_wrkr_param;
+
+	/* Populate l2fwd_wrkr params */
+	nb_wrkr_param = l2fwd_eventmode_populate_wrkr_params(l2fwd_wrkr);
+
+	/*
+	 * The helper function will launch the correct worker after checking
+	 * the event device's capabilities.
+	 */
+	rte_eventmode_helper_launch_worker(mode_conf, l2fwd_wrkr,
+			nb_wrkr_param);
+}
+
 int
-l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
+l2fwd_launch_one_lcore(void *args)
 {
-	l2fwd_main_loop();
+	struct rte_eventmode_helper_conf *mode_conf;
+
+	mode_conf = (struct rte_eventmode_helper_conf *)args;
+
+	if (mode_conf->mode == RTE_EVENTMODE_HELPER_PKT_TRANSFER_MODE_POLL) {
+		/* App is initialized to run in poll mode */
+		l2fwd_poll_mode_worker();
+	} else if (mode_conf->mode ==
+			RTE_EVENTMODE_HELPER_PKT_TRANSFER_MODE_EVENT) {
+		/* App is initialized to run in event mode */
+		l2fwd_eventmode_worker(mode_conf);
+	}
 	return 0;
 }
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index ac81beb..278b9a8 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -38,6 +38,7 @@
 #include <rte_ethdev.h>
 #include <rte_mempool.h>
 #include <rte_mbuf.h>
+#include <rte_eventmode_helper.h>
 
 #include "l2fwd_common.h"
 #include "l2fwd_worker.h"
@@ -69,6 +70,8 @@ l2fwd_usage(const char *prgname)
 		" [-q NQ]",
 		prgname);
 
+	rte_eventmode_helper_print_options_list();
+
 	fprintf(stderr, "\n\n");
 
 	fprintf(stderr,
@@ -79,7 +82,9 @@ l2fwd_usage(const char *prgname)
 		"      When enabled:\n"
 		"       - The source MAC address is replaced by the TX port MAC address\n"
 		"       - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
-		"\n");
+		"");
+
+	rte_eventmode_helper_print_options_description();
 }
 
 static int
@@ -158,12 +163,14 @@ static const struct option lgopts[] = {
 
 /* Parse the argument given in the command line of the application */
 static int
-l2fwd_parse_args(int argc, char **argv)
+l2fwd_parse_args(int argc, char **argv,
+		struct rte_eventmode_helper_conf **mode_conf)
 {
-	int opt, ret, timer_secs;
+	int opt, timer_secs;
 	char **argvopt;
 	int option_index;
 	char *prgname = argv[0];
+	int options_parsed = 0;
 
 	argvopt = argv;
 
@@ -212,12 +219,31 @@ l2fwd_parse_args(int argc, char **argv)
 		}
 	}
 
-	if (optind >= 0)
-		argv[optind-1] = prgname;
+	/* Update argc & argv to move to event mode options */
+	options_parsed = optind-1;
+	argc -= options_parsed;
+	argv += options_parsed;
 
-	ret = optind-1;
-	optind = 1; /* reset getopt lib */
-	return ret;
+	/* Reset getopt lib */
+	optind = 1;
+
+	/* Check for event mode parameters and get the conf prepared */
+	*mode_conf = rte_eventmode_helper_parse_args(argc, argv);
+	if (*mode_conf == NULL) {
+		l2fwd_usage(prgname);
+		return -1;
+	}
+
+	/* Add the number of options parsed */
+	options_parsed += optind-1;
+
+	if (options_parsed >= 0)
+		argv[options_parsed] = prgname;
+
+	/* Reset getopt lib */
+	optind = 1;
+
+	return options_parsed;
 }
 
 /* Check the link status of all ports in up to 9s, and print them finally */
@@ -315,6 +341,7 @@ main(int argc, char **argv)
 	unsigned nb_ports_in_mask = 0;
 	unsigned int nb_lcores = 0;
 	unsigned int nb_mbufs;
+	struct rte_eventmode_helper_conf *mode_conf = NULL;
 
 	/* Set default values for global vars */
 	l2fwd_init_global_vars();
@@ -329,8 +356,12 @@ main(int argc, char **argv)
 	signal(SIGINT, signal_handler);
 	signal(SIGTERM, signal_handler);
 
-	/* parse application arguments (after the EAL ones) */
-	ret = l2fwd_parse_args(argc, argv);
+	/*
+	 * Parse application arguments (after the EAL ones). This also parses
+	 * the event mode options and sets the conf pointer accordingly.
+	 */
+	ret = l2fwd_parse_args(argc, argv, &mode_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
 
@@ -521,9 +552,20 @@ main(int argc, char **argv)
 
 	check_all_ports_link_status(l2fwd_enabled_port_mask);
 
+	/*
+	 * Set the enabled port mask in the helper conf. It is used by the
+	 * helper sub-system while initializing the devices.
+	 */
+	mode_conf->eth_portmask = l2fwd_enabled_port_mask;
+
+	/* Initialize eventmode components */
+	rte_eventmode_helper_initialize_devs(mode_conf);
+
 	ret = 0;
 	/* launch per-lcore init on every lcore */
-	rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
+	rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)mode_conf,
+			CALL_MASTER);
 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0) {
 			ret = -1;
-- 
2.7.4

Thread overview: 31+ messages
2018-06-08 17:23 [dpdk-dev] [PATCH 00/20] add eventmode helper functions Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 01/20] eventdev: add files for eventmode helper Anoob Joseph
2018-06-27  6:20   ` Sunil Kumar Kori
2018-06-28 10:43     ` Joseph, Anoob
2018-06-28 10:47       ` Ananyev, Konstantin
2018-06-28 10:58         ` Joseph, Anoob
2018-06-28 11:44           ` Ananyev, Konstantin
2018-06-28 11:54             ` Joseph, Anoob
2018-07-03  6:27       ` Sunil Kumar Kori
2018-07-03 13:13         ` Joseph, Anoob
2018-07-04 10:49           ` Sunil Kumar Kori
2018-06-08 17:24 ` [dpdk-dev] [PATCH 02/20] eventdev: add routines for logging " Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 03/20] eventdev: add eventmode CL options framework Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 04/20] eventdev: allow application to set ethernet portmask Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 05/20] eventdev: add framework for eventmode conf Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 06/20] eventdev: add common initialize routine for eventmode devs Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 07/20] eventdev: add eventdevice init for eventmode Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 08/20] eventdev: add eventdev port-lcore link Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 09/20] eventdev: add option to specify schedule mode for app stage Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 10/20] eventdev: add placeholder for ethdev init Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 11/20] eventdev: add Rx adapter init in eventmode Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 12/20] eventdev: add routine to validate conf Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 13/20] eventdev: add default conf for event devs field in conf Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 14/20] eventdev: add default conf for Rx adapter conf Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 15/20] eventdev: add default conf for event port-lcore link Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 16/20] eventdev: add routines to display the eventmode conf Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 17/20] eventdev: add routine to access eventmode link info Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 18/20] eventdev: add routine to access event queue for eth Tx Anoob Joseph
2018-06-08 17:24 ` [dpdk-dev] [PATCH 19/20] eventdev: add routine to launch eventmode workers Anoob Joseph
2018-06-08 17:24 ` Anoob Joseph [this message]
2018-06-11  8:32 ` [dpdk-dev] [PATCH 00/20] add eventmode helper functions Jerin Jacob
