From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, David Christensen <drc@linux.vnet.ibm.com>,
	"Ruifeng Wang" <ruifeng.wang@arm.com>,
	Bruce Richardson <bruce.richardson@intel.com>,
	Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [PATCH v3 2/5] examples/l3fwd: split processing and send stages
Date: Sun, 11 Sep 2022 23:42:46 +0530	[thread overview]
Message-ID: <20220911181250.2286-2-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20220911181250.2286-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Split the packet processing stage from the packet send stage, since
the send stage is not common to the poll and event modes.
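
For context, a minimal sketch of the resulting shape (simplified, not
the patch code itself; lookup_dst_port() and rewrite_eth_header() are
hypothetical stand-ins for the real per-path helpers such as
em_get_dst_port()/lpm_get_dst_port() and process_packet()/
processx4_step3()):

static inline void
xx_process_packets(int nb_rx, struct rte_mbuf **pkts, uint16_t *dst_port,
		   uint16_t portid, struct lcore_conf *qconf,
		   const uint8_t do_step3)
{
	int32_t j;

	for (j = 0; j < nb_rx; j++) {
		/* Steps 1-2: route lookup fills dst_port[]. */
		dst_port[j] = lookup_dst_port(qconf, pkts[j], portid);
		/* Step 3: Ethernet header rewrite, only when requested. */
		if (do_step3)
			rewrite_eth_header(pkts[j], &dst_port[j]);
	}
}

/* Poll mode keeps its old entry point: process, then send. */
static inline void
xx_send_packets(int nb_rx, struct rte_mbuf **pkts, uint16_t portid,
		struct lcore_conf *qconf)
{
	uint16_t dst_port[MAX_PKT_BURST];

	xx_process_packets(nb_rx, pkts, dst_port, portid, qconf, 0);
	send_packets_multi(qconf, pkts, dst_port, nb_rx);
}

Because do_step3 is a compile-time constant at every call site of these
static inline helpers, the compiler can eliminate the dead branches, so
the poll-mode fast path should not pay for the split.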

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 examples/l3fwd/l3fwd_em_hlm.h      | 39 +++++++++++++++++++-----------
 examples/l3fwd/l3fwd_lpm_altivec.h | 25 ++++++++++++++++---
 examples/l3fwd/l3fwd_lpm_neon.h    | 35 ++++++++++++++++++++-------
 examples/l3fwd/l3fwd_lpm_sse.h     | 25 ++++++++++++++++---
 4 files changed, 95 insertions(+), 29 deletions(-)

diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h
index e76f2760b0..12b997e477 100644
--- a/examples/l3fwd/l3fwd_em_hlm.h
+++ b/examples/l3fwd/l3fwd_em_hlm.h
@@ -177,16 +177,12 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 	return portid;
 }
 
-/*
- * Buffer optimized handling of packets, invoked
- * from main_loop.
- */
 static inline void
-l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
-		uint16_t portid, struct lcore_conf *qconf)
+l3fwd_em_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+			 uint16_t *dst_port, uint16_t portid,
+			 struct lcore_conf *qconf, const uint8_t do_step3)
 {
 	int32_t i, j, pos;
-	uint16_t dst_port[MAX_PKT_BURST];
 
 	/*
 	 * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
@@ -233,13 +229,30 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 				dst_port[j + i] = em_get_dst_port(qconf,
 						pkts_burst[j + i], portid);
 		}
+
+		for (i = 0; i < EM_HASH_LOOKUP_COUNT && do_step3; i += FWDSTEP)
+			processx4_step3(&pkts_burst[j + i], &dst_port[j + i]);
 	}
 
-	for (; j < nb_rx; j++)
+	for (; j < nb_rx; j++) {
 		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &pkts_burst[j]->port);
+	}
+}
 
-	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
+/*
+ * Buffer optimized handling of packets, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
+		      struct lcore_conf *qconf)
+{
+	uint16_t dst_port[MAX_PKT_BURST];
 
+	l3fwd_em_process_packets(nb_rx, pkts_burst, dst_port, portid, qconf, 0);
+	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
 
 /*
@@ -260,11 +273,8 @@ l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
 	 */
 	int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);
 
-	for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
+	for (j = 0; j < nb_rx; j++)
 		pkts_burst[j] = ev[j]->mbuf;
-		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
-					       struct rte_ether_hdr *) + 1);
-	}
 
 	for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {
 
@@ -305,7 +315,8 @@ l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
 			}
 			continue;
 		}
-		processx4_step3(&pkts_burst[j], &dst_port[j]);
+		for (i = 0; i < EM_HASH_LOOKUP_COUNT; i += FWDSTEP)
+			processx4_step3(&pkts_burst[j + i], &dst_port[j + i]);
 
 		for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
 			pkts_burst[j + i]->port = dst_port[j + i];
diff --git a/examples/l3fwd/l3fwd_lpm_altivec.h b/examples/l3fwd/l3fwd_lpm_altivec.h
index 0c6852a7bb..adb82f1478 100644
--- a/examples/l3fwd/l3fwd_lpm_altivec.h
+++ b/examples/l3fwd/l3fwd_lpm_altivec.h
@@ -96,11 +96,11 @@ processx4_step2(const struct lcore_conf *qconf,
  * from main_loop.
  */
 static inline void
-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
-			uint8_t portid, struct lcore_conf *qconf)
+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+			  uint8_t portid, uint16_t *dst_port,
+			  struct lcore_conf *qconf, const uint8_t do_step3)
 {
 	int32_t j;
-	uint16_t dst_port[MAX_PKT_BURST];
 	__vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
 	uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
 	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -114,22 +114,41 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 				ipv4_flag[j / FWDSTEP],
 				portid, &pkts_burst[j], &dst_port[j]);
 
+	if (do_step3)
+		for (j = 0; j != k; j += FWDSTEP)
+			processx4_step3(&pkts_burst[j], &dst_port[j]);
+
 	/* Classify last up to 3 packets one by one */
 	switch (nb_rx % FWDSTEP) {
 	case 3:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	case 2:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	case 1:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	}
+}
+
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint8_t portid,
+		       struct lcore_conf *qconf)
+{
+	uint16_t dst_port[MAX_PKT_BURST];
 
+	l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
+				  0);
 	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
 
diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h
index 78ee83b76c..2a68c4c15e 100644
--- a/examples/l3fwd/l3fwd_lpm_neon.h
+++ b/examples/l3fwd/l3fwd_lpm_neon.h
@@ -80,16 +80,12 @@ processx4_step2(const struct lcore_conf *qconf,
 	}
 }
 
-/*
- * Buffer optimized handling of packets, invoked
- * from main_loop.
- */
 static inline void
-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
-			uint16_t portid, struct lcore_conf *qconf)
+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+			  uint16_t portid, uint16_t *dst_port,
+			  struct lcore_conf *qconf, const uint8_t do_step3)
 {
 	int32_t i = 0, j = 0;
-	uint16_t dst_port[MAX_PKT_BURST];
 	int32x4_t dip;
 	uint32_t ipv4_flag;
 	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -100,7 +96,6 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
 							void *));
 		}
-
 		for (j = 0; j != k - FWDSTEP; j += FWDSTEP) {
 			for (i = 0; i < FWDSTEP; i++) {
 				rte_prefetch0(rte_pktmbuf_mtod(
@@ -111,11 +106,15 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 			processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
 			processx4_step2(qconf, dip, ipv4_flag, portid,
 					&pkts_burst[j], &dst_port[j]);
+			if (do_step3)
+				processx4_step3(&pkts_burst[j], &dst_port[j]);
 		}
 
 		processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
 		processx4_step2(qconf, dip, ipv4_flag, portid, &pkts_burst[j],
 				&dst_port[j]);
+		if (do_step3)
+			processx4_step3(&pkts_burst[j], &dst_port[j]);
 
 		j += FWDSTEP;
 	}
@@ -138,26 +137,44 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 							void *));
 			j++;
 		}
-
 		j -= m;
 		/* Classify last up to 3 packets one by one */
 		switch (m) {
 		case 3:
 			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
 						       portid);
+			if (do_step3)
+				process_packet(pkts_burst[j], &dst_port[j]);
 			j++;
 			/* fallthrough */
 		case 2:
 			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
 						       portid);
+			if (do_step3)
+				process_packet(pkts_burst[j], &dst_port[j]);
 			j++;
 			/* fallthrough */
 		case 1:
 			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
 						       portid);
+			if (do_step3)
+				process_packet(pkts_burst[j], &dst_port[j]);
 		}
 	}
+}
+
+/*
+ * Buffer optimized handling of packets, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
+		       struct lcore_conf *qconf)
+{
+	uint16_t dst_port[MAX_PKT_BURST];
 
+	l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
+				  0);
 	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
 
diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
index 3f637a23d1..db15030320 100644
--- a/examples/l3fwd/l3fwd_lpm_sse.h
+++ b/examples/l3fwd/l3fwd_lpm_sse.h
@@ -82,11 +82,11 @@ processx4_step2(const struct lcore_conf *qconf,
  * from main_loop.
  */
 static inline void
-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
-			uint16_t portid, struct lcore_conf *qconf)
+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+			  uint16_t portid, uint16_t *dst_port,
+			  struct lcore_conf *qconf, const uint8_t do_step3)
 {
 	int32_t j;
-	uint16_t dst_port[MAX_PKT_BURST];
 	__m128i dip[MAX_PKT_BURST / FWDSTEP];
 	uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
 	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -99,21 +99,40 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 		processx4_step2(qconf, dip[j / FWDSTEP],
 				ipv4_flag[j / FWDSTEP], portid, &pkts_burst[j], &dst_port[j]);
 
+	if (do_step3)
+		for (j = 0; j != k; j += FWDSTEP)
+			processx4_step3(&pkts_burst[j], &dst_port[j]);
+
 	/* Classify last up to 3 packets one by one */
 	switch (nb_rx % FWDSTEP) {
 	case 3:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	case 2:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	case 1:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 	}
+}
+
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
+		       struct lcore_conf *qconf)
+{
+	uint16_t dst_port[MAX_PKT_BURST];
 
+	l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
+				  0);
 	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
 
-- 
2.25.1
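A hypothetical event-mode caller (the actual wiring lands later in this
series, e.g. patch 3/5 for the LPM path) would then skip the send stage
entirely and let the eventdev Tx machinery transmit; here nb_deq and
pkts stand for the dequeued event burst:

	uint16_t dst_port[MAX_PKT_BURST];

	/* do_step3 = 1: rewrite headers here, no send_packets_multi(). */
	l3fwd_lpm_process_packets(nb_deq, pkts, portid, dst_port, qconf, 1);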


Thread overview: 41+ messages
2022-08-29  9:44 [PATCH 1/5] examples/l3fwd: fix port group mask generation pbhagavatula
2022-08-29  9:44 ` [PATCH 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-08-29  9:44 ` [PATCH 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-08-29  9:44 ` [PATCH 4/5] examples/l3fwd: use em " pbhagavatula
2022-08-29  9:44 ` [PATCH 5/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-09-02  9:18 ` [PATCH v2 1/5] examples/l3fwd: fix port group mask generation pbhagavatula
2022-09-02  9:18   ` [PATCH v2 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-09-02  9:18   ` [PATCH v2 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-09-02  9:18   ` [PATCH v2 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-09-02  9:18   ` [PATCH v2 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-09-08 18:33   ` [PATCH v2 1/5] examples/l3fwd: fix port group mask generation David Christensen
2022-09-09  5:56     ` [EXT] " Pavan Nikhilesh Bhagavatula
2022-09-11 18:12   ` [PATCH v3 " pbhagavatula
2022-09-11 18:12     ` pbhagavatula [this message]
2022-09-11 18:12     ` [PATCH v3 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-09-11 18:12     ` [PATCH v3 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-10-07 20:03       ` [EXT] " Shijith Thotton
2022-09-11 18:12     ` [PATCH v3 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-10-07 20:01       ` [EXT] " Shijith Thotton
2022-10-11  9:08     ` [PATCH v4 1/5] examples/l3fwd: fix port group mask generation pbhagavatula
2022-10-11  9:08       ` [PATCH v4 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-10-11  9:08       ` [PATCH v4 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-10-11  9:08       ` [PATCH v4 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-10-11  9:08       ` [PATCH v4 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-10-11 10:12       ` [PATCH v5 1/5] examples/l3fwd: fix port group mask generation pbhagavatula
2022-10-11 10:12         ` [PATCH v5 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-10-17 12:06           ` [EXT] " Shijith Thotton
2022-10-11 10:12         ` [PATCH v5 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-10-17 12:06           ` [EXT] " Shijith Thotton
2022-10-11 10:12         ` [PATCH v5 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-10-17 12:06           ` [EXT] " Shijith Thotton
2022-10-11 10:12         ` [PATCH v5 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-10-12  8:57           ` [EXT] " Shijith Thotton
2022-10-17 12:05         ` [EXT] [PATCH v5 1/5] examples/l3fwd: fix port group mask generation Shijith Thotton
2022-10-20 16:15           ` Pavan Nikhilesh Bhagavatula
2022-10-25 16:05         ` [PATCH v6 " pbhagavatula
2022-10-25 16:05           ` [PATCH v6 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-10-25 16:05           ` [PATCH v6 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-10-25 16:05           ` [PATCH v6 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-10-25 16:05           ` [PATCH v6 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-10-31 14:52           ` [PATCH v6 1/5] examples/l3fwd: fix port group mask generation Thomas Monjalon
