From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, David Christensen <drc@linux.vnet.ibm.com>,
 Ruifeng Wang <ruifeng.wang@arm.com>,
 Bruce Richardson <bruce.richardson@intel.com>,
 Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
CC: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [PATCH v3 2/5] examples/l3fwd: split processing and send stages
Date: Sun, 11 Sep 2022 23:42:46 +0530
Message-ID: <20220911181250.2286-2-pbhagavatula@marvell.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220911181250.2286-1-pbhagavatula@marvell.com>
References: <20220902091833.9074-1-pbhagavatula@marvell.com>
 <20220911181250.2286-1-pbhagavatula@marvell.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Split the packet processing stage from the packet send stage, since the
send stage is not common to poll and event modes. The new processing
helpers take a do_step3 flag so that callers which do not go through
send_packets_multi() can perform the final header rewrite inline.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
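Note for reviewers (illustrative only, not part of the patch): the
sketch below shows how the new do_step3 flag is meant to compose, using
the LPM variant as the example. The poll-mode caller mirrors the code
added in this patch; the event-mode caller is an assumption about how a
later patch in this series wires up the event path.

	/* Poll mode: defer step3 (header rewrite) to the send stage. */
	static inline void
	poll_mode_path(int nb_rx, struct rte_mbuf **pkts_burst,
		       uint16_t portid, struct lcore_conf *qconf)
	{
		uint16_t dst_port[MAX_PKT_BURST];

		l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid,
					  dst_port, qconf, 0);
		send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
	}

	/*
	 * Event mode (hypothetical caller): run step3 inline, since the
	 * eventdev pipeline performs the send stage itself.
	 */
	static inline void
	event_mode_path(int nb_rx, struct rte_mbuf **pkts_burst,
			uint16_t portid, struct lcore_conf *qconf)
	{
		uint16_t dst_port[MAX_PKT_BURST];

		l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid,
					  dst_port, qconf, 1);
	}
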
 examples/l3fwd/l3fwd_em_hlm.h      | 39 +++++++++++++++++++-----------
 examples/l3fwd/l3fwd_lpm_altivec.h | 25 ++++++++++++++++---
 examples/l3fwd/l3fwd_lpm_neon.h    | 35 ++++++++++++++++++++-------
 examples/l3fwd/l3fwd_lpm_sse.h     | 25 ++++++++++++++++---
 4 files changed, 95 insertions(+), 29 deletions(-)

diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h
index e76f2760b0..12b997e477 100644
--- a/examples/l3fwd/l3fwd_em_hlm.h
+++ b/examples/l3fwd/l3fwd_em_hlm.h
@@ -177,16 +177,12 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 	return portid;
 }
 
-/*
- * Buffer optimized handling of packets, invoked
- * from main_loop.
- */
 static inline void
-l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
-		uint16_t portid, struct lcore_conf *qconf)
+l3fwd_em_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+			 uint16_t *dst_port, uint16_t portid,
+			 struct lcore_conf *qconf, const uint8_t do_step3)
 {
 	int32_t i, j, pos;
-	uint16_t dst_port[MAX_PKT_BURST];
 
 	/*
 	 * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
@@ -233,13 +229,30 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 				dst_port[j + i] = em_get_dst_port(qconf,
 						pkts_burst[j + i], portid);
 		}
+
+		for (i = 0; i < EM_HASH_LOOKUP_COUNT && do_step3; i += FWDSTEP)
+			processx4_step3(&pkts_burst[j + i], &dst_port[j + i]);
 	}
 
-	for (; j < nb_rx; j++)
+	for (; j < nb_rx; j++) {
 		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
+	}
+}
 
-	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
+/*
+ * Buffer optimized handling of packets, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
+		      struct lcore_conf *qconf)
+{
+	uint16_t dst_port[MAX_PKT_BURST];
 
+	l3fwd_em_process_packets(nb_rx, pkts_burst, dst_port, portid, qconf, 0);
+	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
 
 /*
@@ -260,11 +273,8 @@ l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
 	 */
 	int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);
 
-	for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
+	for (j = 0; j < nb_rx; j++)
 		pkts_burst[j] = ev[j]->mbuf;
-		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
-					       struct rte_ether_hdr *) + 1);
-	}
 
 	for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {
 
@@ -305,7 +315,8 @@ l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
 			}
 			continue;
 		}
-		processx4_step3(&pkts_burst[j], &dst_port[j]);
+		for (i = 0; i < EM_HASH_LOOKUP_COUNT; i += FWDSTEP)
+			processx4_step3(&pkts_burst[j + i], &dst_port[j + i]);
 
 		for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
 			pkts_burst[j + i]->port = dst_port[j + i];
diff --git a/examples/l3fwd/l3fwd_lpm_altivec.h b/examples/l3fwd/l3fwd_lpm_altivec.h
index 0c6852a7bb..adb82f1478 100644
--- a/examples/l3fwd/l3fwd_lpm_altivec.h
+++ b/examples/l3fwd/l3fwd_lpm_altivec.h
@@ -96,11 +96,11 @@ processx4_step2(const struct lcore_conf *qconf,
  * from main_loop.
  */
 static inline void
-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
-			uint8_t portid, struct lcore_conf *qconf)
+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+			  uint8_t portid, uint16_t *dst_port,
+			  struct lcore_conf *qconf, const uint8_t do_step3)
 {
 	int32_t j;
-	uint16_t dst_port[MAX_PKT_BURST];
 	__vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
 	uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
 	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -114,22 +114,41 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 				ipv4_flag[j / FWDSTEP],
 				portid, &pkts_burst[j], &dst_port[j]);
 
+	if (do_step3)
+		for (j = 0; j != k; j += FWDSTEP)
+			processx4_step3(&pkts_burst[j], &dst_port[j]);
+
 	/* Classify last up to 3 packets one by one */
 	switch (nb_rx % FWDSTEP) {
 	case 3:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	case 2:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	case 1:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	}
+}
+
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint8_t portid,
+		       struct lcore_conf *qconf)
+{
+	uint16_t dst_port[MAX_PKT_BURST];
 
+	l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
+				  0);
 	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
 
diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h
index 78ee83b76c..2a68c4c15e 100644
--- a/examples/l3fwd/l3fwd_lpm_neon.h
+++ b/examples/l3fwd/l3fwd_lpm_neon.h
@@ -80,16 +80,12 @@ processx4_step2(const struct lcore_conf *qconf,
 	}
 }
 
-/*
- * Buffer optimized handling of packets, invoked
- * from main_loop.
- */
 static inline void
-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
-			uint16_t portid, struct lcore_conf *qconf)
+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+			  uint16_t portid, uint16_t *dst_port,
+			  struct lcore_conf *qconf, const uint8_t do_step3)
 {
 	int32_t i = 0, j = 0;
-	uint16_t dst_port[MAX_PKT_BURST];
 	int32x4_t dip;
 	uint32_t ipv4_flag;
 	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -100,7 +96,6 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
 							void *));
 		}
-
 		for (j = 0; j != k - FWDSTEP; j += FWDSTEP) {
 			for (i = 0; i < FWDSTEP; i++) {
 				rte_prefetch0(rte_pktmbuf_mtod(
@@ -111,11 +106,15 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 			processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
 			processx4_step2(qconf, dip, ipv4_flag, portid,
 					&pkts_burst[j], &dst_port[j]);
+			if (do_step3)
+				processx4_step3(&pkts_burst[j], &dst_port[j]);
 		}
 
 		processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
 		processx4_step2(qconf, dip, ipv4_flag, portid, &pkts_burst[j],
 				&dst_port[j]);
+		if (do_step3)
+			processx4_step3(&pkts_burst[j], &dst_port[j]);
 
 		j += FWDSTEP;
 	}
@@ -138,26 +137,44 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 							void *));
 			j++;
 		}
-
 		j -= m;
 		/* Classify last up to 3 packets one by one */
 		switch (m) {
 		case 3:
 			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
 						       portid);
+			if (do_step3)
+				process_packet(pkts_burst[j], &dst_port[j]);
 			j++;
 			/* fallthrough */
 		case 2:
 			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
 						       portid);
+			if (do_step3)
+				process_packet(pkts_burst[j], &dst_port[j]);
 			j++;
 			/* fallthrough */
 		case 1:
 			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
 						       portid);
+			if (do_step3)
+				process_packet(pkts_burst[j], &dst_port[j]);
 		}
 	}
+}
+
+/*
+ * Buffer optimized handling of packets, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
+		       struct lcore_conf *qconf)
+{
+	uint16_t dst_port[MAX_PKT_BURST];
 
+	l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
+				  0);
 	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
 
diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
index 3f637a23d1..db15030320 100644
--- a/examples/l3fwd/l3fwd_lpm_sse.h
+++ b/examples/l3fwd/l3fwd_lpm_sse.h
@@ -82,11 +82,11 @@ processx4_step2(const struct lcore_conf *qconf,
  * from main_loop.
  */
 static inline void
-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
-			uint16_t portid, struct lcore_conf *qconf)
+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+			  uint16_t portid, uint16_t *dst_port,
+			  struct lcore_conf *qconf, const uint8_t do_step3)
 {
 	int32_t j;
-	uint16_t dst_port[MAX_PKT_BURST];
 	__m128i dip[MAX_PKT_BURST / FWDSTEP];
 	uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
 	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -99,21 +99,40 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 		processx4_step2(qconf, dip[j / FWDSTEP],
 				ipv4_flag[j / FWDSTEP], portid, &pkts_burst[j], &dst_port[j]);
 
+	if (do_step3)
+		for (j = 0; j != k; j += FWDSTEP)
+			processx4_step3(&pkts_burst[j], &dst_port[j]);
+
 	/* Classify last up to 3 packets one by one */
 	switch (nb_rx % FWDSTEP) {
 	case 3:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	case 2:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 		/* fall-through */
 	case 1:
 		dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+		if (do_step3)
+			process_packet(pkts_burst[j], &dst_port[j]);
 		j++;
 	}
+}
+
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
+		       struct lcore_conf *qconf)
+{
+	uint16_t dst_port[MAX_PKT_BURST];
 
+	l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
+				  0);
 	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
 
-- 
2.25.1