From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <yskoh@mellanox.com>
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])
 by dpdk.org (Postfix) with ESMTP id DB73558F6
 for <dev@dpdk.org>; Thu, 28 Mar 2019 02:51:09 +0100 (CET)
Received: from Internal Mail-Server by MTLPINE1 (envelope-from
 yskoh@mellanox.com)
 with ESMTPS (AES256-SHA encrypted); 28 Mar 2019 03:51:09 +0200
Received: from scfae-sc-2.mti.labs.mlnx (scfae-sc-2.mti.labs.mlnx
 [10.101.0.96])
 by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x2S1p6bw021806;
 Thu, 28 Mar 2019 03:51:07 +0200
From: Yongseok Koh <yskoh@mellanox.com>
To: wenzhuo.lu@intel.com, jingjing.wu@intel.com
Cc: dev@dpdk.org, jerinj@marvell.com, konstantin.ananyev@intel.com
Date: Wed, 27 Mar 2019 18:51:02 -0700
Message-Id: <20190328015102.21920-1-yskoh@mellanox.com>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20171215211125.39177-1-yskoh@mellanox.com>
References: <20171215211125.39177-1-yskoh@mellanox.com>
Subject: [dpdk-dev] [PATCH v3] app/testpmd: make txonly mode generate
	multiple flows
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Thu, 28 Mar 2019 01:51:10 -0000

Testpmd can generate multiple flows at little extra cost, which makes it
a simple traffic generator for developers' quick tests. If
"--txonly-multi-flow" is specified on the command line, the IP source
address is varied to generate multiple flows.

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---

v3:
* Add "--txonly-multi-flow" param to enable the feature.
* Replace __thread with RTE_PER_LCORE.

v2:
* Add detailed explanation in a comment.

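For illustration, here is a minimal standalone sketch of the mechanism
this patch uses: a per-lcore 8-bit counter (RTE_DEFINE_PER_LCORE rather
than __thread, per the v3 change) perturbs bits 8-15 of a base address.
BASE_ADDR is a stand-in for testpmd's IP_DST_ADDR and next_src_addr() is
a hypothetical helper; the real code inlines this in pkt_burst_transmit()
and advances the per-lcore counter only by the number of packets actually
transmitted (nb_tx), not per call.

    #include <stdint.h>
    #include <rte_byteorder.h>
    #include <rte_lcore.h>
    #include <rte_per_lcore.h>

    #define BASE_ADDR ((198U << 24) | (18 << 16) | (0 << 8) | 1) /* stand-in */

    /* One counter per lcore; nothing is shared, so no locking is needed. */
    RTE_DEFINE_PER_LCORE(uint8_t, ip_var);

    static inline uint32_t
    next_src_addr(void)
    {
    	uint8_t var = RTE_PER_LCORE(ip_var)++;

    	/*
    	 * Vary bits 8-15 per packet and offset by the lcore id, so each
    	 * lcore emits its own rotating set of 256 source addresses,
    	 * giving RSS distinct tuples to hash on.
    	 */
    	return rte_cpu_to_be_32((BASE_ADDR | ((uint32_t)var << 8)) +
    			rte_lcore_id());
    }
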
 app/test-pmd/parameters.c             |  4 ++++
 app/test-pmd/testpmd.c                |  3 +++
 app/test-pmd/testpmd.h                |  2 ++
 app/test-pmd/txonly.c                 | 22 ++++++++++++++++++++++
 doc/guides/testpmd_app_ug/run_app.rst |  4 ++++
 5 files changed, 35 insertions(+)

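For reference, a typical invocation (the binary path and the EAL core and
memory-channel arguments are illustrative and depend on the local build
and machine):

    ./build/app/testpmd -l 0-3 -n 4 -- --forward-mode=txonly --txonly-multi-flow

Flows then differ only in their IPv4 source address, which is enough for
receiver-side RSS to spread them across queues.
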
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 38b419767b..7b6b60905d 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -173,6 +173,7 @@ usage(char* progname)
 	       " Used mainly with PCAP drivers.\n");
 	printf("  --txpkts=X[,Y]*: set TX segment sizes"
 		" or total packet length.\n");
+	printf("  --txonly-multi-flow: generate multiple flows in txonly mode\n");
 	printf("  --disable-link-check: disable check on link status when "
 	       "starting/stopping ports.\n");
 	printf("  --no-lsc-interrupt: disable link status change interrupt.\n");
@@ -632,6 +633,7 @@ launch_args_parse(int argc, char** argv)
 		{ "no-flush-rx",	0, 0, 0 },
 		{ "flow-isolate-all",	        0, 0, 0 },
 		{ "txpkts",			1, 0, 0 },
+		{ "txonly-multi-flow",		0, 0, 0 },
 		{ "disable-link-check",		0, 0, 0 },
 		{ "no-lsc-interrupt",		0, 0, 0 },
 		{ "no-rmv-interrupt",		0, 0, 0 },
@@ -1141,6 +1143,8 @@ launch_args_parse(int argc, char** argv)
 				else
 					rte_exit(EXIT_FAILURE, "bad txpkts\n");
 			}
+			if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow"))
+				txonly_multi_flow = 1;
 			if (!strcmp(lgopts[opt_idx].name, "no-flush-rx"))
 				no_flush_rx = 1;
 			if (!strcmp(lgopts[opt_idx].name, "disable-link-check"))
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 216be47f92..b950e8cc55 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -217,6 +217,9 @@ uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
 /**< Split policy for packets to TX. */
 
+int txonly_multi_flow;
+/**< Whether multiple flows are generated in TXONLY mode. */
+
 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 85b791b6bb..668227bc55 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -439,6 +439,8 @@ enum tx_pkt_split {
 
 extern enum tx_pkt_split tx_pkt_split;
 
+extern int txonly_multi_flow;
+
 extern uint16_t nb_pkt_per_burst;
 extern uint16_t mb_mempool_cache;
 extern int8_t rx_pthresh;
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 1f08b6ed37..def52a0487 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -52,6 +52,7 @@
 #define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
 
 static struct ipv4_hdr  pkt_ip_hdr;  /**< IP header of transmitted packets. */
+RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
 static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
 
 static void
@@ -164,6 +165,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	uint16_t vlan_tci, vlan_tci_outer;
 	uint32_t retry;
 	uint64_t ol_flags = 0;
+	uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
 	uint8_t  i;
 	uint64_t tx_offloads;
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
@@ -237,6 +239,23 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
 		copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
 				sizeof(struct ether_hdr));
+		if (txonly_multi_flow) {
+			struct ipv4_hdr *ip_hdr;
+			uint32_t addr;
+
+			ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+					struct ipv4_hdr *,
+					sizeof(struct ether_hdr));
+			/*
+			 * Generate multiple flows by varying the IP src
+			 * address. This lets RSS, if enabled, distribute
+			 * packets across queues on the receiving side, so
+			 * txonly mode can serve as a decent packet generator
+			 * for a developer's quick performance regression test.
+			 */
+			addr = (IP_DST_ADDR | (ip_var++ << 8)) + rte_lcore_id();
+			ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+		}
 		copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
 				sizeof(struct ether_hdr) +
 				sizeof(struct ipv4_hdr));
@@ -268,6 +287,9 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	}
 	fs->tx_packets += nb_tx;
 
+	if (txonly_multi_flow)
+		RTE_PER_LCORE(_ip_var) += nb_tx;
+
 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
 	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
 #endif
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 4495ed0382..b717b8c7b7 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -324,6 +324,10 @@ The commandline options are:
     Set TX segment sizes or total packet length. Valid for ``tx-only``
     and ``flowgen`` forwarding modes.
 
+*   ``--txonly-multi-flow``
+
+    Generate multiple flows in txonly mode by varying the IP source address.
+
 *   ``--disable-link-check``
 
     Disable check on link status when starting/stopping ports.
-- 
2.11.0
