DPDK patches and discussions
From: Yongseok Koh <yskoh@mellanox.com>
To: wenzhuo.lu@intel.com, jingjing.wu@intel.com
Cc: dev@dpdk.org, bernard.iremonger@intel.com, jerinj@marvell.com,
	konstantin.ananyev@intel.com
Subject: [dpdk-dev] [PATCH v4] app/testpmd: make txonly mode generate multiple flows
Date: Thu, 28 Mar 2019 11:46:28 -0700	[thread overview]
Message-ID: <20190328184628.28960-1-yskoh@mellanox.com> (raw)
In-Reply-To: <20171215211125.39177-1-yskoh@mellanox.com>

Testpmd can generate multiple flows at little extra cost, which makes it
a simple traffic generator for a developer's quick tests. If
"--txonly-multi-flow" is specified on the command line, the IP source
address is varied to generate multiple flows.

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---

v4:
* Use uint8_t for the txonly_multi_flow variable.

v3:
* Add "--txonly-multi-flow" param to enable the feature.
* Replace __thread with RTE_PER_LCORE.

v2:
* Add detailed explanation in a comment.
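
For illustration, here is a minimal standalone sketch (not part of the
patch) of how the per-packet source address is derived. The IP_DST_ADDR
value below matches testpmd's default of 192.168.2.2; the lcore id of 1
is a made-up example, and the program itself is only an illustration:

#include <stdint.h>
#include <stdio.h>

#define IP_DST_ADDR ((192u << 24) | (168u << 16) | (2u << 8) | 2u)

int main(void)
{
	uint8_t ip_var = 0;     /* stands in for RTE_PER_LCORE(_ip_var) */
	uint32_t lcore_id = 1;  /* stands in for rte_lcore_id() */
	int i;

	for (i = 0; i < 4; i++) {
		/* same derivation as in the txonly.c hunk of this patch */
		uint32_t addr =
			(IP_DST_ADDR | ((uint32_t)ip_var++ << 8)) + lcore_id;

		printf("pkt %d: src=%u.%u.%u.%u\n", i,
		       addr >> 24, (addr >> 16) & 0xff,
		       (addr >> 8) & 0xff, addr & 0xff);
	}
	return 0;
}

To exercise the feature, a testpmd invocation would look something like
this (the core and memory-channel arguments are examples only):

  testpmd -l 0-1 -n 4 -- --forward-mode=txonly --txonly-multi-flow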

 app/test-pmd/parameters.c             |  4 ++++
 app/test-pmd/testpmd.c                |  3 +++
 app/test-pmd/testpmd.h                |  2 ++
 app/test-pmd/txonly.c                 | 22 ++++++++++++++++++++++
 doc/guides/testpmd_app_ug/run_app.rst |  4 ++++
 5 files changed, 35 insertions(+)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 38b419767b..7b6b60905d 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -173,6 +173,7 @@ usage(char* progname)
 	       " Used mainly with PCAP drivers.\n");
 	printf("  --txpkts=X[,Y]*: set TX segment sizes"
 		" or total packet length.\n");
+	printf("  --txonly-multi-flow: generate multiple flows in txonly mode\n");
 	printf("  --disable-link-check: disable check on link status when "
 	       "starting/stopping ports.\n");
 	printf("  --no-lsc-interrupt: disable link status change interrupt.\n");
@@ -632,6 +633,7 @@ launch_args_parse(int argc, char** argv)
 		{ "no-flush-rx",	0, 0, 0 },
 		{ "flow-isolate-all",	        0, 0, 0 },
 		{ "txpkts",			1, 0, 0 },
+		{ "txonly-multi-flow",		0, 0, 0 },
 		{ "disable-link-check",		0, 0, 0 },
 		{ "no-lsc-interrupt",		0, 0, 0 },
 		{ "no-rmv-interrupt",		0, 0, 0 },
@@ -1141,6 +1143,8 @@ launch_args_parse(int argc, char** argv)
 				else
 					rte_exit(EXIT_FAILURE, "bad txpkts\n");
 			}
+			if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow"))
+				txonly_multi_flow = 1;
 			if (!strcmp(lgopts[opt_idx].name, "no-flush-rx"))
 				no_flush_rx = 1;
 			if (!strcmp(lgopts[opt_idx].name, "disable-link-check"))
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 216be47f92..237b6b0b39 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -217,6 +217,9 @@ uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
 /**< Split policy for packets to TX. */
 
+uint8_t txonly_multi_flow;
+/**< Whether multiple flows are generated in TXONLY mode. */
+
 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 85b791b6bb..29a5520126 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -439,6 +439,8 @@ enum tx_pkt_split {
 
 extern enum tx_pkt_split tx_pkt_split;
 
+extern uint8_t txonly_multi_flow;
+
 extern uint16_t nb_pkt_per_burst;
 extern uint16_t mb_mempool_cache;
 extern int8_t rx_pthresh;
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 1f08b6ed37..def52a0487 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -52,6 +52,7 @@
 #define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
 
 static struct ipv4_hdr  pkt_ip_hdr;  /**< IP header of transmitted packets. */
+RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
 static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
 
 static void
@@ -164,6 +165,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	uint16_t vlan_tci, vlan_tci_outer;
 	uint32_t retry;
 	uint64_t ol_flags = 0;
+	uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
 	uint8_t  i;
 	uint64_t tx_offloads;
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
@@ -237,6 +239,23 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
 		copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
 				sizeof(struct ether_hdr));
+		if (txonly_multi_flow) {
+			struct ipv4_hdr *ip_hdr;
+			uint32_t addr;
+
+			ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+					struct ipv4_hdr *,
+					sizeof(struct ether_hdr));
+			/*
+			 * Generate multiple flows by varying the IP src addr.
+			 * This lets the receiver, if any, spread the packets
+			 * across queues via RSS, making txonly mode a decent
+			 * packet generator for a developer's quick performance
+			 * regression tests.
+			 */
+			addr = (IP_DST_ADDR | (ip_var++ << 8)) + rte_lcore_id();
+			ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+		}
 		copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
 				sizeof(struct ether_hdr) +
 				sizeof(struct ipv4_hdr));
@@ -268,6 +287,9 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	}
 	fs->tx_packets += nb_tx;
 
+	if (txonly_multi_flow)
+		RTE_PER_LCORE(_ip_var) += nb_tx;
+
 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
 	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
 #endif
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 4495ed0382..b717b8c7b7 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -324,6 +324,10 @@ The commandline options are:
     Set TX segment sizes or total packet length. Valid for ``tx-only``
     and ``flowgen`` forwarding modes.
 
+*   ``--txonly-multi-flow``
+
+    Generate multiple flows in txonly mode by varying the IP source address.
+
 *   ``--disable-link-check``
 
     Disable check on link status when starting/stopping ports.
-- 
2.11.0
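
For readers unfamiliar with the per-lcore macros used in txonly.c:
RTE_DEFINE_PER_LCORE and RTE_PER_LCORE are DPDK's portable wrappers
around thread-local storage. A simplified sketch of what they expand to
(the real definitions live in rte_per_lcore.h and also use __typeof__):

#define RTE_DEFINE_PER_LCORE(type, name) __thread type per_lcore_##name
#define RTE_PER_LCORE(name) (per_lcore_##name)

This is why the v3 changelog above says "Replace __thread with
RTE_PER_LCORE": the macro keeps the thread-local behavior while going
through the EAL abstraction.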


Thread overview: 24+ messages
2017-12-15 21:11 [dpdk-dev] [PATCH] " Yongseok Koh
2018-01-12 18:08 ` Thomas Monjalon
2018-01-16  2:40 ` Lu, Wenzhuo
2018-01-17 18:04   ` Yongseok Koh
2018-01-17 18:18 ` [dpdk-dev] [PATCH v2] " Yongseok Koh
2018-01-18  1:22   ` Lu, Wenzhuo
2018-01-18 12:21   ` Ananyev, Konstantin
2018-01-19  7:09     ` Yongseok Koh
2018-01-19 11:02       ` Ananyev, Konstantin
2018-01-18 13:55   ` Jerin Jacob
2019-03-28  1:51 ` [dpdk-dev] [PATCH v3] " Yongseok Koh
2019-03-28 10:24   ` Iremonger, Bernard
2019-03-28 17:33     ` Yongseok Koh
2019-03-28 17:42       ` Iremonger, Bernard
2019-03-28 18:46 ` [dpdk-dev] [PATCH v4] " Yongseok Koh [this message]
2019-03-29  9:55   ` Iremonger, Bernard
2019-03-29 18:44     ` Ferruh Yigit
