From: Igor Russkikh <irusskikh@marvell.com>
To: <dev@dpdk.org>
Cc: Rasesh Mody <rmody@marvell.com>,
	Devendra Singh Rawat <dsinghrawat@marvell.com>,
	Ferruh Yigit <ferruh.yigit@intel.com>,
	Wenzhuo Lu <wenzhuo.lu@intel.com>,
	Beilei Xing <beilei.xing@intel.com>,
	"Bernard Iremonger" <bernard.iremonger@intel.com>,
	Igor Russkikh <irusskikh@marvell.com>
Subject: [dpdk-dev] [PATCH] app/testpmd: tx pkt clones parameter in flowgen
Date: Sat, 16 Jan 2021 10:38:59 +0100
Message-ID: <20210116093859.3025-1-irusskikh@marvell.com> (raw)

When testing for high performance numbers, it is often the case that CPU
performance limits the maximum rates the device can reach (both in PPS
and in Gbps).

Here, instead of building each packet from scratch, we use a clones counter
to resend the same mbuf to the wire multiple times.

PMDs handle this transparently thanks to the reference counter inside the mbuf.
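
A minimal sketch of the idea (simplified from the diff below; variable
names follow the flowgen code, but this is illustration only, not the
exact loop in the patch):

  /* Build the packet once, then reuse the same mbuf for the rest of
   * the burst. Each extra pointer copy bumps the reference count, so
   * the PMD frees the mbuf only after the last copy is transmitted.
   */
  pkt = rte_mbuf_raw_alloc(mbp);
  if (pkt != NULL) {
          /* ... initialize Ethernet/IP/UDP headers as usual ... */
          pkts_burst[0] = pkt;
          for (i = 1; i < nb_clones; i++) {
                  rte_mbuf_refcnt_update(pkt, 1);
                  pkts_burst[i] = pkt;
          }
  }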

This helps to reach maximum PPS with small packet sizes.
Some data from our 2 port x 50G device, using 2*6 TX queues, 64B packets,
on a PowerEdge R7525 with an AMD EPYC 7452:

./build/app/dpdk-testpmd -l 32-63  -- --forward-mode=flowgen \
  --rxq=6 --txq=6  --disable-crc-strip --burst=512 \
  --flowgen-clones=0 --txd=4096 --stats-period=1 --txpkts=64

This gives ~46 MPPS of TX output:

  Tx-pps:     22926849          Tx-bps:  11738590176
  Tx-pps:     23642629          Tx-bps:  12105024112

Setting flowgen-clones to 512 pushes TX almost to our device's physical
limit (68 MPPS) using the same 2*6 queues (cores).
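
For reference, this would be the same command line as above with the
clones option enabled (all other parameters assumed unchanged):

./build/app/dpdk-testpmd -l 32-63  -- --forward-mode=flowgen \
  --rxq=6 --txq=6  --disable-crc-strip --burst=512 \
  --flowgen-clones=512 --txd=4096 --stats-period=1 --txpkts=64

This gives: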

  Tx-pps:     34357556          Tx-bps:  17591073696
  Tx-pps:     34353211          Tx-bps:  17588802640

Doing similar measurements per core, I see that one core can do
6.9 MPPS (without clones) vs 11 MPPS (with clones).

Verified on Marvell qede and atlantic PMDs.

v1:
  - fixes based on Ferruh's comments

rfc v2: http://patchwork.dpdk.org/patch/78800/
  - increment ref counter for each mbuf pointer copy
rfc v1: http://patchwork.dpdk.org/patch/78674/

Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
---
 app/test-pmd/flowgen.c                | 105 ++++++++++++++------------
 app/test-pmd/parameters.c             |  10 +++
 app/test-pmd/testpmd.c                |   1 +
 app/test-pmd/testpmd.h                |   1 +
 doc/guides/testpmd_app_ug/run_app.rst |   7 ++
 5 files changed, 77 insertions(+), 47 deletions(-)

diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index acf3e2460..53a2e5a63 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -94,6 +94,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	uint16_t nb_rx;
 	uint16_t nb_tx;
 	uint16_t nb_pkt;
+	uint16_t nb_clones = nb_pkt_flowgen_clones;
 	uint16_t i;
 	uint32_t retry;
 	uint64_t tx_offloads;
@@ -123,53 +124,63 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 		ol_flags |= PKT_TX_MACSEC;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
-		pkt = rte_mbuf_raw_alloc(mbp);
-		if (!pkt)
-			break;
-
-		pkt->data_len = pkt_size;
-		pkt->next = NULL;
-
-		/* Initialize Ethernet header. */
-		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-		rte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
-		rte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
-		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
-
-		/* Initialize IP header. */
-		ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
-		memset(ip_hdr, 0, sizeof(*ip_hdr));
-		ip_hdr->version_ihl	= RTE_IPV4_VHL_DEF;
-		ip_hdr->type_of_service	= 0;
-		ip_hdr->fragment_offset	= 0;
-		ip_hdr->time_to_live	= IP_DEFTTL;
-		ip_hdr->next_proto_id	= IPPROTO_UDP;
-		ip_hdr->packet_id	= 0;
-		ip_hdr->src_addr	= rte_cpu_to_be_32(cfg_ip_src);
-		ip_hdr->dst_addr	= rte_cpu_to_be_32(cfg_ip_dst +
-							   next_flow);
-		ip_hdr->total_length	= RTE_CPU_TO_BE_16(pkt_size -
-							   sizeof(*eth_hdr));
-		ip_hdr->hdr_checksum	= ip_sum((unaligned_uint16_t *)ip_hdr,
-						 sizeof(*ip_hdr));
-
-		/* Initialize UDP header. */
-		udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
-		udp_hdr->src_port	= rte_cpu_to_be_16(cfg_udp_src);
-		udp_hdr->dst_port	= rte_cpu_to_be_16(cfg_udp_dst);
-		udp_hdr->dgram_cksum	= 0; /* No UDP checksum. */
-		udp_hdr->dgram_len	= RTE_CPU_TO_BE_16(pkt_size -
-							   sizeof(*eth_hdr) -
-							   sizeof(*ip_hdr));
-		pkt->nb_segs		= 1;
-		pkt->pkt_len		= pkt_size;
-		pkt->ol_flags		&= EXT_ATTACHED_MBUF;
-		pkt->ol_flags		|= ol_flags;
-		pkt->vlan_tci		= vlan_tci;
-		pkt->vlan_tci_outer	= vlan_tci_outer;
-		pkt->l2_len		= sizeof(struct rte_ether_hdr);
-		pkt->l3_len		= sizeof(struct rte_ipv4_hdr);
-		pkts_burst[nb_pkt]	= pkt;
+		if (!nb_pkt || !nb_clones) {
+			nb_clones = nb_pkt_flowgen_clones;
+			/* Logic limitation */
+			if (nb_clones > nb_pkt_per_burst)
+				nb_clones = nb_pkt_per_burst;
+
+			pkt = rte_mbuf_raw_alloc(mbp);
+			if (!pkt)
+				break;
+
+			pkt->data_len = pkt_size;
+			pkt->next = NULL;
+
+			/* Initialize Ethernet header. */
+			eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+			rte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
+			rte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
+			eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+
+			/* Initialize IP header. */
+			ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
+			memset(ip_hdr, 0, sizeof(*ip_hdr));
+			ip_hdr->version_ihl	= RTE_IPV4_VHL_DEF;
+			ip_hdr->type_of_service	= 0;
+			ip_hdr->fragment_offset	= 0;
+			ip_hdr->time_to_live	= IP_DEFTTL;
+			ip_hdr->next_proto_id	= IPPROTO_UDP;
+			ip_hdr->packet_id	= 0;
+			ip_hdr->src_addr	= rte_cpu_to_be_32(cfg_ip_src);
+			ip_hdr->dst_addr	= rte_cpu_to_be_32(cfg_ip_dst +
+								next_flow);
+			ip_hdr->total_length	= RTE_CPU_TO_BE_16(pkt_size -
+								sizeof(*eth_hdr));
+			ip_hdr->hdr_checksum	= ip_sum((unaligned_uint16_t *)ip_hdr,
+							sizeof(*ip_hdr));
+
+			/* Initialize UDP header. */
+			udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
+			udp_hdr->src_port	= rte_cpu_to_be_16(cfg_udp_src);
+			udp_hdr->dst_port	= rte_cpu_to_be_16(cfg_udp_dst);
+			udp_hdr->dgram_cksum	= 0; /* No UDP checksum. */
+			udp_hdr->dgram_len	= RTE_CPU_TO_BE_16(pkt_size -
+								sizeof(*eth_hdr) -
+								sizeof(*ip_hdr));
+			pkt->nb_segs		= 1;
+			pkt->pkt_len		= pkt_size;
+			pkt->ol_flags		&= EXT_ATTACHED_MBUF;
+			pkt->ol_flags		|= ol_flags;
+			pkt->vlan_tci		= vlan_tci;
+			pkt->vlan_tci_outer	= vlan_tci_outer;
+			pkt->l2_len		= sizeof(struct rte_ether_hdr);
+			pkt->l3_len		= sizeof(struct rte_ipv4_hdr);
+		} else {
+			nb_clones--;
+			rte_mbuf_refcnt_update(pkt, 1);
+		}
+		pkts_burst[nb_pkt] = pkt;
 
 		next_flow = (next_flow + 1) % cfg_n_flows;
 	}
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 414a0068f..a095aa8f6 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -163,6 +163,7 @@ usage(char* progname)
 	printf("  --hairpinq=N: set the number of hairpin queues per port to "
 	       "N.\n");
 	printf("  --burst=N: set the number of packets per burst to N.\n");
+	printf("  --flowgen-clones=N: set the number of single packet clones to send in flowgen mode. Should be less than burst value.\n");
 	printf("  --mbcache=N: set the cache of mbuf memory pool to N.\n");
 	printf("  --rxpt=N: set prefetch threshold register of RX rings to N.\n");
 	printf("  --rxht=N: set the host threshold register of RX rings to N.\n");
@@ -561,6 +562,7 @@ launch_args_parse(int argc, char** argv)
 		{ "hairpinq",			1, 0, 0 },
 		{ "hairpin-mode",		1, 0, 0 },
 		{ "burst",			1, 0, 0 },
+		{ "flowgen-clones",		1, 0, 0 },
 		{ "mbcache",			1, 0, 0 },
 		{ "txpt",			1, 0, 0 },
 		{ "txht",			1, 0, 0 },
@@ -1089,6 +1091,14 @@ launch_args_parse(int argc, char** argv)
 				else
 					nb_pkt_per_burst = (uint16_t) n;
 			}
+			if (!strcmp(lgopts[opt_idx].name, "flowgen-clones")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					nb_pkt_flowgen_clones = (uint16_t) n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "clones must be >= 0 and <= current burst\n");
+			}
 			if (!strcmp(lgopts[opt_idx].name, "mbcache")) {
 				n = atoi(optarg);
 				if ((n >= 0) &&
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 2b60f6c5d..b0f825f6f 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -240,6 +240,7 @@ uint32_t tx_pkt_times_intra;
 /**< Timings for send scheduling in TXONLY mode, time between packets. */
 
 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
+uint16_t nb_pkt_flowgen_clones; /**< Number of tx packet clones to send in flowgen mode. */
 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
 
 /* current configuration is in DCB or not,0 means it is not in DCB mode */
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 5f2316210..efd558d15 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -476,6 +476,7 @@ extern enum tx_pkt_split tx_pkt_split;
 extern uint8_t txonly_multi_flow;
 
 extern uint16_t nb_pkt_per_burst;
+extern uint16_t nb_pkt_flowgen_clones;
 extern uint16_t mb_mempool_cache;
 extern int8_t rx_pthresh;
 extern int8_t rx_hthresh;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index ca67105b7..c4c8f3a6c 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -299,6 +299,13 @@ The command line options are:
     If set to 0, driver default is used if defined. Else, if driver
     default is not defined, default of 32 is used.
 
+*   ``--flowgen-clones=N``
+
+    Set the number of clones of each packet to be sent in `flowgen` mode.
+    Sending clones reduces host CPU load when creating packets and may help
+    in testing extreme speeds or maxing out TX packet performance.
+    N should be non-zero, but less than the 'burst' parameter.
+
 *   ``--mbcache=N``
 
     Set the cache of mbuf memory pools to N, where 0 <= N <= 512.
-- 
2.27.0


