DPDK patches and discussions
 help / color / mirror / Atom feed
From: Stephen Hemminger <stephen@networkplumber.org>
To: Igor Russkikh <irusskikh@marvell.com>
Cc: <dev@dpdk.org>, Rasesh Mody <rmody@marvell.com>,
	Devendra Singh Rawat <dsinghrawat@marvell.com>,
	Wenzhuo Lu <wenzhuo.lu@intel.com>,
	Beilei Xing <beilei.xing@intel.com>,
	Bernard Iremonger <bernard.iremonger@intel.com>
Subject: Re: [dpdk-dev] [RFC PATCH] app/testpmd: tx pkt clones parameter in flowgen
Date: Thu, 24 Sep 2020 07:56:55 -0700	[thread overview]
Message-ID: <20200924075647.3e160c0b@hermes.lan> (raw)
In-Reply-To: <20200924113414.483-1-irusskikh@marvell.com>

On Thu, 24 Sep 2020 14:34:14 +0300
Igor Russkikh <irusskikh@marvell.com> wrote:

> When testing high performance numbers, it is often the case that CPU
> performance limits the maximum values the device can reach (both in pps and in gbps)
> 
> Here instead of recreating each packet separately, we use clones counter
> to resend the same mbuf to the line multiple times.
> 
> PMDs handle that transparently due to reference counting inside of mbuf.
> 
> Verified on Marvell qede and atlantic PMDs.
> 
> Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
> ---
>  app/test-pmd/flowgen.c                | 100 ++++++++++++++------------
>  app/test-pmd/parameters.c             |  12 ++++
>  app/test-pmd/testpmd.c                |   1 +
>  app/test-pmd/testpmd.h                |   1 +
>  doc/guides/testpmd_app_ug/run_app.rst |   7 ++
>  5 files changed, 74 insertions(+), 47 deletions(-)
> 
> diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
> index acf3e2460..b6f6e7a0e 100644
> --- a/app/test-pmd/flowgen.c
> +++ b/app/test-pmd/flowgen.c
> @@ -94,6 +94,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
>  	uint16_t nb_rx;
>  	uint16_t nb_tx;
>  	uint16_t nb_pkt;
> +	uint16_t nb_clones = nb_pkt_clones;
>  	uint16_t i;
>  	uint32_t retry;
>  	uint64_t tx_offloads;
> @@ -123,53 +124,58 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
>  		ol_flags |= PKT_TX_MACSEC;
>  
>  	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
> -		pkt = rte_mbuf_raw_alloc(mbp);
> -		if (!pkt)
> -			break;
> -
> -		pkt->data_len = pkt_size;
> -		pkt->next = NULL;
> -
> -		/* Initialize Ethernet header. */
> -		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> -		rte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
> -		rte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
> -		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
> -
> -		/* Initialize IP header. */
> -		ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
> -		memset(ip_hdr, 0, sizeof(*ip_hdr));
> -		ip_hdr->version_ihl	= RTE_IPV4_VHL_DEF;
> -		ip_hdr->type_of_service	= 0;
> -		ip_hdr->fragment_offset	= 0;
> -		ip_hdr->time_to_live	= IP_DEFTTL;
> -		ip_hdr->next_proto_id	= IPPROTO_UDP;
> -		ip_hdr->packet_id	= 0;
> -		ip_hdr->src_addr	= rte_cpu_to_be_32(cfg_ip_src);
> -		ip_hdr->dst_addr	= rte_cpu_to_be_32(cfg_ip_dst +
> -							   next_flow);
> -		ip_hdr->total_length	= RTE_CPU_TO_BE_16(pkt_size -
> -							   sizeof(*eth_hdr));
> -		ip_hdr->hdr_checksum	= ip_sum((unaligned_uint16_t *)ip_hdr,
> -						 sizeof(*ip_hdr));
> -
> -		/* Initialize UDP header. */
> -		udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
> -		udp_hdr->src_port	= rte_cpu_to_be_16(cfg_udp_src);
> -		udp_hdr->dst_port	= rte_cpu_to_be_16(cfg_udp_dst);
> -		udp_hdr->dgram_cksum	= 0; /* No UDP checksum. */
> -		udp_hdr->dgram_len	= RTE_CPU_TO_BE_16(pkt_size -
> -							   sizeof(*eth_hdr) -
> -							   sizeof(*ip_hdr));
> -		pkt->nb_segs		= 1;
> -		pkt->pkt_len		= pkt_size;
> -		pkt->ol_flags		&= EXT_ATTACHED_MBUF;
> -		pkt->ol_flags		|= ol_flags;
> -		pkt->vlan_tci		= vlan_tci;
> -		pkt->vlan_tci_outer	= vlan_tci_outer;
> -		pkt->l2_len		= sizeof(struct rte_ether_hdr);
> -		pkt->l3_len		= sizeof(struct rte_ipv4_hdr);
> -		pkts_burst[nb_pkt]	= pkt;
> +		if (!nb_pkt || !nb_clones) {
> +			nb_clones = nb_pkt_clones;
> +			pkt = rte_mbuf_raw_alloc(mbp);
> +			if (!pkt)
> +				break;
> +
> +			pkt->data_len = pkt_size;
> +			pkt->next = NULL;
> +
> +			/* Initialize Ethernet header. */
> +			eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> +			rte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
> +			rte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
> +			eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
> +
> +			/* Initialize IP header. */
> +			ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
> +			memset(ip_hdr, 0, sizeof(*ip_hdr));
> +			ip_hdr->version_ihl	= RTE_IPV4_VHL_DEF;
> +			ip_hdr->type_of_service	= 0;
> +			ip_hdr->fragment_offset	= 0;
> +			ip_hdr->time_to_live	= IP_DEFTTL;
> +			ip_hdr->next_proto_id	= IPPROTO_UDP;
> +			ip_hdr->packet_id	= 0;
> +			ip_hdr->src_addr	= rte_cpu_to_be_32(cfg_ip_src);
> +			ip_hdr->dst_addr	= rte_cpu_to_be_32(cfg_ip_dst +
> +								next_flow);
> +			ip_hdr->total_length	= RTE_CPU_TO_BE_16(pkt_size -
> +								sizeof(*eth_hdr));
> +			ip_hdr->hdr_checksum	= ip_sum((unaligned_uint16_t *)ip_hdr,
> +							sizeof(*ip_hdr));
> +
> +			/* Initialize UDP header. */
> +			udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
> +			udp_hdr->src_port	= rte_cpu_to_be_16(cfg_udp_src);
> +			udp_hdr->dst_port	= rte_cpu_to_be_16(cfg_udp_dst);
> +			udp_hdr->dgram_cksum	= 0; /* No UDP checksum. */
> +			udp_hdr->dgram_len	= RTE_CPU_TO_BE_16(pkt_size -
> +								sizeof(*eth_hdr) -
> +								sizeof(*ip_hdr));
> +			pkt->nb_segs		= 1;
> +			pkt->pkt_len		= pkt_size;
> +			pkt->ol_flags		&= EXT_ATTACHED_MBUF;
> +			pkt->ol_flags		|= ol_flags;
> +			pkt->vlan_tci		= vlan_tci;
> +			pkt->vlan_tci_outer	= vlan_tci_outer;
> +			pkt->l2_len		= sizeof(struct rte_ether_hdr);
> +			pkt->l3_len		= sizeof(struct rte_ipv4_hdr);
> +		} else {
> +			nb_clones--;
> +		}
> +		pkts_burst[nb_pkt] = pkt;
>  
>  		next_flow = (next_flow + 1) % cfg_n_flows;
>  	}


This doesn't look safe. You can't just send the same mbuf N times without
incrementing the reference count.

  reply	other threads:[~2020-09-24 15:33 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-09-24 11:34 Igor Russkikh
2020-09-24 14:56 ` Stephen Hemminger [this message]
2020-09-25  8:44   ` [dpdk-dev] [EXT] " Igor Russkikh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200924075647.3e160c0b@hermes.lan \
    --to=stephen@networkplumber.org \
    --cc=beilei.xing@intel.com \
    --cc=bernard.iremonger@intel.com \
    --cc=dev@dpdk.org \
    --cc=dsinghrawat@marvell.com \
    --cc=irusskikh@marvell.com \
    --cc=rmody@marvell.com \
    --cc=wenzhuo.lu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).