DPDK patches and discussions
 help / color / mirror / Atom feed
From: Ferruh Yigit <ferruh.yigit@amd.com>
To: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
	Aman Singh <aman.deep.singh@intel.com>,
	Yuying Zhang <yuying.zhang@intel.com>
Cc: dev@dpdk.org, Georgiy Levashov <georgiy.levashov@oktetlabs.ru>,
	Ivan Ilchenko <ivan.ilchenko@oktetlabs.ru>
Subject: Re: [PATCH 2/2] app/testpmd: support TCP TSO in Tx only mode
Date: Wed, 19 Oct 2022 17:41:56 +0100	[thread overview]
Message-ID: <47576263-8afa-b21b-20f7-e887ee02bd94@amd.com> (raw)
In-Reply-To: <20221017144133.1899052-3-andrew.rybchenko@oktetlabs.ru>

On 10/17/2022 3:41 PM, Andrew Rybchenko wrote:
> Add '--txonly-tso-mss=N' option that enables TSO offload
> and generates packets with specified MSS in txonly mode.
> 
> Signed-off-by: Georgiy Levashov <georgiy.levashov@oktetlabs.ru>
> Signed-off-by: Ivan Ilchenko <ivan.ilchenko@oktetlabs.ru>
> Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> ---
>   app/test-pmd/parameters.c             | 10 +++++++++
>   app/test-pmd/testpmd.c                | 12 +++++++++++
>   app/test-pmd/testpmd.h                |  1 +
>   app/test-pmd/txonly.c                 | 31 ++++++++++++++++++++++++++-
>   doc/guides/testpmd_app_ug/run_app.rst |  4 ++++
>   5 files changed, 57 insertions(+), 1 deletion(-)
> 
> diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
> index 545ebee16b..eba0c658c2 100644
> --- a/app/test-pmd/parameters.c
> +++ b/app/test-pmd/parameters.c
> @@ -156,6 +156,7 @@ usage(char* progname)
>   	printf("  --txpkts=X[,Y]*: set TX segment sizes"
>   		" or total packet length.\n");
>   	printf("  --txonly-multi-flow: generate multiple flows in txonly mode\n");
> +	printf("  --txonly-tso-mss=N: enable TSO offload and generate packets with specified MSS in txonly mode\n");
>   	printf("  --tx-ip=src,dst: IP addresses in Tx-only mode\n");
>   	printf("  --tx-udp=src[,dst]: UDP ports in Tx-only mode\n");
>   	printf("  --eth-link-speed: force link speed.\n");
> @@ -671,6 +672,7 @@ launch_args_parse(int argc, char** argv)
>   		{ "rxhdrs",			1, 0, 0 },
>   		{ "txpkts",			1, 0, 0 },
>   		{ "txonly-multi-flow",		0, 0, 0 },
> +		{ "txonly-tso-mss",		1, 0, 0 },
>   		{ "rxq-share",			2, 0, 0 },
>   		{ "eth-link-speed",		1, 0, 0 },
>   		{ "disable-link-check",		0, 0, 0 },
> @@ -1299,6 +1301,14 @@ launch_args_parse(int argc, char** argv)
>   			}
>   			if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow"))
>   				txonly_multi_flow = 1;
> +			if (!strcmp(lgopts[opt_idx].name, "txonly-tso-mss")) {
> +				n = atoi(optarg);
> +				if (n >= 0 && n <= UINT16_MAX)
> +					txonly_tso_segsz = n;
> +				else
> +					rte_exit(EXIT_FAILURE,
> +						 "TSO MSS must be >= 0 and <= UINT16_MAX\n");
> +			}
>   			if (!strcmp(lgopts[opt_idx].name, "rxq-share")) {
>   				if (optarg == NULL) {
>   					rxq_share = UINT32_MAX;
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index 97adafacd0..076f1b3740 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -264,6 +264,9 @@ enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
>   uint8_t txonly_multi_flow;
>   /**< Whether multiple flows are generated in TXONLY mode. */
>   
> +uint16_t txonly_tso_segsz;
> +/**< TSO MSS for generated packets in TXONLY mode. */
> +
>   uint32_t tx_pkt_times_inter;
>   /**< Timings for send scheduling in TXONLY mode, time between bursts. */
>   
> @@ -1619,6 +1622,15 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
>   		port->dev_conf.txmode.offloads &=
>   			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
>   
> +	if (txonly_tso_segsz > 0) {
> +		if ((ports[pid].dev_info.tx_offload_capa &
> +		    RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
> +			rte_exit(EXIT_FAILURE,
> +				 "TSO isn't supported for port %d\n", pid);
> +		}
> +		port->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
> +	}
> +
>   	/* Apply Rx offloads configuration */
>   	for (i = 0; i < port->dev_info.max_rx_queues; i++)
>   		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> index 30915bd84b..bb47bdb490 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -605,6 +605,7 @@ enum tx_pkt_split {
>   extern enum tx_pkt_split tx_pkt_split;
>   
>   extern uint8_t txonly_multi_flow;
> +extern uint16_t txonly_tso_segsz;
>   
>   extern uint32_t rxq_share;
>   
> diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
> index 44bda752bc..858cc73ab4 100644
> --- a/app/test-pmd/txonly.c
> +++ b/app/test-pmd/txonly.c
> @@ -60,6 +60,7 @@ RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
>   
>   static union pkt_l4_hdr_t {
>   	struct rte_udp_hdr udp;	/**< UDP header of tx packets. */
> +	struct rte_tcp_hdr tcp; /**< TCP header of tx packets. */
>   } pkt_l4_hdr; /**< Layer 4 header of tx packets. */
>   
>   static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
> @@ -112,8 +113,19 @@ setup_pkt_l4_ip_headers(uint8_t ip_proto, struct rte_ipv4_hdr *ip_hdr,
>   	uint32_t ip_cksum;
>   	uint16_t pkt_len;
>   	struct rte_udp_hdr *udp_hdr;
> +	struct rte_tcp_hdr *tcp_hdr;
>   
>   	switch (ip_proto) {
> +	case IPPROTO_TCP:
> +		/*
> +		 * Initialize TCP header.
> +		 */
> +		pkt_len = (uint16_t)(pkt_data_len + sizeof(struct rte_tcp_hdr));
> +		tcp_hdr = &l4_hdr->tcp;
> +		tcp_hdr->src_port = rte_cpu_to_be_16(tx_l4_src_port);
> +		tcp_hdr->dst_port = rte_cpu_to_be_16(tx_l4_dst_port);
> +		tcp_hdr->data_off = (sizeof(struct rte_tcp_hdr) << 2) & 0xF0;
> +		break;
>   	case IPPROTO_UDP:
>   		/*
>   		 * Initialize UDP header.
> @@ -189,6 +201,8 @@ update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
>   	ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
>   
>   	switch (ip_hdr->next_proto_id) {
> +	case IPPROTO_TCP:
> +		break;

Why is the packet length updated for UDP, but not for TCP packets?

>   	case IPPROTO_UDP:
>   		/* update UDP packet length */
>   		udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
> @@ -232,6 +246,12 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
>   	pkt->l2_len = sizeof(struct rte_ether_hdr);
>   	pkt->l3_len = sizeof(struct rte_ipv4_hdr);
>   
> +	if (txonly_tso_segsz > 0) {
> +		pkt->tso_segsz = txonly_tso_segsz;
> +		pkt->ol_flags |= RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 |
> +				 RTE_MBUF_F_TX_IP_CKSUM;
> +	}
> +
>   	pkt_len = pkt->data_len;
>   	pkt_seg = pkt;
>   	for (i = 1; i < nb_segs; i++) {
> @@ -267,6 +287,12 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
>   		RTE_PER_LCORE(_ip_var) = ip_var;
>   	}
>   	switch (ip_hdr->next_proto_id) {
> +	case IPPROTO_TCP:
> +		copy_buf_to_pkt(&pkt_l4_hdr.tcp, sizeof(pkt_l4_hdr.tcp), pkt,
> +				sizeof(struct rte_ether_hdr) +
> +				sizeof(struct rte_ipv4_hdr));
> +		l4_hdr_size = sizeof(pkt_l4_hdr.tcp);
> +		break;
>   	case IPPROTO_UDP:
>   		copy_buf_to_pkt(&pkt_l4_hdr.udp, sizeof(pkt_l4_hdr.udp), pkt,
>   				sizeof(struct rte_ether_hdr) +
> @@ -277,6 +303,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
>   		l4_hdr_size = 0;
>   		break;
>   	}
> +	pkt->l4_len = l4_hdr_size;
>   
>   	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
>   		update_pkt_header(pkt, pkt_len);
> @@ -459,6 +486,7 @@ tx_only_begin(portid_t pi)
>   {
>   	uint16_t pkt_hdr_len, pkt_data_len;
>   	int dynf;
> +	uint8_t ip_proto;
>   
>   	pkt_hdr_len = (uint16_t)(sizeof(struct rte_ether_hdr) +
>   				 sizeof(struct rte_ipv4_hdr) +
> @@ -474,7 +502,8 @@ tx_only_begin(portid_t pi)
>   		return -EINVAL;
>   	}
>   
> -	setup_pkt_l4_ip_headers(IPPROTO_UDP, &pkt_ip_hdr, &pkt_l4_hdr,
> +	ip_proto = txonly_tso_segsz > 0 ? IPPROTO_TCP : IPPROTO_UDP;
> +	setup_pkt_l4_ip_headers(ip_proto, &pkt_ip_hdr, &pkt_l4_hdr,
>   				pkt_data_len);

please check the comment in the previous patch for a more explicit way to 
detect the protocol type.

>   
>   	timestamp_enable = false;
> diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
> index ed53b4fb1f..b7d1a07346 100644
> --- a/doc/guides/testpmd_app_ug/run_app.rst
> +++ b/doc/guides/testpmd_app_ug/run_app.rst
> @@ -369,6 +369,10 @@ The command line options are:
>   
>       Generate multiple flows in txonly mode.
>   
> +*   ``--txonly-tso-mss=N``
> +
> +    Enable TSO offload and generate TCP packets with specified MSS in txonly mode.
> +
>   *   ``--rxq-share=[X]``
>   
>       Create queues in shared Rx queue mode if device supports.


  reply	other threads:[~2022-10-19 16:42 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-10-17 14:41 [PATCH 0/2] " Andrew Rybchenko
2022-10-17 14:41 ` [PATCH 1/2] app/testpmd: prepare to support TCP " Andrew Rybchenko
2022-10-19 16:39   ` Ferruh Yigit
2022-11-11  8:36     ` Andrew Rybchenko
2022-11-11  8:54       ` Andrew Rybchenko
2022-11-15 12:32       ` Ferruh Yigit
2022-10-17 14:41 ` [PATCH 2/2] app/testpmd: support TCP TSO " Andrew Rybchenko
2022-10-19 16:41   ` Ferruh Yigit [this message]
2022-11-11  8:44     ` Andrew Rybchenko
2022-11-11  9:04 ` [PATCH v3 0/2] " Andrew Rybchenko
2022-11-11  9:04   ` [PATCH v3 1/2] app/testpmd: prepare to support TCP " Andrew Rybchenko
2022-11-11  9:04   ` [PATCH v3 2/2] app/testpmd: support TCP TSO " Andrew Rybchenko
2022-11-15 12:43     ` Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=47576263-8afa-b21b-20f7-e887ee02bd94@amd.com \
    --to=ferruh.yigit@amd.com \
    --cc=aman.deep.singh@intel.com \
    --cc=andrew.rybchenko@oktetlabs.ru \
    --cc=dev@dpdk.org \
    --cc=georgiy.levashov@oktetlabs.ru \
    --cc=ivan.ilchenko@oktetlabs.ru \
    --cc=yuying.zhang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).