DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH v2 0/2] app/testpmd: support TCP TSO in Tx only mode
@ 2022-11-11  8:49 Andrew Rybchenko
  2022-11-11  8:49 ` [PATCH v2 1/2] app/testpmd: prepare to support TCP " Andrew Rybchenko
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Andrew Rybchenko @ 2022-11-11  8:49 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev

Add command-line option to generate TSO packets with specified MSS
in txonly mode.

I'm not sure that it is a good idea to enable TSO offload automatically
when the option is specified, but it is convenient.

v2:
    - fix wrong data length calculation (as per review notes from Ferruh)

Andrew Rybchenko (2):
  app/testpmd: prepare to support TCP in Tx only mode
  app/testpmd: support TCP TSO in Tx only mode

 app/test-pmd/parameters.c             |  16 ++-
 app/test-pmd/testpmd.c                |  12 +++
 app/test-pmd/testpmd.h                |   5 +-
 app/test-pmd/txonly.c                 | 140 +++++++++++++++++++-------
 doc/guides/testpmd_app_ug/run_app.rst |   4 +
 5 files changed, 134 insertions(+), 43 deletions(-)

-- 
2.30.2


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH v2 1/2] app/testpmd: prepare to support TCP in Tx only mode
  2022-11-11  8:49 [PATCH v2 0/2] app/testpmd: support TCP TSO in Tx only mode Andrew Rybchenko
@ 2022-11-11  8:49 ` Andrew Rybchenko
  2022-11-11  8:49 ` [PATCH v2 2/2] app/testpmd: support TCP TSO " Andrew Rybchenko
  2022-11-11  8:52 ` [PATCH v2 0/2] " Andrew Rybchenko
  2 siblings, 0 replies; 4+ messages in thread
From: Andrew Rybchenko @ 2022-11-11  8:49 UTC (permalink / raw)
  To: Ferruh Yigit, Aman Singh, Yuying Zhang
  Cc: dev, Georgiy Levashov, Ivan Ilchenko

This is useful for the incoming support of TCP TSO in Tx only mode.

Signed-off-by: Georgiy Levashov <georgiy.levashov@oktetlabs.ru>
Signed-off-by: Ivan Ilchenko <ivan.ilchenko@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
---
 app/test-pmd/parameters.c |  6 +--
 app/test-pmd/testpmd.h    |  4 +-
 app/test-pmd/txonly.c     | 98 +++++++++++++++++++++++++--------------
 3 files changed, 67 insertions(+), 41 deletions(-)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index aed4cdcb84..2ed8afedfd 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -833,7 +833,7 @@ launch_args_parse(int argc, char** argv)
 					rte_exit(EXIT_FAILURE,
 						 "Invalid UDP port: %s\n",
 						 optarg);
-				tx_udp_src_port = n;
+				tx_l4_src_port = n;
 				if (*end == ',') {
 					char *dst = end + 1;
 
@@ -843,9 +843,9 @@ launch_args_parse(int argc, char** argv)
 						rte_exit(EXIT_FAILURE,
 							 "Invalid destination UDP port: %s\n",
 							 dst);
-					tx_udp_dst_port = n;
+					tx_l4_dst_port = n;
 				} else {
-					tx_udp_dst_port = n;
+					tx_l4_dst_port = n;
 				}
 
 			}
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 05ca8628cf..976f4f83dd 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -625,8 +625,8 @@ extern int8_t tx_pthresh;
 extern int8_t tx_hthresh;
 extern int8_t tx_wthresh;
 
-extern uint16_t tx_udp_src_port;
-extern uint16_t tx_udp_dst_port;
+extern uint16_t tx_l4_src_port;
+extern uint16_t tx_l4_dst_port;
 
 extern uint32_t tx_ip_src_addr;
 extern uint32_t tx_ip_dst_addr;
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952d..44bda752bc 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -46,8 +46,8 @@ struct tx_timestamp {
 };
 
 /* use RFC863 Discard Protocol */
-uint16_t tx_udp_src_port = 9;
-uint16_t tx_udp_dst_port = 9;
+uint16_t tx_l4_src_port = 9;
+uint16_t tx_l4_dst_port = 9;
 
 /* use RFC5735 / RFC2544 reserved network test addresses */
 uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
@@ -57,7 +57,10 @@ uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
 
 static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
 RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
-static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
+
+static union pkt_l4_hdr_t {
+	struct rte_udp_hdr udp;	/**< UDP header of tx packets. */
+} pkt_l4_hdr; /**< Layer 4 header of tx packets. */
 
 static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
 static int32_t timestamp_off; /**< Timestamp dynamic field offset */
@@ -102,22 +105,30 @@ copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
 }
 
 static void
-setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
-			 struct rte_udp_hdr *udp_hdr,
-			 uint16_t pkt_data_len)
+setup_pkt_l4_ip_headers(uint8_t ip_proto, struct rte_ipv4_hdr *ip_hdr,
+			union pkt_l4_hdr_t *l4_hdr, uint16_t pkt_data_len)
 {
 	uint16_t *ptr16;
 	uint32_t ip_cksum;
 	uint16_t pkt_len;
+	struct rte_udp_hdr *udp_hdr;
 
-	/*
-	 * Initialize UDP header.
-	 */
-	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
-	udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
-	udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
-	udp_hdr->dgram_len      = RTE_CPU_TO_BE_16(pkt_len);
-	udp_hdr->dgram_cksum    = 0; /* No UDP checksum. */
+	switch (ip_proto) {
+	case IPPROTO_UDP:
+		/*
+		 * Initialize UDP header.
+		 */
+		pkt_len = (uint16_t)(pkt_data_len + sizeof(struct rte_udp_hdr));
+		udp_hdr = &l4_hdr->udp;
+		udp_hdr->src_port = rte_cpu_to_be_16(tx_l4_src_port);
+		udp_hdr->dst_port = rte_cpu_to_be_16(tx_l4_dst_port);
+		udp_hdr->dgram_len      = RTE_CPU_TO_BE_16(pkt_len);
+		udp_hdr->dgram_cksum    = 0; /* No UDP checksum. */
+		break;
+	default:
+		pkt_len = pkt_data_len;
+		break;
+	}
 
 	/*
 	 * Initialize IP header.
@@ -127,7 +138,7 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
 	ip_hdr->type_of_service   = 0;
 	ip_hdr->fragment_offset = 0;
 	ip_hdr->time_to_live   = IP_DEFTTL;
-	ip_hdr->next_proto_id = IPPROTO_UDP;
+	ip_hdr->next_proto_id = ip_proto;
 	ip_hdr->packet_id = 0;
 	ip_hdr->total_length   = RTE_CPU_TO_BE_16(pkt_len);
 	ip_hdr->src_addr = rte_cpu_to_be_32(tx_ip_src_addr);
@@ -162,27 +173,32 @@ update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
 {
 	struct rte_ipv4_hdr *ip_hdr;
 	struct rte_udp_hdr *udp_hdr;
-	uint16_t pkt_data_len;
+	uint16_t ip_data_len;
 	uint16_t pkt_len;
 
-	pkt_data_len = (uint16_t) (total_pkt_len - (
+	ip_data_len = (uint16_t) (total_pkt_len - (
 					sizeof(struct rte_ether_hdr) +
-					sizeof(struct rte_ipv4_hdr) +
-					sizeof(struct rte_udp_hdr)));
-	/* update UDP packet length */
-	udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
-				sizeof(struct rte_ether_hdr) +
-				sizeof(struct rte_ipv4_hdr));
-	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
-	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
+					sizeof(struct rte_ipv4_hdr)));
 
 	/* update IP packet length and checksum */
 	ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
 				sizeof(struct rte_ether_hdr));
 	ip_hdr->hdr_checksum = 0;
-	pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
+	pkt_len = (uint16_t) (ip_data_len + sizeof(struct rte_ipv4_hdr));
 	ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
 	ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
+
+	switch (ip_hdr->next_proto_id) {
+	case IPPROTO_UDP:
+		/* update UDP packet length */
+		udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
+					sizeof(struct rte_ether_hdr) +
+					sizeof(struct rte_ipv4_hdr));
+		udp_hdr->dgram_len = RTE_CPU_TO_BE_16(ip_data_len);
+		break;
+	default:
+		break;
+	}
 }
 
 static inline bool
@@ -193,8 +209,9 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 {
 	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
 	struct rte_mbuf *pkt_seg;
-	uint32_t nb_segs, pkt_len;
+	uint32_t nb_segs, pkt_len, l4_hdr_size;
 	uint8_t i;
+	struct rte_ipv4_hdr *ip_hdr;
 
 	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
 		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
@@ -230,14 +247,14 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 	copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
 	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
 			sizeof(struct rte_ether_hdr));
+
+	ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+			struct rte_ipv4_hdr *,
+			sizeof(struct rte_ether_hdr));
 	if (txonly_multi_flow) {
 		uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
-		struct rte_ipv4_hdr *ip_hdr;
 		uint32_t addr;
 
-		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
-				struct rte_ipv4_hdr *,
-				sizeof(struct rte_ether_hdr));
 		/*
 		 * Generate multiple flows by varying IP src addr. This
 		 * enables packets are well distributed by RSS in
@@ -249,9 +266,17 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
 		RTE_PER_LCORE(_ip_var) = ip_var;
 	}
-	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
-			sizeof(struct rte_ether_hdr) +
-			sizeof(struct rte_ipv4_hdr));
+	switch (ip_hdr->next_proto_id) {
+	case IPPROTO_UDP:
+		copy_buf_to_pkt(&pkt_l4_hdr.udp, sizeof(pkt_l4_hdr.udp), pkt,
+				sizeof(struct rte_ether_hdr) +
+				sizeof(struct rte_ipv4_hdr));
+		l4_hdr_size = sizeof(pkt_l4_hdr.udp);
+		break;
+	default:
+		l4_hdr_size = 0;
+		break;
+	}
 
 	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
 		update_pkt_header(pkt, pkt_len);
@@ -308,7 +333,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
 			sizeof(struct rte_ether_hdr) +
 			sizeof(struct rte_ipv4_hdr) +
-			sizeof(pkt_udp_hdr));
+			l4_hdr_size);
 	}
 	/*
 	 * Complete first mbuf of packet and append it to the
@@ -449,7 +474,8 @@ tx_only_begin(portid_t pi)
 		return -EINVAL;
 	}
 
-	setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
+	setup_pkt_l4_ip_headers(IPPROTO_UDP, &pkt_ip_hdr, &pkt_l4_hdr,
+				pkt_data_len);
 
 	timestamp_enable = false;
 	timestamp_mask = 0;
-- 
2.30.2


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH v2 2/2] app/testpmd: support TCP TSO in Tx only mode
  2022-11-11  8:49 [PATCH v2 0/2] app/testpmd: support TCP TSO in Tx only mode Andrew Rybchenko
  2022-11-11  8:49 ` [PATCH v2 1/2] app/testpmd: prepare to support TCP " Andrew Rybchenko
@ 2022-11-11  8:49 ` Andrew Rybchenko
  2022-11-11  8:52 ` [PATCH v2 0/2] " Andrew Rybchenko
  2 siblings, 0 replies; 4+ messages in thread
From: Andrew Rybchenko @ 2022-11-11  8:49 UTC (permalink / raw)
  To: Ferruh Yigit, Aman Singh, Yuying Zhang
  Cc: dev, Georgiy Levashov, Ivan Ilchenko

Add '--txonly-tso-mss=N' option that enables TSO offload
and generates packets with specified MSS in txonly mode.

Signed-off-by: Georgiy Levashov <georgiy.levashov@oktetlabs.ru>
Signed-off-by: Ivan Ilchenko <ivan.ilchenko@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
---
 app/test-pmd/parameters.c             | 10 ++++++
 app/test-pmd/testpmd.c                | 12 ++++++++
 app/test-pmd/testpmd.h                |  1 +
 app/test-pmd/txonly.c                 | 44 +++++++++++++++++++++++++--
 doc/guides/testpmd_app_ug/run_app.rst |  4 +++
 5 files changed, 68 insertions(+), 3 deletions(-)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 2ed8afedfd..e71cb3e139 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -156,6 +156,7 @@ usage(char* progname)
 	printf("  --txpkts=X[,Y]*: set TX segment sizes"
 		" or total packet length.\n");
 	printf("  --txonly-multi-flow: generate multiple flows in txonly mode\n");
+	printf("  --txonly-tso-mss=N: enable TSO offload and generate packets with specified MSS in txonly mode\n");
 	printf("  --tx-ip=src,dst: IP addresses in Tx-only mode\n");
 	printf("  --tx-udp=src[,dst]: UDP ports in Tx-only mode\n");
 	printf("  --eth-link-speed: force link speed.\n");
@@ -670,6 +671,7 @@ launch_args_parse(int argc, char** argv)
 		{ "rxhdrs",			1, 0, 0 },
 		{ "txpkts",			1, 0, 0 },
 		{ "txonly-multi-flow",		0, 0, 0 },
+		{ "txonly-tso-mss",		1, 0, 0 },
 		{ "rxq-share",			2, 0, 0 },
 		{ "eth-link-speed",		1, 0, 0 },
 		{ "disable-link-check",		0, 0, 0 },
@@ -1297,6 +1299,14 @@ launch_args_parse(int argc, char** argv)
 			}
 			if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow"))
 				txonly_multi_flow = 1;
+			if (!strcmp(lgopts[opt_idx].name, "txonly-tso-mss")) {
+				n = atoi(optarg);
+				if (n >= 0 && n <= UINT16_MAX)
+					txonly_tso_segsz = n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "TSO MSS must be >= 0 and <= UINT16_MAX\n");
+			}
 			if (!strcmp(lgopts[opt_idx].name, "rxq-share")) {
 				if (optarg == NULL) {
 					rxq_share = UINT32_MAX;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index ef281ccd20..94d37be692 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -264,6 +264,9 @@ enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
 uint8_t txonly_multi_flow;
 /**< Whether multiple flows are generated in TXONLY mode. */
 
+uint16_t txonly_tso_segsz;
+/**< TSO MSS for generated packets in TXONLY mode. */
+
 uint32_t tx_pkt_times_inter;
 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
 
@@ -1615,6 +1618,15 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
 		port->dev_conf.txmode.offloads &=
 			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
+	if (txonly_tso_segsz > 0) {
+		if ((ports[pid].dev_info.tx_offload_capa &
+		    RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
+			rte_exit(EXIT_FAILURE,
+				 "TSO isn't supported for port %d\n", pid);
+		}
+		port->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+	}
+
 	/* Apply Rx offloads configuration */
 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
 		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 976f4f83dd..fbe1839a8f 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -611,6 +611,7 @@ enum tx_pkt_split {
 extern enum tx_pkt_split tx_pkt_split;
 
 extern uint8_t txonly_multi_flow;
+extern uint16_t txonly_tso_segsz;
 
 extern uint32_t rxq_share;
 
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 44bda752bc..7095c31b01 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -60,6 +60,7 @@ RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
 
 static union pkt_l4_hdr_t {
 	struct rte_udp_hdr udp;	/**< UDP header of tx packets. */
+	struct rte_tcp_hdr tcp; /**< TCP header of tx packets. */
 } pkt_l4_hdr; /**< Layer 4 header of tx packets. */
 
 static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
@@ -112,8 +113,19 @@ setup_pkt_l4_ip_headers(uint8_t ip_proto, struct rte_ipv4_hdr *ip_hdr,
 	uint32_t ip_cksum;
 	uint16_t pkt_len;
 	struct rte_udp_hdr *udp_hdr;
+	struct rte_tcp_hdr *tcp_hdr;
 
 	switch (ip_proto) {
+	case IPPROTO_TCP:
+		/*
+		 * Initialize TCP header.
+		 */
+		pkt_len = (uint16_t)(pkt_data_len + sizeof(struct rte_tcp_hdr));
+		tcp_hdr = &l4_hdr->tcp;
+		tcp_hdr->src_port = rte_cpu_to_be_16(tx_l4_src_port);
+		tcp_hdr->dst_port = rte_cpu_to_be_16(tx_l4_dst_port);
+		tcp_hdr->data_off = (sizeof(struct rte_tcp_hdr) << 2) & 0xF0;
+		break;
 	case IPPROTO_UDP:
 		/*
 		 * Initialize UDP header.
@@ -189,6 +201,8 @@ update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
 	ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
 
 	switch (ip_hdr->next_proto_id) {
+	case IPPROTO_TCP:
+		break;
 	case IPPROTO_UDP:
 		/* update UDP packet length */
 		udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
@@ -232,6 +246,12 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 	pkt->l2_len = sizeof(struct rte_ether_hdr);
 	pkt->l3_len = sizeof(struct rte_ipv4_hdr);
 
+	if (txonly_tso_segsz > 0) {
+		pkt->tso_segsz = txonly_tso_segsz;
+		pkt->ol_flags |= RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 |
+				 RTE_MBUF_F_TX_IP_CKSUM;
+	}
+
 	pkt_len = pkt->data_len;
 	pkt_seg = pkt;
 	for (i = 1; i < nb_segs; i++) {
@@ -267,6 +287,12 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		RTE_PER_LCORE(_ip_var) = ip_var;
 	}
 	switch (ip_hdr->next_proto_id) {
+	case IPPROTO_TCP:
+		copy_buf_to_pkt(&pkt_l4_hdr.tcp, sizeof(pkt_l4_hdr.tcp), pkt,
+				sizeof(struct rte_ether_hdr) +
+				sizeof(struct rte_ipv4_hdr));
+		l4_hdr_size = sizeof(pkt_l4_hdr.tcp);
+		break;
 	case IPPROTO_UDP:
 		copy_buf_to_pkt(&pkt_l4_hdr.udp, sizeof(pkt_l4_hdr.udp), pkt,
 				sizeof(struct rte_ether_hdr) +
@@ -277,6 +303,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		l4_hdr_size = 0;
 		break;
 	}
+	pkt->l4_len = l4_hdr_size;
 
 	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
 		update_pkt_header(pkt, pkt_len);
@@ -459,10 +486,21 @@ tx_only_begin(portid_t pi)
 {
 	uint16_t pkt_hdr_len, pkt_data_len;
 	int dynf;
+	uint8_t ip_proto;
 
 	pkt_hdr_len = (uint16_t)(sizeof(struct rte_ether_hdr) +
-				 sizeof(struct rte_ipv4_hdr) +
-				 sizeof(struct rte_udp_hdr));
+				 sizeof(struct rte_ipv4_hdr));
+	ip_proto = txonly_tso_segsz > 0 ? IPPROTO_TCP : IPPROTO_UDP;
+	switch (ip_proto) {
+	case IPPROTO_TCP:
+		pkt_hdr_len += sizeof(struct rte_tcp_hdr);
+		break;
+	case IPPROTO_UDP:
+		pkt_hdr_len += sizeof(struct rte_udp_hdr);
+		break;
+	default:
+		break;
+	}
 	pkt_data_len = tx_pkt_length - pkt_hdr_len;
 
 	if ((tx_pkt_split == TX_PKT_SPLIT_RND || txonly_multi_flow) &&
@@ -474,7 +512,7 @@ tx_only_begin(portid_t pi)
 		return -EINVAL;
 	}
 
-	setup_pkt_l4_ip_headers(IPPROTO_UDP, &pkt_ip_hdr, &pkt_l4_hdr,
+	setup_pkt_l4_ip_headers(ip_proto, &pkt_ip_hdr, &pkt_l4_hdr,
 				pkt_data_len);
 
 	timestamp_enable = false;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 610e442924..01d6852fd3 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -369,6 +369,10 @@ The command line options are:
 
     Generate multiple flows in txonly mode.
 
+*   ``--txonly-tso-mss=N``
+
+    Enable TSO offload and generate TCP packets with specified MSS in txonly mode.
+
 *   ``--rxq-share=[X]``
 
     Create queues in shared Rx queue mode if device supports.
-- 
2.30.2


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v2 0/2] app/testpmd: support TCP TSO in Tx only mode
  2022-11-11  8:49 [PATCH v2 0/2] app/testpmd: support TCP TSO in Tx only mode Andrew Rybchenko
  2022-11-11  8:49 ` [PATCH v2 1/2] app/testpmd: prepare to support TCP " Andrew Rybchenko
  2022-11-11  8:49 ` [PATCH v2 2/2] app/testpmd: support TCP TSO " Andrew Rybchenko
@ 2022-11-11  8:52 ` Andrew Rybchenko
  2 siblings, 0 replies; 4+ messages in thread
From: Andrew Rybchenko @ 2022-11-11  8:52 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev

On 11/11/22 11:49, Andrew Rybchenko wrote:
> Add command-line option to generate TSO packets with specified MSS
> in txonly mode.
> 
> I'm not sure that it is a good idea to enable TSO offload automatically
> when the option is specified, but it is convenient.
> 
> v2:
>      - fix wrong data length calculation (as per review notes from Ferruh)
> 
> Andrew Rybchenko (2):
>    app/testpmd: prepare to support TCP in Tx only mode
>    app/testpmd: support TCP TSO in Tx only mode
> 
>   app/test-pmd/parameters.c             |  16 ++-
>   app/test-pmd/testpmd.c                |  12 +++
>   app/test-pmd/testpmd.h                |   5 +-
>   app/test-pmd/txonly.c                 | 140 +++++++++++++++++++-------
>   doc/guides/testpmd_app_ug/run_app.rst |   4 +
>   5 files changed, 134 insertions(+), 43 deletions(-)
> 

Self NAK, I forgot --in-reply-to and finally changed my mind
about review notes processing. Sorry for spam. I'll send v3
shortly.


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2022-11-11  8:52 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-11-11  8:49 [PATCH v2 0/2] app/testpmd: support TCP TSO in Tx only mode Andrew Rybchenko
2022-11-11  8:49 ` [PATCH v2 1/2] app/testpmd: prepare to support TCP " Andrew Rybchenko
2022-11-11  8:49 ` [PATCH v2 2/2] app/testpmd: support TCP TSO " Andrew Rybchenko
2022-11-11  8:52 ` [PATCH v2 0/2] " Andrew Rybchenko

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).