DPDK patches and discussions
* [PATCH] app/testpmd: add vxlan txonly
@ 2023-01-03  2:30 wushaohua
  2023-02-17 20:43 ` Ferruh Yigit
  0 siblings, 1 reply; 6+ messages in thread
From: wushaohua @ 2023-01-03  2:30 UTC (permalink / raw)
  To: dev; +Cc: aman.deep.singh, yuying.zhang, wushaohua

From: Shaohua Wu <wushaohua@chinatelecom.cn>

Add a VXLAN tx-only forwarding engine ("tuntxonly") that actively
generates VXLAN-encapsulated packets using an ordinary network
adapter, i.e. without relying on hardware tunnel offloads.
The default VNI is 1000.

Example:
 ./dpdk-testpmd -l 32-47 -n 16 --file-prefix pg0 -- -i
	--rxq=16 --txq=16 --rxd=1024 --txd=1024
	--txpkts=64 --burst=64  --mbuf-size=4096
	--nb-cores=15  --underlay_tx_only
	--underlay-eth-peer=0,f0:00:00:00:00:66
	--eth-peer=0,08:c0:eb:3e:87:af
	--utx-ip=11.0.0.1,11.0.0.2
	--tx-ip=30.0.0.1,30.0.0.2
	--forward-mode=tuntxonly
	--txonly-multi-flow
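
A matching runtime command is also added in cmdline.c; forwarding
must be stopped before changing the peer. A usage sketch in
interactive mode:

	testpmd> set underlay-eth-peer <port_id> <peer_mac>
	testpmd> set underlay-eth-peer 0 f0:00:00:00:00:66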

Signed-off-by: Shaohua Wu <wushaohua@chinatelecom.cn>
---
 app/test-pmd/cmdline.c    |  47 ++++
 app/test-pmd/config.c     |  15 ++
 app/test-pmd/meson.build  |   1 +
 app/test-pmd/parameters.c |  44 ++++
 app/test-pmd/testpmd.c    |  26 +-
 app/test-pmd/testpmd.h    |  11 +-
 app/test-pmd/txonly.c     |   4 +
 app/test-pmd/vxlan.c      | 534 ++++++++++++++++++++++++++++++++++++++
 app/test-pmd/vxlan.h      |  23 ++
 9 files changed, 700 insertions(+), 5 deletions(-)
 create mode 100644 app/test-pmd/vxlan.c
 create mode 100644 app/test-pmd/vxlan.h

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b32dc8bfd4..a82214a6b1 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -12621,6 +12621,52 @@ static cmdline_parse_inst_t cmd_show_port_flow_transfer_proxy = {
 	}
 };
 
+struct cmd_underlay_eth_peer_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t underlay_eth_peer;
+	portid_t port_id;
+	cmdline_fixed_string_t underlay_peer_addr;
+};
+
+static void cmd_set_underlay_eth_peer_parsed(void *parsed_result,
+			__rte_unused struct cmdline *cl,
+			__rte_unused void *data)
+{
+	struct cmd_underlay_eth_peer_result *res = parsed_result;
+
+	if (test_done == 0) {
+		fprintf(stderr, "Please stop forwarding first\n");
+		return;
+	}
+	if (!strcmp(res->underlay_eth_peer, "underlay-eth-peer")) {
+		set_fwd_underlay_eth_peer(res->port_id, res->underlay_peer_addr);
+		fwd_config_setup();
+	}
+}
+
+static cmdline_parse_token_string_t cmd_underlay_eth_peer_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_underlay_eth_peer_result, set, "set");
+static cmdline_parse_token_string_t cmd_underlay_eth_peer =
+	TOKEN_STRING_INITIALIZER(struct cmd_underlay_eth_peer_result, underlay_eth_peer, "underlay-eth-peer");
+static cmdline_parse_token_num_t cmd_underlay_eth_peer_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_underlay_eth_peer_result, port_id,
+		RTE_UINT16);
+static cmdline_parse_token_string_t cmd_underlay_eth_peer_addr =
+	TOKEN_STRING_INITIALIZER(struct cmd_underlay_eth_peer_result, underlay_peer_addr, NULL);
+
+static cmdline_parse_inst_t cmd_set_fwd_underlay_eth_peer = {
+	.f = cmd_set_underlay_eth_peer_parsed,
+	.data = NULL,
+	.help_str = "set underlay-eth-peer <port_id> <peer_mac>",
+	.tokens = {
+		(void *)&cmd_underlay_eth_peer_set,
+		(void *)&cmd_underlay_eth_peer,
+		(void *)&cmd_underlay_eth_peer_port_id,
+		(void *)&cmd_underlay_eth_peer_addr,
+		NULL,
+	},
+};
+
 /* ******************************************************************************** */
 
 /* list of instructions */
@@ -12851,6 +12897,7 @@ static cmdline_parse_ctx_t builtin_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_show_capability,
 	(cmdline_parse_inst_t *)&cmd_set_flex_is_pattern,
 	(cmdline_parse_inst_t *)&cmd_set_flex_spec_pattern,
+	(cmdline_parse_inst_t *)&cmd_set_fwd_underlay_eth_peer,
 	NULL,
 };
 
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index acccb6b035..af878314bf 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -4769,6 +4769,21 @@ set_fwd_eth_peer(portid_t port_id, char *peer_addr)
 	}
 	peer_eth_addrs[port_id] = new_peer_addr;
 }
+
+void
+set_fwd_underlay_eth_peer(portid_t port_id, char *peer_addr)
+{
+	struct rte_ether_addr new_peer_addr;
+
+	if (!rte_eth_dev_is_valid_port(port_id)) {
+		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
+		return;
+	}
+	if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
+		fprintf(stderr, "Error: Invalid ethernet address: %s\n",
+				peer_addr);
+		return;
+	}
+	peer_underlay_eth_addrs[port_id] = new_peer_addr;
+}
 
 int
 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index 8488efc138..5f0de21996 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -26,6 +26,7 @@ sources = files(
         'testpmd.c',
         'txonly.c',
         'util.c',
+        'vxlan.c',
 )
 
 if dpdk_conf.has('RTE_HAS_JANSSON')
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index d597c209ba..ca152f0294 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -603,6 +603,7 @@ launch_args_parse(int argc, char** argv)
 		{ "auto-start",			0, 0, 0 },
 		{ "eth-peers-configfile",	1, 0, 0 },
 		{ "eth-peer",			1, 0, 0 },
+		{ "underlay-eth-peer",		1, 0, 0 },
 #endif
 		{ "tx-first",			0, 0, 0 },
 		{ "stats-period",		1, 0, 0 },
@@ -693,6 +694,8 @@ launch_args_parse(int argc, char** argv)
 		{ "mp-alloc",			1, 0, 0 },
 		{ "tx-ip",			1, 0, 0 },
 		{ "tx-udp",			1, 0, 0 },
+		{ "utx-ip",		1, 0, 0 },
+		{ "underlay_tx_only",		0, 0, 0 },
 		{ "noisy-tx-sw-buffer-size",	1, 0, 0 },
 		{ "noisy-tx-sw-buffer-flushtime", 1, 0, 0 },
 		{ "noisy-lkup-memory",		1, 0, 0 },
@@ -803,6 +806,47 @@ launch_args_parse(int argc, char** argv)
 				nb_peer_eth_addrs++;
 			}
 #endif
+			if (!strcmp(lgopts[opt_idx].name, "underlay-eth-peer")) {
+				char *port_end;
+
+				errno = 0;
+				n = strtoul(optarg, &port_end, 10);
+				if (errno != 0 || port_end == optarg || *port_end++ != ',')
+					rte_exit(EXIT_FAILURE,
+						 "Invalid underlay-eth-peer: %s", optarg);
+				if (n >= RTE_MAX_ETHPORTS)
+					rte_exit(EXIT_FAILURE,
+						 "underlay-eth-peer: port %d >= RTE_MAX_ETHPORTS(%d)\n",
+						 n, RTE_MAX_ETHPORTS);
+
+				if (rte_ether_unformat_addr(port_end,
+						&peer_underlay_eth_addrs[n]) < 0)
+					rte_exit(EXIT_FAILURE,
+						 "Invalid ethernet address: %s\n",
+						 port_end);
+			}
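+			/*
+			 * utx-ip sets the outer (underlay) IPv4 source and
+			 * destination addresses used for the VXLAN
+			 * encapsulation; tx-ip still addresses the inner
+			 * packet.
+			 */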
+			if (!strcmp(lgopts[opt_idx].name, "utx-ip")) {
+				struct in_addr in;
+				char *end;
+
+				end = strchr(optarg, ',');
+				if (end == optarg || !end)
+					rte_exit(EXIT_FAILURE,
+						 "Invalid utx-ip: %s", optarg);
+
+				*end++ = 0;
+				if (inet_pton(AF_INET, optarg, &in) == 0)
+					rte_exit(EXIT_FAILURE,
+						 "Invalid source IP address: %s\n",
+						 optarg);
+				underlay_tx_ip_src_addr = rte_be_to_cpu_32(in.s_addr);
+
+				if (inet_pton(AF_INET, end, &in) == 0)
+					rte_exit(EXIT_FAILURE,
+						 "Invalid destination IP address: %s\n",
+						 end);
+				underlay_tx_ip_dst_addr = rte_be_to_cpu_32(in.s_addr);
+			}
 			if (!strcmp(lgopts[opt_idx].name, "tx-ip")) {
 				struct in_addr in;
 				char *end;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 134d79a555..5d177fa02e 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -156,7 +156,10 @@ uint8_t txring_numa[RTE_MAX_ETHPORTS];
  * ports.
  */
 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
-portid_t nb_peer_eth_addrs = 0;
+struct rte_ether_addr peer_underlay_eth_addrs[RTE_MAX_ETHPORTS];
+portid_t nb_peer_eth_addrs;
+portid_t nb_underlay_peer_eth_addr;
+bool is_underlay_tx_only;
 
 /*
  * Probed Target Environment.
@@ -194,6 +197,7 @@ struct fwd_engine * fwd_engines[] = {
 	&flow_gen_engine,
 	&rx_only_engine,
 	&tx_only_engine,
+	&tun_tx_only_engine,
 	&csum_fwd_engine,
 	&icmp_echo_engine,
 	&noisy_vnf_engine,
@@ -257,6 +261,7 @@ uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature */
 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
 	TXONLY_DEF_PACKET_LEN,
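+	/* Index 1 holds the default segment length used by the tuntxonly engine. */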
+	VXLAN_TXONLY_DEF_PACKET_LEN,
 };
 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
 
@@ -759,6 +764,8 @@ set_def_peer_eth_addrs(void)
 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
 		peer_eth_addrs[i].addr_bytes[5] = i;
+		peer_underlay_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
+		peer_underlay_eth_addrs[i].addr_bytes[5] = i;
 	}
 }
 
@@ -2309,7 +2316,10 @@ run_one_txonly_burst_on_core(void *fwd_arg)
 	fwd_lc = (struct fwd_lcore *) fwd_arg;
 	tmp_lcore = *fwd_lc;
 	tmp_lcore.stopped = 1;
-	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
+	if (is_underlay_tx_only)
+		run_pkt_fwd_on_lcore(&tmp_lcore, tun_tx_only_engine.packet_fwd);
+	else
+		run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
 	return 0;
 }
 
@@ -2394,7 +2404,11 @@ start_packet_forwarding(int with_tx_first)
 	}
 
 	if (with_tx_first) {
-		port_fwd_begin = tx_only_engine.port_fwd_begin;
+		if (is_underlay_tx_only) {
+			port_fwd_begin = tun_tx_only_engine.port_fwd_begin;
+		} else {
+			port_fwd_begin = tx_only_engine.port_fwd_begin;
+		}
 		if (port_fwd_begin != NULL) {
 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
 				if (port_fwd_begin(fwd_ports_ids[i])) {
@@ -2420,7 +2434,11 @@ start_packet_forwarding(int with_tx_first)
 					run_one_txonly_burst_on_core);
 			rte_eal_mp_wait_lcore();
 		}
-		port_fwd_end = tx_only_engine.port_fwd_end;
+		if (is_underlay_tx_only) {
+			port_fwd_end = tun_tx_only_engine.port_fwd_end;
+		} else {
+			port_fwd_end = tx_only_engine.port_fwd_end;
+		}
 		if (port_fwd_end != NULL) {
 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
 				(*port_fwd_end)(fwd_ports_ids[i]);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970..eb749736e8 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -423,6 +423,7 @@ extern struct fwd_engine mac_swap_engine;
 extern struct fwd_engine flow_gen_engine;
 extern struct fwd_engine rx_only_engine;
 extern struct fwd_engine tx_only_engine;
+extern struct fwd_engine tun_tx_only_engine;
 extern struct fwd_engine csum_fwd_engine;
 extern struct fwd_engine icmp_echo_engine;
 extern struct fwd_engine noisy_vnf_engine;
@@ -598,6 +599,7 @@ extern uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature. */
  * Configuration of packet segments used by the "txonly" processing engine.
  */
 #define TXONLY_DEF_PACKET_LEN 64
+#define VXLAN_TXONLY_DEF_PACKET_LEN 128
 extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */
 extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
 extern uint8_t  tx_pkt_nb_segs; /**< Number of segments in TX packets */
@@ -630,9 +632,15 @@ extern int8_t tx_wthresh;
 extern uint16_t tx_udp_src_port;
 extern uint16_t tx_udp_dst_port;
 
+extern uint16_t underlay_tx_udp_src_port;
+extern uint16_t underlay_tx_udp_dst_port;
+
 extern uint32_t tx_ip_src_addr;
 extern uint32_t tx_ip_dst_addr;
 
+extern uint32_t underlay_tx_ip_src_addr;
+extern uint32_t underlay_tx_ip_dst_addr;
+
 extern struct fwd_config cur_fwd_config;
 extern struct fwd_engine *cur_fwd_eng;
 extern uint32_t retry_enabled;
@@ -644,6 +652,7 @@ extern uint16_t geneve_udp_port; /**< UDP port of tunnel GENEVE. */
 
 extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */
 extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+extern struct rte_ether_addr peer_underlay_eth_addrs[RTE_MAX_ETHPORTS];
 
 extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
 extern uint32_t burst_tx_retry_num;  /**< Burst tx retry number for mac-retry. */
@@ -892,7 +901,7 @@ int init_fwd_streams(void);
 void update_fwd_ports(portid_t new_pid);
 
 void set_fwd_eth_peer(portid_t port_id, char *peer_addr);
+void set_fwd_underlay_eth_peer(portid_t port_id, char *peer_addr);
 
 void port_mtu_set(portid_t port_id, uint16_t mtu);
 int port_action_handle_create(portid_t port_id, uint32_t id,
 			      const struct rte_flow_indir_action_conf *conf,
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952d..9bc04722a0 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -48,10 +48,14 @@ struct tx_timestamp {
 /* use RFC863 Discard Protocol */
 uint16_t tx_udp_src_port = 9;
 uint16_t tx_udp_dst_port = 9;
+uint16_t underlay_tx_udp_src_port = 9;
+uint16_t underlay_tx_udp_dst_port = 4789;
 
 /* use RFC5735 / RFC2544 reserved network test addresses */
 uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
 uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
+uint32_t underlay_tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
+uint32_t underlay_tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
 
 #define IP_DEFTTL  64   /* from RFC 1340. */
 
diff --git a/app/test-pmd/vxlan.c b/app/test-pmd/vxlan.c
new file mode 100644
index 0000000000..52e7b1b486
--- /dev/null
+++ b/app/test-pmd/vxlan.c
@@ -0,0 +1,534 @@
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+#include <rte_flow.h>
+
+#include "vxlan.h"
+#include "testpmd.h"
+struct tx_timestamp {
+	rte_be32_t signature;
+	rte_be16_t pkt_idx;
+	rte_be16_t queue_idx;
+	rte_be64_t ts;
+};
+uint16_t tun_tx_pkt_length = VXLAN_TXONLY_DEF_PACKET_LEN;
+static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
+RTE_DEFINE_PER_LCORE(uint8_t, __ip_var); /**< IP address variation */
+static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
+struct encap_vxlan_ipv4_vlan_data pkt_vxlan_hdr;
+static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
+static int32_t timestamp_off; /**< Timestamp dynamic field offset */
+static bool timestamp_enable; /**< Timestamp enable */
+static uint64_t timestamp_initial[RTE_MAX_ETHPORTS];
+
+static void
+copy_buf_to_pkt_segs(void *buf, unsigned int len,
+		struct rte_mbuf *pkt, unsigned int offset)
+{
+	struct rte_mbuf *seg;
+	void *seg_buf;
+	unsigned int copy_len;
+
+	seg = pkt;
+	while (offset >= seg->data_len) {
+		offset -= seg->data_len;
+		seg = seg->next;
+	}
+	copy_len = seg->data_len - offset;
+	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
+	while (len > copy_len) {
+		rte_memcpy(seg_buf, buf, (size_t) copy_len);
+		len -= copy_len;
+		buf = ((char *) buf + copy_len);
+		seg = seg->next;
+		seg_buf = rte_pktmbuf_mtod(seg, char *);
+		copy_len = seg->data_len;
+	}
+	rte_memcpy(seg_buf, buf, (size_t) len);
+}
+
+static inline void
+copy_buf_to_pkt(void *buf, unsigned int len,
+		struct rte_mbuf *pkt, unsigned int offset)
+{
+	if (offset + len <= pkt->data_len) {
+		rte_memcpy(rte_pktmbuf_mtod_offset(pkt,
+			char *, offset),
+			buf, (size_t) len);
+		return;
+	}
+	copy_buf_to_pkt_segs(buf, len, pkt, offset);
+}
+
+static void
+setup_inner_ip_udp_hdr_apply(struct rte_ipv4_hdr *ip_hdr,
+			 struct rte_udp_hdr *udp_hdr,
+			 uint16_t pkt_data_len)
+{
+	uint16_t *ptr16;
+	uint32_t ip_cksum;
+	uint16_t pkt_len;
+	/*
+	 * Initialize UDP header.
+	 */
+	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
+	udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
+	udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
+	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
+	udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
+
+	/*
+	 * Initialize IP header.
+	 */
+	pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
+	ip_hdr->version_ihl   = RTE_IPV4_VHL_DEF;
+	ip_hdr->type_of_service   = 0;
+	ip_hdr->fragment_offset = 0;
+	ip_hdr->time_to_live   = 64;
+	ip_hdr->next_proto_id = IPPROTO_UDP;
+	ip_hdr->packet_id = 0;
+	ip_hdr->total_length   = RTE_CPU_TO_BE_16(pkt_len);
+	ip_hdr->src_addr = rte_cpu_to_be_32(tx_ip_src_addr);
+	ip_hdr->dst_addr = rte_cpu_to_be_32(tx_ip_dst_addr);
+
+	/*
+	 * Compute IP header checksum.
+	 */
+	ptr16 = (unaligned_uint16_t *) ip_hdr;
+	ip_cksum = 0;
+	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
+	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
+	ip_cksum += ptr16[4];
+	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
+	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];
+
+	/*
+	 * Reduce 32 bit checksum to 16 bits and complement it.
+	 */
+	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
+		(ip_cksum & 0x0000FFFF);
+	if (ip_cksum > 65535)
+		ip_cksum -= 65535;
+	ip_cksum = (~ip_cksum) & 0x0000FFFF;
+	if (ip_cksum == 0)
+		ip_cksum = 0xFFFF;
+	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
+}
+
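+/*
+ * Constant outer encapsulation, built once at engine start-up.
+ * On-wire layout produced by this engine, outer to inner:
+ *   Ethernet | VLAN | IPv4 | UDP (dst 4789) | VXLAN |
+ *   inner Ethernet | inner IPv4 | inner UDP | payload
+ */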
+static void
+setup_pkt_encap_vlan_vxlan_apply(
+			 struct encap_vxlan_ipv4_vlan_data *vxlan_hdr,
+			 uint16_t pkt_data_len)
+{
+	uint16_t pkt_len;
+
+	vxlan_hdr->ether.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+
+	/* VXLAN header: I flag (VNI valid) and the default VNI 1000. */
+	vxlan_hdr->vxlan.vx_flags = rte_cpu_to_be_32(0x08000000);
+	vxlan_hdr->vxlan.vx_vni = rte_cpu_to_be_32(1000 << 8);
+
+	/* Outer UDP header; destination is the VXLAN port (default 4789). */
+	pkt_len = sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr) + pkt_data_len;
+	vxlan_hdr->udp.src_port = rte_cpu_to_be_16(underlay_tx_udp_src_port);
+	vxlan_hdr->udp.dst_port = rte_cpu_to_be_16(underlay_tx_udp_dst_port);
+	vxlan_hdr->udp.dgram_len = rte_cpu_to_be_16(pkt_len);
+	vxlan_hdr->udp.dgram_cksum = 0; /* No outer UDP checksum. */
+
+	/* Outer IPv4 header. */
+	pkt_len += sizeof(struct rte_ipv4_hdr);
+	vxlan_hdr->ipv4.version_ihl = RTE_IPV4_VHL_DEF;
+	vxlan_hdr->ipv4.type_of_service = 0;
+	vxlan_hdr->ipv4.total_length = rte_cpu_to_be_16(pkt_len);
+	vxlan_hdr->ipv4.packet_id = 0;
+	vxlan_hdr->ipv4.fragment_offset = 0;
+	vxlan_hdr->ipv4.time_to_live = 64;
+	vxlan_hdr->ipv4.next_proto_id = IPPROTO_UDP;
+	vxlan_hdr->ipv4.src_addr = rte_cpu_to_be_32(underlay_tx_ip_src_addr);
+	vxlan_hdr->ipv4.dst_addr = rte_cpu_to_be_32(underlay_tx_ip_dst_addr);
+	/* The checksum must be computed after all other fields are set. */
+	vxlan_hdr->ipv4.hdr_checksum = 0;
+	vxlan_hdr->ipv4.hdr_checksum = rte_ipv4_cksum(&vxlan_hdr->ipv4);
+
+	/* VLAN tag between the outer Ethernet and IPv4 headers. */
+	vxlan_hdr->vlan.vlan_tci = rte_cpu_to_be_16(0);
+	vxlan_hdr->vlan.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+}
+
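+/*
+ * Per-port begin callback: pre-builds the constant outer encapsulation
+ * and the inner IPv4/UDP headers, and probes the mbuf dynamic flag and
+ * field used for Tx timestamping.
+ */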
+static int
+underlay_tx_only_begin(portid_t pi)
+{
+	uint16_t pkt_hdr_len, pkt_data_len, payload_head_len;
+	int dynf;
+
+	pkt_hdr_len = sizeof(struct encap_vxlan_ipv4_vlan_data);
+	payload_head_len = (uint16_t)(sizeof(struct rte_ether_hdr) +
+				 sizeof(struct rte_ipv4_hdr) +
+				 sizeof(struct rte_udp_hdr));
+	pkt_data_len = tun_tx_pkt_length - pkt_hdr_len;
+
+	if ((tx_pkt_split == TX_PKT_SPLIT_RND || txonly_multi_flow) &&
+	    tx_pkt_seg_lengths[1] < pkt_hdr_len) {
+		TESTPMD_LOG(ERR,
+			    "Random segment number or multiple flow is enabled, "
+			    "but tx_pkt_seg_lengths[1] %u < %u (needed)\n",
+			    tx_pkt_seg_lengths[1], pkt_hdr_len);
+		return -EINVAL;
+	}
+
+	setup_pkt_encap_vlan_vxlan_apply(&pkt_vxlan_hdr, pkt_data_len);
+	pkt_data_len = tun_tx_pkt_length - pkt_hdr_len - payload_head_len;
+	setup_inner_ip_udp_hdr_apply(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
+
+	timestamp_enable = false;
+	timestamp_mask = 0;
+	timestamp_off = -1;
+	dynf = rte_mbuf_dynflag_lookup
+				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+	if (dynf >= 0)
+		timestamp_mask = 1ULL << dynf;
+	dynf = rte_mbuf_dynfield_lookup
+				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
+	if (dynf >= 0)
+		timestamp_off = dynf;
+	timestamp_enable = tx_pkt_times_inter &&
+			   timestamp_mask &&
+			   timestamp_off >= 0 &&
+			   !rte_eth_read_clock(pi, &timestamp_initial[pi]);
+
+	if (timestamp_enable) {
+		pkt_hdr_len += sizeof(struct tx_timestamp);
+
+		if (tx_pkt_split == TX_PKT_SPLIT_RND) {
+			if (tx_pkt_seg_lengths[1] < pkt_hdr_len) {
+				TESTPMD_LOG(ERR,
+					    "Time stamp and random segment number are enabled, "
+					    "but tx_pkt_seg_lengths[1] %u < %u (needed)\n",
+					    tx_pkt_seg_lengths[1], pkt_hdr_len);
+				return -EINVAL;
+			}
+		} else {
+			uint16_t total = 0;
+			uint8_t i;
+
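+			/*
+			 * Only tx_pkt_seg_lengths[1] is summed here: this
+			 * engine uses index 1 as its single default segment
+			 * length.
+			 */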
+			for (i = 1; i < 2; i++) {
+				total += tx_pkt_seg_lengths[i];
+				if (total >= pkt_hdr_len)
+					break;
+			}
+
+			if (total < pkt_hdr_len) {
+				TESTPMD_LOG(ERR,
+					    "Not enough Tx segment space for time stamp info, "
+					    "total %u < %u (needed)\n",
+					    total, pkt_hdr_len);
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* Make sure all settings are visible on forwarding cores.*/
+	rte_wmb();
+	return 0;
+}
+
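+/*
+ * Build one VXLAN-encapsulated packet: prepend the pre-built outer
+ * headers, then the inner Ethernet/IPv4/UDP headers and optional
+ * timestamp, chaining extra segments when multi-segment Tx is
+ * configured. Returns false if segment mbufs cannot be allocated.
+ */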
+static inline bool
+pkt_burst_vxlan_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
+		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
+		const uint16_t vlan_tci_outer, const uint64_t ol_flags,
+		const uint16_t idx, struct encap_vxlan_ipv4_vlan_data *vxlan,
+		struct fwd_stream *fs)
+{
+	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
+	struct rte_mbuf *pkt_seg;
+	uint32_t nb_segs, pkt_len, off = 0;
+	uint8_t i;
+
+	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
+		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
+	else
+		nb_segs = tx_pkt_nb_segs;
+
+	if (nb_segs > 1) {
+		if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs - 1))
+			return false;
+	}
+
+	rte_pktmbuf_reset_headroom(pkt);
+	pkt->data_len = tx_pkt_seg_lengths[1];
+	pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
+	pkt->ol_flags |= ol_flags;
+	pkt->vlan_tci = vlan_tci;
+	pkt->vlan_tci_outer = vlan_tci_outer;
+	pkt->l2_len = sizeof(struct rte_ether_hdr);
+	pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+
+	pkt_len = pkt->data_len;
+	pkt_seg = pkt;
+	for (i = 1; i < nb_segs; i++) {
+		pkt_seg->next = pkt_segs[i - 1];
+		pkt_seg = pkt_seg->next;
+		pkt_seg->data_len = tx_pkt_seg_lengths[i];
+		pkt_len += pkt_seg->data_len;
+	}
+	pkt_seg->next = NULL; /* Last segment of packet. */
+
+	copy_buf_to_pkt(vxlan, sizeof(*vxlan), pkt, 0);
+	off = sizeof(struct encap_vxlan_ipv4_vlan_data);
+	/*
+	 * Copy headers in first packet segment(s).
+	 */
+	copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, off);
+	off += sizeof(struct rte_ether_hdr);
+	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
+			off);
+
+	if (txonly_multi_flow) {
+		uint8_t  ip_var = RTE_PER_LCORE(__ip_var);
+		struct rte_ipv4_hdr *ip_hdr;
+		uint32_t addr;
+
+		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+				struct rte_ipv4_hdr *,
+				off);
+
+		/*
+		 * Generate multiple flows by varying the inner IP source
+		 * address, so packets are well distributed by RSS on the
+		 * receiving side and this mode can serve as a decent packet
+		 * generator for quick performance regression tests.
+		 */
+		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
+		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+		RTE_PER_LCORE(__ip_var) = ip_var;
+	}
+	off += sizeof(pkt_ip_hdr);
+	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt, off);
+	off += sizeof(pkt_udp_hdr);
+
+	if (unlikely(timestamp_enable)) {
+		uint64_t skew = fs->ts_skew;
+		struct tx_timestamp timestamp_mark;
+
+		if (unlikely(!skew)) {
+			struct rte_eth_dev_info dev_info;
+			unsigned int txqs_n;
+			uint64_t phase;
+			int ret;
+
+			ret = eth_dev_info_get_print_err(fs->tx_port, &dev_info);
+			if (ret != 0) {
+				TESTPMD_LOG(ERR,
+					"Failed to get device info for port %d, "
+					"could not finish timestamp init",
+					fs->tx_port);
+				return false;
+			}
+			txqs_n = dev_info.nb_tx_queues;
+			phase = tx_pkt_times_inter * fs->tx_queue /
+					 (txqs_n ? txqs_n : 1);
+			/*
+			 * Initialize the scheduling time phase shift
+			 * depending on queue index.
+			 */
+			skew = timestamp_initial[fs->tx_port] +
+			       tx_pkt_times_inter + phase;
+			fs->ts_skew = skew;
+		}
+		timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+		timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+		timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
+		if (unlikely(!idx)) {
+			skew +=	tx_pkt_times_inter;
+			pkt->ol_flags |= timestamp_mask;
+			*RTE_MBUF_DYNFIELD
+				(pkt, timestamp_off, uint64_t *) = skew;
+			fs->ts_skew = skew;
+			timestamp_mark.ts = rte_cpu_to_be_64(skew);
+		} else if (tx_pkt_times_intra) {
+			skew +=	tx_pkt_times_intra;
+			pkt->ol_flags |= timestamp_mask;
+			*RTE_MBUF_DYNFIELD
+				(pkt, timestamp_off, uint64_t *) = skew;
+			fs->ts_skew = skew;
+			timestamp_mark.ts = rte_cpu_to_be_64(skew);
+		} else {
+			timestamp_mark.ts = RTE_BE64(0);
+		}
+		copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
+			sizeof(struct encap_vxlan_ipv4_vlan_data) +
+			sizeof(struct rte_ether_hdr) +
+			sizeof(struct rte_ipv4_hdr) +
+			sizeof(pkt_udp_hdr));
+	}
+	/*
+	 * Complete first mbuf of packet and append it to the
+	 * burst of packets to be transmitted.
+	 */
+	pkt->nb_segs = nb_segs;
+	pkt->pkt_len = pkt_len;
+
+	return true;
+}
+
+/*
+ * Transmit a burst of VXLAN-encapsulated packets, possibly multi-segment.
+ */
+static void
+pkt_burst_transmit(struct fwd_stream *fs)
+{
+	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	struct rte_port *txp;
+	struct rte_mbuf *pkt;
+	struct rte_mempool *mbp;
+	struct rte_ether_hdr eth_hdr;
+	uint16_t nb_tx;
+	uint16_t nb_pkt;
+	uint16_t vlan_tci, vlan_tci_outer;
+	uint32_t retry;
+	uint64_t ol_flags = 0;
+	uint64_t tx_offloads;
+	uint64_t start_tsc = 0;
+
+	get_start_cycles(&start_tsc);
+
+	mbp = current_fwd_lcore()->mbp;
+	txp = &ports[fs->tx_port];
+	tx_offloads = txp->dev_conf.txmode.offloads;
+	vlan_tci = txp->tx_vlan_id;
+	vlan_tci_outer = txp->tx_vlan_id_outer;
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
+		ol_flags = RTE_MBUF_F_TX_VLAN;
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
+		ol_flags |= RTE_MBUF_F_TX_QINQ;
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
+		ol_flags |= RTE_MBUF_F_TX_MACSEC;
+
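+	/* Outer Ethernet header: underlay peer MAC as destination, port MAC as source. */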
+	rte_ether_addr_copy(&peer_underlay_eth_addrs[fs->peer_addr], &pkt_vxlan_hdr.ether.dst_addr);
+	rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &pkt_vxlan_hdr.ether.src_addr);
+
+	/*
+	 * Initialize the inner Ethernet header.
+	 */
+	rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.dst_addr);
+	rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.src_addr);
+	eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+
+	if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
+				nb_pkt_per_burst) == 0) {
+		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+			if (unlikely(!pkt_burst_vxlan_prepare(
+					pkts_burst[nb_pkt], mbp, &eth_hdr,
+					vlan_tci, vlan_tci_outer, ol_flags,
+					nb_pkt, &pkt_vxlan_hdr, fs))) {
+				rte_mempool_put_bulk(mbp,
+						(void **)&pkts_burst[nb_pkt],
+						nb_pkt_per_burst - nb_pkt);
+				break;
+			}
+		}
+	} else {
+		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+			pkt = rte_mbuf_raw_alloc(mbp);
+			if (pkt == NULL)
+				break;
+			if (unlikely(!pkt_burst_vxlan_prepare(pkt, mbp, &eth_hdr,
+							vlan_tci,
+							vlan_tci_outer,
+							ol_flags,
+							nb_pkt, &pkt_vxlan_hdr,
+							fs))) {
+				rte_pktmbuf_free(pkt);
+				break;
+			}
+			pkts_burst[nb_pkt] = pkt;
+		}
+	}
+
+	if (nb_pkt == 0)
+		return;
+
+	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+
+	/*
+	 * Retry if necessary
+	 */
+	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
+		retry = 0;
+		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
+			rte_delay_us(burst_tx_delay_time);
+			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+					&pkts_burst[nb_tx], nb_pkt - nb_tx);
+		}
+	}
+	fs->tx_packets += nb_tx;
+
+	if (txonly_multi_flow)
+		RTE_PER_LCORE(__ip_var) -= nb_pkt - nb_tx;
+
+	inc_tx_burst_stats(fs, nb_tx);
+	if (unlikely(nb_tx < nb_pkt)) {
+		if (verbose_level > 0 && fs->fwd_dropped == 0)
+			printf("port %d tx_queue %d - drop "
+			       "(nb_pkt:%u - nb_tx:%u)=%u packets\n",
+			       fs->tx_port, fs->tx_queue,
+			       (unsigned int) nb_pkt,
+			       (unsigned int) nb_tx,
+			       (unsigned int) (nb_pkt - nb_tx));
+		fs->fwd_dropped += (nb_pkt - nb_tx);
+		do {
+			rte_pktmbuf_free(pkts_burst[nb_tx]);
+		} while (++nb_tx < nb_pkt);
+	}
+
+	get_end_cycles(fs, start_tsc);
+}
+
+static void
+tx_only_stream_init(struct fwd_stream *fs)
+{
+	fs->disabled = ports[fs->tx_port].txq[fs->tx_queue].state ==
+						RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+struct fwd_engine tun_tx_only_engine = {
+	.fwd_mode_name  = "tuntxonly",
+	.port_fwd_begin = underlay_tx_only_begin,
+	.port_fwd_end   = NULL,
+	.stream_init    = tx_only_stream_init,
+	.packet_fwd     = pkt_burst_transmit,
+};
diff --git a/app/test-pmd/vxlan.h b/app/test-pmd/vxlan.h
new file mode 100644
index 0000000000..f4cc031d92
--- /dev/null
+++ b/app/test-pmd/vxlan.h
@@ -0,0 +1,23 @@
+#ifndef _VXLAN_H_
+#define _VXLAN_H_
+
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_vxlan.h>
+
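+/*
+ * Pre-built outer encapsulation headers, packed to match the on-wire
+ * layout. The VLAN variant is the one used by the tuntxonly engine;
+ * the plain IPv4 variant is provided for a VLAN-less underlay.
+ */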
+struct encap_vxlan_ipv4_vlan_data {
+	struct rte_ether_hdr ether;
+	struct rte_vlan_hdr vlan;
+	struct rte_ipv4_hdr ipv4;
+	struct rte_udp_hdr udp;
+	struct rte_vxlan_hdr vxlan;
+} __rte_packed __rte_aligned(2);
+
+struct encap_vxlan_ipv4_data {
+	struct rte_ether_hdr ether;
+	struct rte_ipv4_hdr ipv4;
+	struct rte_udp_hdr udp;
+	struct rte_vxlan_hdr vxlan;
+} __rte_packed __rte_aligned(2);
+
+#endif /* _VXLAN_H_ */
-- 
2.30.2

