From: Jijiang Liu <jijiang.liu@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v8 10/10] app/testpmd:test VxLAN Tx checksum offload
Date: Mon, 27 Oct 2014 10:13:26 +0800
Message-ID: <1414376006-31402-11-git-send-email-jijiang.liu@intel.com>
In-Reply-To: <1414376006-31402-1-git-send-email-jijiang.liu@intel.com>

Add test cases in testpmd to test VxLAN Tx checksum offload, which include:
 - IPv4 and IPv6 packets
 - outer L3, inner L3 and L4 checksum offload on the Tx side

Signed-off-by: Jijiang Liu <jijiang.liu@intel.com>
---
 app/test-pmd/cmdline.c  |   13 ++-
 app/test-pmd/config.c   |    6 +-
 app/test-pmd/csumonly.c |  194 +++++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 192 insertions(+), 21 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index da5d272..757c399 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -310,13 +310,17 @@ static void cmd_help_long_parsed(void *parsed_result,
 			" Disable hardware insertion of a VLAN header in"
 			" packets sent on a port.\n\n"
 
-			"tx_checksum set mask (port_id)\n"
+			"tx_checksum set (mask) (port_id)\n"
 			" Enable hardware insertion of checksum offload with"
-			" the 4-bit mask, 0~0xf, in packets sent on a port.\n"
+			" the 8-bit mask, 0~0xff, in packets sent on a port.\n"
 			" bit 0 - insert ip checksum offload if set\n"
 			" bit 1 - insert udp checksum offload if set\n"
 			" bit 2 - insert tcp checksum offload if set\n"
 			" bit 3 - insert sctp checksum offload if set\n"
+			" bit 4 - insert inner ip checksum offload if set\n"
+			" bit 5 - insert inner udp checksum offload if set\n"
+			" bit 6 - insert inner tcp checksum offload if set\n"
+			" bit 7 - insert inner sctp checksum offload if set\n"
 			" Please check the NIC datasheet for HW limits.\n\n"
 
 			"set fwd (%s)\n"
@@ -2763,8 +2767,9 @@ cmdline_parse_inst_t cmd_tx_cksum_set = {
 	.f = cmd_tx_cksum_set_parsed,
 	.data = NULL,
 	.help_str = "enable hardware insertion of L3/L4checksum with a given "
-		"mask in packets sent on a port, the bit mapping is given as, Bit 0 for ip"
-		"Bit 1 for UDP, Bit 2 for TCP, Bit 3 for SCTP",
+		"mask in packets sent on a port, the bit mapping is given as, Bit 0 for ip, "
+		"Bit 1 for UDP, Bit 2 for TCP, Bit 3 for SCTP, Bit 4 for inner ip, "
+		"Bit 5 for inner UDP, Bit 6 for inner TCP, Bit 7 for inner SCTP",
 	.tokens = {
 		(void *)&cmd_tx_cksum_set_tx_cksum,
 		(void *)&cmd_tx_cksum_set_set,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 2a1b93f..9bc08f4 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1753,9 +1753,9 @@ tx_cksum_set(portid_t port_id, uint64_t ol_flags)
 	uint64_t tx_ol_flags;
 	if (port_id_is_invalid(port_id))
 		return;
-	/* Clear last 4 bits and then set L3/4 checksum mask again */
-	tx_ol_flags = ports[port_id].tx_ol_flags & (~0x0Full);
-	ports[port_id].tx_ol_flags = ((ol_flags & 0xf) | tx_ol_flags);
+	/* Clear last 8 bits and then set L3/4 checksum mask again */
+	tx_ol_flags = ports[port_id].tx_ol_flags & (~0x0FFull);
+	ports[port_id].tx_ol_flags = ((ol_flags & 0xff) | tx_ol_flags);
 }
 
 void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index fcc4876..3967476 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -209,10 +209,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 	struct rte_mbuf  *mb;
 	struct ether_hdr *eth_hdr;
 	struct ipv4_hdr  *ipv4_hdr;
+	struct ether_hdr *inner_eth_hdr;
+	struct ipv4_hdr  *inner_ipv4_hdr = NULL;
 	struct ipv6_hdr  *ipv6_hdr;
+	struct ipv6_hdr  *inner_ipv6_hdr = NULL;
 	struct udp_hdr   *udp_hdr;
+	struct udp_hdr   *inner_udp_hdr;
 	struct tcp_hdr   *tcp_hdr;
+	struct tcp_hdr   *inner_tcp_hdr;
 	struct sctp_hdr  *sctp_hdr;
+	struct sctp_hdr  *inner_sctp_hdr;
 	uint16_t nb_rx;
 	uint16_t nb_tx;
 	uint16_t i;
@@ -221,12 +227,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 	uint64_t pkt_ol_flags;
 	uint64_t tx_ol_flags;
 	uint16_t l4_proto;
+	uint16_t inner_l4_proto = 0;
 	uint16_t eth_type;
 	uint8_t  l2_len;
 	uint8_t  l3_len;
+	uint8_t  inner_l2_len = 0;
+	uint8_t  inner_l3_len = 0;
 
 	uint32_t rx_bad_ip_csum;
 	uint32_t rx_bad_l4_csum;
+	uint8_t  ipv4_tunnel;
+	uint8_t  ipv6_tunnel;
 
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
 	uint64_t start_tsc;
@@ -262,7 +273,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		l2_len  = sizeof(struct ether_hdr);
 		pkt_ol_flags = mb->ol_flags;
 		ol_flags = (pkt_ol_flags & (~PKT_TX_L4_MASK));
-
+		ipv4_tunnel = (pkt_ol_flags & PKT_RX_TUNNEL_IPV4_HDR) ?
+				1 : 0;
+		ipv6_tunnel = (pkt_ol_flags & PKT_RX_TUNNEL_IPV6_HDR) ?
+				1 : 0;
 		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
 		eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
 		if (eth_type == ETHER_TYPE_VLAN) {
@@ -295,7 +309,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		 * + ipv4 or ipv6
 		 * + udp or tcp or sctp or others
 		 */
-		if (pkt_ol_flags & PKT_RX_IPV4_HDR) {
+		if (pkt_ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_TUNNEL_IPV4_HDR)) {
 
 			/* Do not support ipv4 option field */
 			l3_len = sizeof(struct ipv4_hdr) ;
@@ -325,17 +339,92 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 				if (tx_ol_flags & 0x2) {
 					/* HW Offload */
 					ol_flags |= PKT_TX_UDP_CKSUM;
-					/* Pseudo header sum need be set properly */
-					udp_hdr->dgram_cksum = get_ipv4_psd_sum(ipv4_hdr);
+					if (ipv4_tunnel)
+						udp_hdr->dgram_cksum = 0;
+					else
+						/* Pseudo header sum need be set properly */
+						udp_hdr->dgram_cksum =
+							get_ipv4_psd_sum(ipv4_hdr);
 				} else {
 					/* SW Implementation, clear checksum field first */
 					udp_hdr->dgram_cksum = 0;
 					udp_hdr->dgram_cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
-							(uint16_t*)udp_hdr);
+							(uint16_t *)udp_hdr);
 				}
-			}
-			else if (l4_proto == IPPROTO_TCP){
+
+				if (ipv4_tunnel) {
+
+					uint16_t len;
+
+					/* Check if inner L3/L4 checksum flag is set */
+					if (tx_ol_flags & 0xF0)
+						ol_flags |= PKT_TX_VXLAN_CKSUM;
+
+					inner_l2_len  = sizeof(struct ether_hdr);
+					inner_eth_hdr = (struct ether_hdr *) (rte_pktmbuf_mtod(mb,
+						unsigned char *) + l2_len + l3_len
+						 + ETHER_VXLAN_HLEN);
+
+					eth_type = rte_be_to_cpu_16(inner_eth_hdr->ether_type);
+					if (eth_type == ETHER_TYPE_VLAN) {
+						inner_l2_len += sizeof(struct vlan_hdr);
+						eth_type = rte_be_to_cpu_16(*(uint16_t *)
+							((uintptr_t)&eth_hdr->ether_type +
+								sizeof(struct vlan_hdr)));
+					}
+
+					len = l2_len + l3_len + ETHER_VXLAN_HLEN + inner_l2_len;
+					if (eth_type == ETHER_TYPE_IPv4) {
+						inner_l3_len = sizeof(struct ipv4_hdr);
+						inner_ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len);
+						inner_l4_proto = inner_ipv4_hdr->next_proto_id;
+
+						if (tx_ol_flags & 0x10) {
+
+							/* Do not delete, this is required by HW*/
+							inner_ipv4_hdr->hdr_checksum = 0;
+							ol_flags |= PKT_TX_IPV4_CSUM;
+						}
+
+					} else if (eth_type == ETHER_TYPE_IPv6) {
+						inner_l3_len = sizeof(struct ipv6_hdr);
+						inner_ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len);
+						inner_l4_proto = inner_ipv6_hdr->proto;
+					}
+					if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
+
+						/* HW Offload */
+						ol_flags |= PKT_TX_UDP_CKSUM;
+						inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len + inner_l3_len);
+						if (eth_type == ETHER_TYPE_IPv4)
+							inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+						else if (eth_type == ETHER_TYPE_IPv6)
+							inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+
+					} else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+						/* HW Offload */
+						ol_flags |= PKT_TX_TCP_CKSUM;
+						inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len + inner_l3_len);
+						if (eth_type == ETHER_TYPE_IPv4)
+							inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+						else if (eth_type == ETHER_TYPE_IPv6)
+							inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+					} else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+						/* HW Offload */
+						ol_flags |= PKT_TX_SCTP_CKSUM;
+						inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len + inner_l3_len);
+						inner_sctp_hdr->cksum = 0;
+					}
+
+				}
+
+			} else if (l4_proto == IPPROTO_TCP) {
 				tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
 						unsigned char *) + l2_len + l3_len);
 				if (tx_ol_flags & 0x4) {
@@ -347,8 +436,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					tcp_hdr->cksum = get_ipv4_psd_sum(ipv4_hdr);
 				} else {
 					tcp_hdr->cksum = 0;
 					tcp_hdr->cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
 							(uint16_t*)tcp_hdr);
 				}
-			}
-			else if (l4_proto == IPPROTO_SCTP) {
+			} else if (l4_proto == IPPROTO_SCTP) {
 				sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb,
 						unsigned char *) + l2_len + l3_len);
@@ -367,9 +455,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 				}
 			}
 			/* End of L4 Handling*/
-		}
-		else if (pkt_ol_flags & PKT_RX_IPV6_HDR) {
-
+		} else if (pkt_ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_TUNNEL_IPV6_HDR)) {
 			ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
 					unsigned char *) + l2_len);
 			l3_len = sizeof(struct ipv6_hdr) ;
@@ -382,15 +468,93 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 				if (tx_ol_flags & 0x2) {
 					/* HW Offload */
 					ol_flags |= PKT_TX_UDP_CKSUM;
-					udp_hdr->dgram_cksum = get_ipv6_psd_sum(ipv6_hdr);
+					if (ipv6_tunnel)
+						udp_hdr->dgram_cksum = 0;
+					else
+						udp_hdr->dgram_cksum =
+							get_ipv6_psd_sum(ipv6_hdr);
 				} else {
 					/* SW Implementation */
 					/* checksum field need be clear first */
 					udp_hdr->dgram_cksum = 0;
 					udp_hdr->dgram_cksum = get_ipv6_udptcp_checksum(ipv6_hdr,
-							(uint16_t*)udp_hdr);
+							(uint16_t *)udp_hdr);
 				}
+
+				if (ipv6_tunnel) {
+
+					uint16_t len;
+
+					/* Check if inner L3/L4 checksum flag is set */
+					if (tx_ol_flags & 0xF0)
+						ol_flags |= PKT_TX_VXLAN_CKSUM;
+
+					inner_l2_len  = sizeof(struct ether_hdr);
+					inner_eth_hdr = (struct ether_hdr *) (rte_pktmbuf_mtod(mb,
+						unsigned char *) + l2_len + l3_len + ETHER_VXLAN_HLEN);
+					eth_type = rte_be_to_cpu_16(inner_eth_hdr->ether_type);
+
+					if (eth_type == ETHER_TYPE_VLAN) {
+						inner_l2_len += sizeof(struct vlan_hdr);
+						eth_type = rte_be_to_cpu_16(*(uint16_t *)
+							((uintptr_t)&eth_hdr->ether_type +
+								sizeof(struct vlan_hdr)));
+					}
+
+					len = l2_len + l3_len + ETHER_VXLAN_HLEN + inner_l2_len;
+
+					if (eth_type == ETHER_TYPE_IPv4) {
+						inner_l3_len = sizeof(struct ipv4_hdr);
+						inner_ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len);
+						inner_l4_proto = inner_ipv4_hdr->next_proto_id;
+
+						/* HW offload */
+						if (tx_ol_flags & 0x10) {
+
+							/* Do not delete, this is required by HW*/
+							inner_ipv4_hdr->hdr_checksum = 0;
+							ol_flags |= PKT_TX_IPV4_CSUM;
+						}
+					} else if (eth_type == ETHER_TYPE_IPv6) {
+						inner_l3_len = sizeof(struct ipv6_hdr);
+						inner_ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len);
+						inner_l4_proto = inner_ipv6_hdr->proto;
+					}
+
+					if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
+						inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len + inner_l3_len);
+						/* HW offload */
+						ol_flags |= PKT_TX_UDP_CKSUM;
+						inner_udp_hdr->dgram_cksum = 0;
+						if (eth_type == ETHER_TYPE_IPv4)
+							inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+						else if (eth_type == ETHER_TYPE_IPv6)
+							inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+					} else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+						/* HW offload */
+						ol_flags |= PKT_TX_TCP_CKSUM;
+						inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len + inner_l3_len);
+
+						if (eth_type == ETHER_TYPE_IPv4)
+							inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+						else if (eth_type == ETHER_TYPE_IPv6)
+							inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+
+					} else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+						/* HW offload */
+						ol_flags |= PKT_TX_SCTP_CKSUM;
+						inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
+							unsigned char *) + len + inner_l3_len);
+						inner_sctp_hdr->cksum = 0;
+					}
+
+				}
+
 			} else if (l4_proto == IPPROTO_TCP) {
 				tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
@@ -434,6 +598,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		/* Combine the packet header write. VLAN is not consider here */
 		mb->l2_len = l2_len;
 		mb->l3_len = l3_len;
+		mb->inner_l2_len = inner_l2_len;
+		mb->inner_l3_len = inner_l3_len;
 		mb->ol_flags = ol_flags;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
--
1.7.7.6
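
As an illustration of how to exercise the new bits (not part of the patch; the
port id and mask value below are arbitrary assumptions), a checksum forwarding
session could be set up like this:

    testpmd> set fwd csum
    testpmd> tx_checksum set 0x33 0
    testpmd> start

Following the bit layout in the updated help text, mask 0x33 enables outer IP
(bit 0), outer UDP (bit 1), inner IP (bit 4) and inner UDP (bit 5) checksum
offload, a natural combination for a VXLAN-encapsulated UDP flow. Whenever any
of bits 4~7 is set (mask & 0xF0), the csumonly.c changes above additionally set
PKT_TX_VXLAN_CKSUM on packets received with a tunnel flag.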
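
The inner-header offsets computed in csumonly.c follow the VXLAN encapsulation
layout:

    outer Ethernet | outer IP | outer UDP | VXLAN | inner Ethernet | inner IP | inner L4

where ETHER_VXLAN_HLEN, introduced earlier in this series, covers the outer UDP
header plus the 8-byte VXLAN header. A minimal sketch of the same pointer
arithmetic, written as a hypothetical standalone helper (not part of the patch):

    /* Hypothetical helper mirroring the offset arithmetic in
     * pkt_burst_checksum_forward(): the inner Ethernet header starts
     * right after outer L2 + outer L3 + (outer UDP + VXLAN) headers. */
    static inline struct ether_hdr *
    vxlan_inner_eth_hdr(struct rte_mbuf *mb, uint8_t l2_len, uint8_t l3_len)
    {
            return (struct ether_hdr *)(rte_pktmbuf_mtod(mb, unsigned char *)
                            + l2_len + l3_len + ETHER_VXLAN_HLEN);
    }

The inner L3 header then starts inner_l2_len bytes further on (inner_l2_len
itself grows by sizeof(struct vlan_hdr) when the inner frame is VLAN tagged),
which is exactly the `len` variable used in the code above.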
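
On the checksum seeding convention: for hardware L4 offload the checksum field
is pre-seeded with the pseudo-header sum (the get_ipv4_psd_sum() and
get_ipv6_psd_sum() helpers already present in csumonly.c), while the outer UDP
checksum of a tunneled packet is simply zeroed and the inner checksums are left
to the NIC. For reference, a generic sketch of the 16-bit one's-complement sum
that underlies such a seed (illustration only, not the code testpmd uses):

    #include <stdint.h>
    #include <stddef.h>

    /* One's-complement sum over 16-bit words, carries folded back in.
     * A pseudo-header seed is this sum taken over {src addr, dst addr,
     * zero byte + protocol, L4 length} in network byte order, without
     * the final bit inversion that a finished checksum would get. */
    static uint16_t
    ones_complement_sum16(const uint16_t *words, size_t n)
    {
            uint32_t sum = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    sum += words[i];
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16); /* catch the last carry */
            return (uint16_t)sum;
    }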