From: Helin Zhang <helin.zhang@intel.com>
To: dev@dpdk.org
Date: Tue, 5 May 2015 10:32:23 +0800
Message-Id: <1430793143-3610-7-git-send-email-helin.zhang@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1430793143-3610-1-git-send-email-helin.zhang@intel.com>
References: <1430793143-3610-1-git-send-email-helin.zhang@intel.com>
Subject: [dpdk-dev] [PATCH RFC 6/6] app/testpmd: support of QinQ stripping and insertion

As QinQ stripping and insertion are now supported, the test commands need
to be updated. In detail, "tx_vlan set vlan_id (port_id)" is changed to
"tx_vlan set (port_id) vlan_id0[, vlan_id1]" to support both single and
double VLAN tag insertion; in addition, the VLAN tags stripped from
received packets are printed in 'rxonly' mode.
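An illustrative usage sketch (not part of the patch): with the command
syntax proposed above, and with extended (QinQ) VLAN offload already
enabled on the port for the double-tag case, the commands would look like:

    testpmd> tx_vlan set 0 100
        (single VLAN insertion with TCI 100 on port 0)
    testpmd> tx_vlan set 0 100 200
        (double VLAN insertion: outer tag 100, inner tag 200, following the
         vlan_id0/vlan_id1 ordering used by this patch)
    testpmd> tx_vlan reset 0
        (disable VLAN/QinQ insertion on port 0 again)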
Signed-off-by: Helin Zhang <helin.zhang@intel.com>
---
 app/test-pmd/cmdline.c | 78 +++++++++++++++++++++++++++++++++++++++++++++-----
 app/test-pmd/config.c  | 23 +++++++++++++--
 app/test-pmd/flowgen.c |  8 ++++--
 app/test-pmd/macfwd.c  |  5 +++-
 app/test-pmd/macswap.c |  5 +++-
 app/test-pmd/rxonly.c  |  3 ++
 app/test-pmd/testpmd.h |  6 +++-
 app/test-pmd/txonly.c  | 10 +++++--
 8 files changed, 120 insertions(+), 18 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f01db2a..a19d32a 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -304,9 +304,9 @@ static void cmd_help_long_parsed(void *parsed_result,
 			"rx_vxlan_port rm (udp_port) (port_id)\n"
 			"    Remove an UDP port for VXLAN packet filter on a port\n\n"
 
-			"tx_vlan set vlan_id (port_id)\n"
-			"    Set hardware insertion of VLAN ID in packets sent"
-			" on a port.\n\n"
+			"tx_vlan set (port_id) vlan_id0[, vlan_id1]\n"
+			"    Set hardware insertion of VLAN IDs (single or double VLAN "
+			"depends on the number of VLAN IDs) in packets sent on a port.\n\n"
 
 			"tx_vlan set pvid port_id vlan_id (on|off)\n"
 			"    Set port based TX VLAN insertion.\n\n"
@@ -2799,8 +2799,8 @@ cmdline_parse_inst_t cmd_rx_vlan_filter = {
 struct cmd_tx_vlan_set_result {
 	cmdline_fixed_string_t tx_vlan;
 	cmdline_fixed_string_t set;
-	uint16_t vlan_id;
 	uint8_t port_id;
+	uint16_t vlan_id;
 };
 
 static void
@@ -2809,6 +2809,13 @@ cmd_tx_vlan_set_parsed(void *parsed_result,
 		__attribute__((unused)) void *data)
 {
 	struct cmd_tx_vlan_set_result *res = parsed_result;
+	int vlan_offload = rte_eth_dev_get_vlan_offload(res->port_id);
+
+	if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
+		printf("Error, as QinQ has been enabled.\n");
+		return;
+	}
+
 	tx_vlan_set(res->port_id, res->vlan_id);
 }
 
@@ -2828,13 +2835,69 @@ cmdline_parse_token_num_t cmd_tx_vlan_set_portid =
 cmdline_parse_inst_t cmd_tx_vlan_set = {
 	.f = cmd_tx_vlan_set_parsed,
 	.data = NULL,
-	.help_str = "enable hardware insertion of a VLAN header with a given "
-		"TAG Identifier in packets sent on a port",
+	.help_str = "enable hardware insertion of a single VLAN header "
+		"with a given TAG Identifier in packets sent on a port",
 	.tokens = {
 		(void *)&cmd_tx_vlan_set_tx_vlan,
 		(void *)&cmd_tx_vlan_set_set,
-		(void *)&cmd_tx_vlan_set_vlanid,
 		(void *)&cmd_tx_vlan_set_portid,
+		(void *)&cmd_tx_vlan_set_vlanid,
+		NULL,
+	},
+};
+
+/* *** ENABLE HARDWARE INSERTION OF Double VLAN HEADER IN TX PACKETS *** */
+struct cmd_tx_vlan_set_qinq_result {
+	cmdline_fixed_string_t tx_vlan;
+	cmdline_fixed_string_t set;
+	uint8_t port_id;
+	uint16_t vlan_id0;
+	uint16_t vlan_id1;
+};
+
+static void
+cmd_tx_vlan_set_qinq_parsed(void *parsed_result,
+		__attribute__((unused)) struct cmdline *cl,
+		__attribute__((unused)) void *data)
+{
+	struct cmd_tx_vlan_set_qinq_result *res = parsed_result;
+	int vlan_offload = rte_eth_dev_get_vlan_offload(res->port_id);
+
+	if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
+		printf("Error, as QinQ hasn't been enabled.\n");
+		return;
+	}
+
+	tx_qinq_set(res->port_id, res->vlan_id0, res->vlan_id1);
+}
+
+cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_tx_vlan =
+	TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+		tx_vlan, "tx_vlan");
+cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+		set, "set");
+cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+		port_id, UINT8);
+cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_vlanid0 =
+	TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+		vlan_id0, UINT16);
+cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_vlanid1 =
+	TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+		vlan_id1, UINT16);
+
+cmdline_parse_inst_t cmd_tx_vlan_set_qinq = {
+	.f = cmd_tx_vlan_set_qinq_parsed,
+	.data = NULL,
+	.help_str = "enable hardware insertion of a double VLAN header "
+		"with a given TAG Identifier in packets sent on a port",
+	.tokens = {
+		(void *)&cmd_tx_vlan_set_qinq_tx_vlan,
+		(void *)&cmd_tx_vlan_set_qinq_set,
+		(void *)&cmd_tx_vlan_set_qinq_portid,
+		(void *)&cmd_tx_vlan_set_qinq_vlanid0,
+		(void *)&cmd_tx_vlan_set_qinq_vlanid1,
 		NULL,
 	},
 };
@@ -8782,6 +8845,7 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_rx_vlan_filter_all,
 	(cmdline_parse_inst_t *)&cmd_rx_vlan_filter,
 	(cmdline_parse_inst_t *)&cmd_tx_vlan_set,
+	(cmdline_parse_inst_t *)&cmd_tx_vlan_set_qinq,
 	(cmdline_parse_inst_t *)&cmd_tx_vlan_reset,
 	(cmdline_parse_inst_t *)&cmd_tx_vlan_set_pvid,
 	(cmdline_parse_inst_t *)&cmd_csum_set,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index f788ed5..6825a1e 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1732,8 +1732,24 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 		return;
 	if (vlan_id_is_invalid(vlan_id))
 		return;
+	tx_vlan_reset(port_id);
 	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
-	ports[port_id].tx_vlan_id = vlan_id;
+	ports[port_id].tx_vlan_id0 = vlan_id;
+}
+
+void
+tx_qinq_set(portid_t port_id, uint16_t vlan_id0, uint16_t vlan_id1)
+{
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+	if (vlan_id_is_invalid(vlan_id0))
+		return;
+	if (vlan_id_is_invalid(vlan_id1))
+		return;
+	tx_vlan_reset(port_id);
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
+	ports[port_id].tx_vlan_id0 = vlan_id0;
+	ports[port_id].tx_vlan_id1 = vlan_id1;
 }
 
 void
@@ -1741,7 +1757,10 @@ tx_vlan_reset(portid_t port_id)
 {
 	if (port_id_is_invalid(port_id, ENABLED_WARN))
 		return;
-	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_INSERT_VLAN;
+	ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
+				TESTPMD_TX_OFFLOAD_INSERT_QINQ);
+	ports[port_id].tx_vlan_id0 = 0;
+	ports[port_id].tx_vlan_id1 = 0;
 }
 
 void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index f24b00c..66a4687 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -136,7 +136,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	struct ether_hdr *eth_hdr;
 	struct ipv4_hdr *ip_hdr;
 	struct udp_hdr *udp_hdr;
-	uint16_t vlan_tci;
+	uint16_t vlan_tci0, vlan_tci1;
 	uint16_t ol_flags;
 	uint16_t nb_rx;
 	uint16_t nb_tx;
@@ -162,7 +162,8 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 		rte_pktmbuf_free(pkts_burst[i]);
 
 	mbp = current_fwd_lcore()->mbp;
-	vlan_tci = ports[fs->tx_port].tx_vlan_id;
+	vlan_tci0 = ports[fs->tx_port].tx_vlan_id0;
+	vlan_tci1 = ports[fs->tx_port].tx_vlan_id1;
 	ol_flags = ports[fs->tx_port].tx_ol_flags;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
@@ -207,7 +208,8 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 		pkt->nb_segs = 1;
 		pkt->pkt_len = pkt_size;
 		pkt->ol_flags = ol_flags;
-		pkt->vlan_tci0 = vlan_tci;
+		pkt->vlan_tci0 = vlan_tci0;
+		pkt->vlan_tci1 = vlan_tci1;
 		pkt->l2_len = sizeof(struct ether_hdr);
 		pkt->l3_len = sizeof(struct ipv4_hdr);
 		pkts_burst[nb_pkt] = pkt;
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 590b613..5eaa70a 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -110,6 +110,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 	txp = &ports[fs->tx_port];
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
 		ol_flags = PKT_TX_VLAN_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
+		ol_flags |= PKT_TX_QINQ_PKT;
 	for (i = 0; i < nb_rx; i++) {
 		mb = pkts_burst[i];
 		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
@@ -120,7 +122,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 		mb->ol_flags = ol_flags;
 		mb->l2_len = sizeof(struct ether_hdr);
 		mb->l3_len = sizeof(struct ipv4_hdr);
-		mb->vlan_tci0 = txp->tx_vlan_id;
+		mb->vlan_tci0 = txp->tx_vlan_id0;
+		mb->vlan_tci1 = txp->tx_vlan_id1;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
 	fs->tx_packets += nb_tx;
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index c355399..fcdb155 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -110,6 +110,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 	txp = &ports[fs->tx_port];
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
 		ol_flags = PKT_TX_VLAN_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
+		ol_flags |= PKT_TX_QINQ_PKT;
 	for (i = 0; i < nb_rx; i++) {
 		mb = pkts_burst[i];
 		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
@@ -122,7 +124,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 		mb->ol_flags = ol_flags;
 		mb->l2_len = sizeof(struct ether_hdr);
 		mb->l3_len = sizeof(struct ipv4_hdr);
-		mb->vlan_tci0 = txp->tx_vlan_id;
+		mb->vlan_tci0 = txp->tx_vlan_id0;
+		mb->vlan_tci1 = txp->tx_vlan_id1;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
 	fs->tx_packets += nb_tx;
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index aa2cf7f..41d3874 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -160,6 +160,9 @@ pkt_burst_receive(struct fwd_stream *fs)
 		}
 		if (ol_flags & PKT_RX_VLAN_PKT)
 			printf(" - VLAN tci=0x%x", mb->vlan_tci0);
+		if (ol_flags & PKT_RX_QINQ_PKT)
+			printf(" - QinQ VLAN tci0=0x%x, VLAN tci1=0x%x",
+				mb->vlan_tci0, mb->vlan_tci1);
 		if (is_encapsulation) {
 			struct ipv4_hdr *ipv4_hdr;
 			struct ipv6_hdr *ipv6_hdr;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 389fc24..890fa3e 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -132,6 +132,8 @@ struct fwd_stream {
 #define TESTPMD_TX_OFFLOAD_PARSE_TUNNEL 0x0020
 /** Insert VLAN header in forward engine */
 #define TESTPMD_TX_OFFLOAD_INSERT_VLAN 0x0040
+/** Insert double VLAN header in forward engine */
+#define TESTPMD_TX_OFFLOAD_INSERT_QINQ 0x0080
 
 /**
  * The data structure associated with each port.
@@ -148,7 +150,8 @@ struct rte_port {
 	unsigned int socket_id;  /**< For NUMA support */
 	uint16_t tx_ol_flags;/**< TX Offload Flags (TESTPMD_TX_OFFLOAD...). */
 	uint16_t tso_segsz;  /**< MSS for segmentation offload. */
-	uint16_t tx_vlan_id; /**< Tag Id. in TX VLAN packets. */
+	uint16_t tx_vlan_id0;/**< The (outer) tag ID */
+	uint16_t tx_vlan_id1;/**< The inner tag ID */
 	void *fwd_ctx;  /**< Forwarding mode context */
 	uint64_t rx_bad_ip_csum; /**< rx pkts with bad ip checksum */
 	uint64_t rx_bad_l4_csum; /**< rx pkts with bad l4 checksum */
@@ -512,6 +515,7 @@ int rx_vft_set(portid_t port_id, uint16_t vlan_id, int on);
 void vlan_extend_set(portid_t port_id, int on);
 void vlan_tpid_set(portid_t port_id, uint16_t tp_id);
 void tx_vlan_set(portid_t port_id, uint16_t vlan_id);
+void tx_qinq_set(portid_t port_id, uint16_t vlan_id0, uint16_t vlan_id1);
 void tx_vlan_reset(portid_t port_id);
 void tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on);
 
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 4a2827f..9c7a86e 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -202,7 +202,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	struct ether_hdr eth_hdr;
 	uint16_t nb_tx;
 	uint16_t nb_pkt;
-	uint16_t vlan_tci;
+	uint16_t vlan_tci0, vlan_tci1;
 	uint64_t ol_flags = 0;
 	uint8_t i;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
@@ -217,9 +217,12 @@ pkt_burst_transmit(struct fwd_stream *fs)
 
 	mbp = current_fwd_lcore()->mbp;
 	txp = &ports[fs->tx_port];
-	vlan_tci = txp->tx_vlan_id;
+	vlan_tci0 = txp->tx_vlan_id0;
+	vlan_tci1 = txp->tx_vlan_id1;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
 		ol_flags = PKT_TX_VLAN_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
+		ol_flags |= PKT_TX_QINQ_PKT;
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = tx_mbuf_alloc(mbp);
 		if (pkt == NULL) {
@@ -266,7 +269,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		pkt->nb_segs = tx_pkt_nb_segs;
 		pkt->pkt_len = tx_pkt_length;
 		pkt->ol_flags = ol_flags;
-		pkt->vlan_tci0 = vlan_tci;
+		pkt->vlan_tci0 = vlan_tci0;
+		pkt->vlan_tci1 = vlan_tci1;
 		pkt->l2_len = sizeof(struct ether_hdr);
 		pkt->l3_len = sizeof(struct ipv4_hdr);
 		pkts_burst[nb_pkt] = pkt;
-- 
1.9.3
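A minimal sketch (not part of the patch) of how an application's own TX
path could request the same offloads, assuming the mbuf layout introduced
earlier in this RFC series (the vlan_tci0/vlan_tci1 fields and the
PKT_TX_QINQ_PKT flag used in the hunks above); the helper name is
hypothetical:

#include <stdint.h>
#include <rte_mbuf.h>

/*
 * Mark an already-built mbuf for hardware VLAN/QinQ insertion on TX.
 * vlan_tci0 is the (outer) tag and vlan_tci1 the inner tag, mirroring
 * the tx_vlan_id0/tx_vlan_id1 fields added to struct rte_port above.
 * For single-tag insertion pass qinq = 0; vlan_tci1 is then ignored.
 */
static inline void
request_tx_vlan_insertion(struct rte_mbuf *m, uint16_t vlan_tci0,
			  uint16_t vlan_tci1, int qinq)
{
	m->vlan_tci0 = vlan_tci0;               /* outer (or only) tag */
	if (qinq) {
		m->vlan_tci1 = vlan_tci1;       /* inner tag */
		m->ol_flags |= PKT_TX_QINQ_PKT; /* request double-tag insertion */
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT; /* request single-tag insertion */
	}
	/* The mbuf is then handed to rte_eth_tx_burst() as usual; a PMD with
	 * QinQ TX support performs the actual insertion. */
}

This mirrors what the macfwd, macswap and txonly forwarding engines do in
the hunks above.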