From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <jingjing.wu@intel.com>
Received: from mga09.intel.com (mga09.intel.com [134.134.136.24])
	by dpdk.org (Postfix) with ESMTP id D08C15963
	for <dev@dpdk.org>; Thu, 29 Oct 2015 09:54:34 +0100 (CET)
Received: from fmsmga001.fm.intel.com ([10.253.24.23])
	by orsmga102.jf.intel.com with ESMTP; 29 Oct 2015 01:54:12 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.20,213,1444719600"; d="scan'208";a="822123206"
Received: from shvmail01.sh.intel.com ([10.239.29.42])
	by fmsmga001.fm.intel.com with ESMTP; 29 Oct 2015 01:54:11 -0700
Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89])
	by shvmail01.sh.intel.com with ESMTP id t9T8s99H016593;
	Thu, 29 Oct 2015 16:54:09 +0800
Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1])
	by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP
	id t9T8s6el007997; Thu, 29 Oct 2015 16:54:09 +0800
Received: (from wujingji@localhost)
	by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id t9T8s68U007993;
	Thu, 29 Oct 2015 16:54:06 +0800
From: Jingjing Wu <jingjing.wu@intel.com>
To: dev@dpdk.org
Date: Thu, 29 Oct 2015 16:53:45 +0800
Message-Id: <1446108827-7907-9-git-send-email-jingjing.wu@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1446108827-7907-1-git-send-email-jingjing.wu@intel.com>
References: <1443074591-19803-1-git-send-email-jingjing.wu@intel.com>
	<1446108827-7907-1-git-send-email-jingjing.wu@intel.com>
Subject: [dpdk-dev] [PATCH v2 08/10] app/testpmd: set up DCB forwarding based on traffic class
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches and discussions about DPDK <dev.dpdk.org>
X-List-Received-Date: Thu, 29 Oct 2015 08:54:39 -0000

This patch changes the testpmd DCB forwarding streams so that they are
set up based on traffic class, using the DCB information reported by the
port instead of a fixed queue-to-queue mapping. It also fixes some coding
style issues.
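
After this change, DCB forwarding can be exercised from the testpmd prompt
with a sequence like the one below. This is only an illustrative sketch:
port 0, 4 traffic classes, PFC on and the mac forwarding mode are arbitrary
choices, and the port has to be stopped before it is reconfigured. Note that
the number of forwarding cores should not be smaller than the number of
traffic classes, otherwise the configuration is rejected.

    testpmd> port stop 0
    testpmd> port config 0 dcb vt off 4 pfc on
    testpmd> port start 0
    testpmd> set fwd mac
    testpmd> start
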
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
---
 app/test-pmd/cmdline.c |  39 +++++++-----
 app/test-pmd/config.c  | 159 +++++++++++++++++++++----------------------
 app/test-pmd/testpmd.c | 151 +++++++++++++++++++++++++---------------------
 app/test-pmd/testpmd.h |  23 +------
 4 files changed, 176 insertions(+), 196 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0f8f48f..2ec855f 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1999,37 +1999,46 @@ cmd_config_dcb_parsed(void *parsed_result,
 			__attribute__((unused)) void *data)
 {
 	struct cmd_config_dcb *res = parsed_result;
-	struct dcb_config dcb_conf;
 	portid_t port_id = res->port_id;
 	struct rte_port *port;
+	uint8_t pfc_en;
+	int ret;
 
 	port = &ports[port_id];
 	/** Check if the port is not started **/
 	if (port->port_status != RTE_PORT_STOPPED) {
-		printf("Please stop port %d first\n",port_id);
+		printf("Please stop port %d first\n", port_id);
 		return;
 	}
 
-	dcb_conf.num_tcs = (enum rte_eth_nb_tcs) res->num_tcs;
-	if ((dcb_conf.num_tcs != ETH_4_TCS) && (dcb_conf.num_tcs != ETH_8_TCS)){
-		printf("The invalid number of traffic class,only 4 or 8 allowed\n");
+	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+		printf("The invalid number of traffic class,"
+			" only 4 or 8 allowed.\n");
 		return;
 	}
 
-	/* DCB in VT mode */
-	if (!strncmp(res->vt_en, "on",2))
-		dcb_conf.dcb_mode = DCB_VT_ENABLED;
+	if (nb_fwd_lcores < res->num_tcs) {
+		printf("nb_cores shouldn't be less than number of TCs.\n");
+		return;
+	}
+	if (!strncmp(res->pfc_en, "on", 2))
+		pfc_en = 1;
 	else
-		dcb_conf.dcb_mode = DCB_ENABLED;
+		pfc_en = 0;
 
-	if (!strncmp(res->pfc_en, "on",2)) {
-		dcb_conf.pfc_en = 1;
-	}
+	/* DCB in VT mode */
+	if (!strncmp(res->vt_en, "on", 2))
+		ret = init_port_dcb_config(port_id, DCB_VT_ENABLED,
+				(enum rte_eth_nb_tcs)res->num_tcs,
+				pfc_en);
 	else
-		dcb_conf.pfc_en = 0;
+		ret = init_port_dcb_config(port_id, DCB_ENABLED,
+				(enum rte_eth_nb_tcs)res->num_tcs,
+				pfc_en);
+
 
-	if (init_port_dcb_config(port_id,&dcb_conf) != 0) {
-		printf("Cannot initialize network ports\n");
+	if (ret != 0) {
+		printf("Cannot initialize network ports.\n");
 		return;
 	}
 
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index cf2aa6e..11136aa 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1128,113 +1128,92 @@ rss_fwd_config_setup(void)
 	}
 }
 
-/*
- * In DCB and VT on,the mapping of 128 receive queues to 128 transmit queues.
- */
-static void
-dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
-{
-	if(dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
-
-		if (rxq < 32)
-			/* tc0: 0-31 */
-			*txq = rxq;
-		else if (rxq < 64) {
-			/* tc1: 64-95 */
-			*txq = (uint16_t)(rxq + 32);
-		}
-		else {
-			/* tc2: 96-111;tc3:112-127 */
-			*txq = (uint16_t)(rxq/2 + 64);
-		}
-	}
-	else {
-		if (rxq < 16)
-			/* tc0 mapping*/
-			*txq = rxq;
-		else if (rxq < 32) {
-			/* tc1 mapping*/
-			*txq = (uint16_t)(rxq + 16);
-		}
-		else if (rxq < 64) {
-			/*tc2,tc3 mapping */
-			*txq = (uint16_t)(rxq + 32);
-		}
-		else {
-			/* tc4,tc5,tc6 and tc7 mapping */
-			*txq = (uint16_t)(rxq/2 + 64);
-		}
-	}
-}
-
 /**
- * For the DCB forwarding test, each core is assigned on every port multi-transmit
- * queue.
+ * For the DCB forwarding test, each core is assigned on each traffic class.
  *
  * Each core is assigned a multi-stream, each stream being composed of
  * a RX queue to poll on a RX port for input messages, associated with
- * a TX queue of a TX port where to send forwarded packets.
- * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
- * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
- * following rules:
- * In VT mode,
- * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
- * - TxQl = RxQj
- * In non-VT mode,
- * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
- * There is a mapping of RxQj to TxQl to be required,and the mapping was implemented
- * in dcb_rxq_2_txq_mapping function.
+ * a TX queue of a TX port where to send forwarded packets. All RX and
+ * TX queues are mapping to the same traffic class.
+ * If VMDQ and DCB co-exist, each traffic class on different POOLs share
+ * the same core
  */
 static void
 dcb_fwd_config_setup(void)
 {
-	portid_t rxp;
-	portid_t txp;
-	queueid_t rxq;
-	queueid_t nb_q;
+	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
+	portid_t txp, rxp = 0;
+	queueid_t txq, rxq = 0;
 	lcoreid_t lc_id;
-	uint16_t sm_id;
-
-	nb_q = nb_rxq;
+	uint16_t nb_rx_queue, nb_tx_queue;
+	uint16_t i, j, k, sm_id = 0;
+	uint8_t tc = 0;
 
 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
 	cur_fwd_config.nb_fwd_streams =
-		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
+		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
 
 	/* reinitialize forwarding streams */
 	init_fwd_streams();
 
+	sm_id = 0;
+	if ((rxp & 0x1) == 0)
+		txp = (portid_t) (rxp + 1);
+	else
+		txp = (portid_t) (rxp - 1);
+	/* get the dcb info on the first RX and TX ports */
+	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
+	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
 
-	setup_fwd_config_of_each_lcore(&cur_fwd_config);
-	rxp = 0; rxq = 0;
 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
-		/* a fwd core can run multi-streams */
-		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++)
-		{
-			struct fwd_stream *fs;
-			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
-			if ((rxp & 0x1) == 0)
-				txp = (portid_t) (rxp + 1);
-			else
-				txp = (portid_t) (rxp - 1);
-			fs->rx_port = fwd_ports_ids[rxp];
-			fs->rx_queue = rxq;
-			fs->tx_port = fwd_ports_ids[txp];
-			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
-				fs->tx_queue = rxq;
-			else
-				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
-			fs->peer_addr = fs->tx_port;
-			rxq = (queueid_t) (rxq + 1);
-			if (rxq < nb_q)
-				continue;
-			rxq = 0;
-			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
-				rxp = (portid_t)
-					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
-			else
-				rxp = (portid_t) (rxp + 1);
+		fwd_lcores[lc_id]->stream_nb = 0;
+		fwd_lcores[lc_id]->stream_idx = sm_id;
+		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+			/* if the nb_queue is zero, means this tc is
+			 * not enabled on the POOL
+			 */
+			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
+				break;
+			k = fwd_lcores[lc_id]->stream_nb +
+				fwd_lcores[lc_id]->stream_idx;
+			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
+			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
+			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
+			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
+			for (j = 0; j < nb_rx_queue; j++) {
+				struct fwd_stream *fs;
+
+				fs = fwd_streams[k + j];
+				fs->rx_port = fwd_ports_ids[rxp];
+				fs->rx_queue = rxq + j;
+				fs->tx_port = fwd_ports_ids[txp];
+				fs->tx_queue = txq + j % nb_tx_queue;
+				fs->peer_addr = fs->tx_port;
+			}
+			fwd_lcores[lc_id]->stream_nb +=
+				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
 		}
+		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
+
+		tc++;
+		if (tc < rxp_dcb_info.nb_tcs)
+			continue;
+		/* Restart from TC 0 on next RX port */
+		tc = 0;
+		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
+			rxp = (portid_t)
+				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
+		else
+			rxp++;
+		if (rxp >= nb_fwd_ports)
+			return;
+		/* get the dcb information on next RX and TX ports */
+		if ((rxp & 0x1) == 0)
+			txp = (portid_t) (rxp + 1);
+		else
+			txp = (portid_t) (rxp - 1);
+		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
+		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
 	}
 }
 
@@ -1354,10 +1333,6 @@ pkt_fwd_config_display(struct fwd_config *cfg)
 void
 fwd_config_display(void)
 {
-	if((dcb_config) && (nb_fwd_lcores == 1)) {
-		printf("In DCB mode,the nb forwarding cores should be larger than 1\n");
-		return;
-	}
 	fwd_config_setup();
 	pkt_fwd_config_display(&cur_fwd_config);
 }
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 8b8eb7d..6805297 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -182,9 +182,6 @@ uint8_t dcb_config = 0;
 /* Whether the dcb is in testing status */
 uint8_t dcb_test = 0;
 
-/* DCB on and VT on mapping is default */
-enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
-
 /*
  * Configurable number of RX/TX queues.
  */
@@ -1849,115 +1846,131 @@ const uint16_t vlan_tags[] = {
 };
 
 static int
-get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
+get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+		 enum dcb_mode_enable dcb_mode,
+		 enum rte_eth_nb_tcs num_tcs,
+		 uint8_t pfc_en)
 {
-	uint8_t i;
+	uint8_t i;
 
 	/*
 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
 	 * given above, and the number of traffic classes available for use.
 	 */
-	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
-		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
-		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
+	if (dcb_mode == DCB_VT_ENABLED) {
+		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
+		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 
 		/* VMDQ+DCB RX and TX configrations */
-		vmdq_rx_conf.enable_default_pool = 0;
-		vmdq_rx_conf.default_pool = 0;
-		vmdq_rx_conf.nb_queue_pools =
-			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
-		vmdq_tx_conf.nb_queue_pools =
-			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
-
-		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
-		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
-			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
-			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
+		vmdq_rx_conf->enable_default_pool = 0;
+		vmdq_rx_conf->default_pool = 0;
+		vmdq_rx_conf->nb_queue_pools =
+			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+		vmdq_tx_conf->nb_queue_pools =
+			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+
+		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
+		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
+			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
+			vmdq_rx_conf->pool_map[i].pools =
+				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-			vmdq_rx_conf.dcb_tc[i] = i;
-			vmdq_tx_conf.dcb_tc[i] = i;
+			vmdq_rx_conf->dcb_tc[i] = i;
+			vmdq_tx_conf->dcb_tc[i] = i;
 		}
 
-		/*set DCB mode of RX and TX of multiple queues*/
+		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
-		if (dcb_conf->pfc_en)
-			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
-		else
-			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
-
-		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
-				sizeof(struct rte_eth_vmdq_dcb_conf)));
-		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
-				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
-	}
-	else {
-		struct rte_eth_dcb_rx_conf rx_conf;
-		struct rte_eth_dcb_tx_conf tx_conf;
-
-		/* queue mapping configuration of DCB RX and TX */
-		if (dcb_conf->num_tcs == ETH_4_TCS)
-			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
-		else
-			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
-
-		rx_conf.nb_tcs = dcb_conf->num_tcs;
-		tx_conf.nb_tcs = dcb_conf->num_tcs;
-
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
-			rx_conf.dcb_tc[i] = i;
-			tx_conf.dcb_tc[i] = i;
+	} else {
+		struct rte_eth_dcb_rx_conf *rx_conf =
+				&eth_conf->rx_adv_conf.dcb_rx_conf;
+		struct rte_eth_dcb_tx_conf *tx_conf =
+				&eth_conf->tx_adv_conf.dcb_tx_conf;
+
+		rx_conf->nb_tcs = num_tcs;
+		tx_conf->nb_tcs = num_tcs;
+
+		for (i = 0; i < num_tcs; i++) {
+			rx_conf->dcb_tc[i] = i;
+			tx_conf->dcb_tc[i] = i;
 		}
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
+		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
-		if (dcb_conf->pfc_en)
-			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
-		else
-			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
-
-		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
-				sizeof(struct rte_eth_dcb_rx_conf)));
-		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
-				sizeof(struct rte_eth_dcb_tx_conf)));
 	}
+	if (pfc_en)
+		eth_conf->dcb_capability_en =
+				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+	else
+		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+
 	return 0;
 }
 
 int
-init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
+init_port_dcb_config(portid_t pid,
+		     enum dcb_mode_enable dcb_mode,
+		     enum rte_eth_nb_tcs num_tcs,
+		     uint8_t pfc_en)
 {
 	struct rte_eth_conf port_conf;
+	struct rte_eth_dev_info dev_info;
 	struct rte_port *rte_port;
 	int retval;
-	uint16_t nb_vlan;
 	uint16_t i;
 
-	/* rxq and txq configuration in dcb mode */
-	nb_rxq = 128;
-	nb_txq = 128;
+	rte_eth_dev_info_get(pid, &dev_info);
+
+	/* If dev_info.vmdq_pool_base is greater than 0,
+	 * the queue id of vmdq pools is started after pf queues.
+	 */
+	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
+		printf("VMDQ_DCB multi-queue mode is nonsensical"
+			" for port %d.", pid);
+		return -1;
+	}
+
+	/* Assume the ports in testpmd have the same dcb capability
+	 * and has the same number of rxq and txq in dcb mode
+	 */
+	if (dcb_mode == DCB_VT_ENABLED) {
+		nb_rxq = dev_info.max_rx_queues;
+		nb_txq = dev_info.max_tx_queues;
+	} else {
+		/*if vt is disabled, use all pf queues */
+		if (dev_info.vmdq_pool_base == 0) {
+			nb_rxq = dev_info.max_rx_queues;
+			nb_txq = dev_info.max_tx_queues;
+		} else {
+			nb_rxq = (queueid_t)num_tcs;
+			nb_txq = (queueid_t)num_tcs;
+
+		}
+	}
 	rx_free_thresh = 64;
 
-	memset(&port_conf,0,sizeof(struct rte_eth_conf));
+	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
 
 	/* Enter DCB configuration status */
 	dcb_config = 1;
 
-	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
-	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
+	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
 
 	rte_port = &ports[pid];
-	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
+	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
 
 	rxtx_port_config(rte_port);
 	/* VLAN filter */
 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
-	for (i = 0; i < nb_vlan; i++){
+	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
-	}
 
 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
 	map_port_queue_stats_mapping_registers(pid, rte_port);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index f925df7..3661755 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -255,25 +255,6 @@ enum dcb_mode_enable
 	DCB_ENABLED
 };
 
-/*
- * DCB general config info
- */
-struct dcb_config {
-	enum dcb_mode_enable dcb_mode;
-	uint8_t vt_en;
-	enum rte_eth_nb_tcs num_tcs;
-	uint8_t pfc_en;
-};
-
-/*
- * In DCB io FWD mode, 128 RX queue to 128 TX queue mapping
- */
-enum dcb_queue_mapping_mode {
-	DCB_VT_Q_MAPPING = 0,
-	DCB_4_TCS_Q_MAPPING,
-	DCB_8_TCS_Q_MAPPING
-};
-
 #define MAX_TX_QUEUE_STATS_MAPPINGS 1024 /* MAX_PORT of 32 @ 32 tx_queues/port */
 #define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */
 
@@ -536,7 +517,9 @@ void dev_set_link_down(portid_t pid);
 void init_port_config(void);
 void set_port_slave_flag(portid_t slave_pid);
 void clear_port_slave_flag(portid_t slave_pid);
-int init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf);
+int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode,
+		enum rte_eth_nb_tcs num_tcs,
+		uint8_t pfc_en);
 int start_port(portid_t pid);
 void stop_port(portid_t pid);
 void close_port(portid_t pid);
-- 
2.4.0