From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id AB0278D8E for ; Mon, 11 Jan 2016 03:50:48 +0100 (CET) Received: from orsmga003.jf.intel.com ([10.7.209.27]) by fmsmga104.fm.intel.com with ESMTP; 10 Jan 2016 18:50:47 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.20,550,1444719600"; d="scan'208";a="724369950" Received: from shvmail01.sh.intel.com ([10.239.29.42]) by orsmga003.jf.intel.com with ESMTP; 10 Jan 2016 18:50:46 -0800 Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89]) by shvmail01.sh.intel.com with ESMTP id u0B2oiYF025442; Mon, 11 Jan 2016 10:50:44 +0800 Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1]) by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id u0B2ofvB030196; Mon, 11 Jan 2016 10:50:43 +0800 Received: (from wujingji@localhost) by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id u0B2ofCi030192; Mon, 11 Jan 2016 10:50:41 +0800 From: Jingjing Wu To: dev@dpdk.org Date: Mon, 11 Jan 2016 10:50:34 +0800 Message-Id: <1452480636-30155-2-git-send-email-jingjing.wu@intel.com> X-Mailer: git-send-email 1.7.4.1 In-Reply-To: <1452480636-30155-1-git-send-email-jingjing.wu@intel.com> References: <1452480636-30155-1-git-send-email-jingjing.wu@intel.com> Subject: [dpdk-dev] [RFC PATCH 1/3] i40e: enable DCB in VMDQ vsis X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Mon, 11 Jan 2016 02:50:49 -0000 Currently, DCB is only enabled on the PF; queue mapping and BW configuration are only done on the PF vsi. This patch enables DCB for VMDQ vsis by the following steps: 1. Take BW and ETS configuration on VEB. 2. Take BW and ETS configuration on VMDQ vsis. 3. Update TC and queues mapping on VMDQ vsis. 
To enable DCB on VMDQ, the number of TCs should not be larger than the number of queues in VMDQ pools, and the number of queues per VMDQ pool is specified by CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM in config/common_* file. Signed-off-by: Jingjing Wu --- drivers/net/i40e/i40e_ethdev.c | 153 ++++++++++++++++++++++++++++++++++++----- drivers/net/i40e/i40e_ethdev.h | 28 ++++---- 2 files changed, 149 insertions(+), 32 deletions(-) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index bf6220d..fbafcc6 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -8087,6 +8087,8 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, int i, total_tc = 0; uint16_t qpnum_per_tc, bsf, qp_idx; struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + uint16_t used_queues; ret = validate_tcmap_parameter(vsi, enabled_tcmap); if (ret != I40E_SUCCESS) @@ -8100,7 +8102,18 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, total_tc = 1; vsi->enabled_tc = enabled_tcmap; - qpnum_per_tc = dev_data->nb_rx_queues / total_tc; + /* different VSI has different queues assigned */ + if (vsi->type == I40E_VSI_MAIN) + used_queues = dev_data->nb_rx_queues - + pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else if (vsi->type == I40E_VSI_VMDQ2) + used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else { + PMD_INIT_LOG(ERR, "unsupported VSI type."); + return I40E_ERR_NO_AVAILABLE_VSI; + } + + qpnum_per_tc = used_queues / total_tc; /* Number of queues per enabled TC */ if (qpnum_per_tc == 0) { PMD_INIT_LOG(ERR, " number of queues is less that tcs."); @@ -8145,6 +8158,93 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, } /* + * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map + * @veb: VEB to be configured + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_config_switch_comp_tc(struct 
i40e_veb *veb, uint8_t tc_map) +{ + struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw; + struct i40e_aqc_query_switching_comp_bw_config_resp bw_query; + struct i40e_aqc_query_switching_comp_ets_config_resp ets_query; + struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi); + enum i40e_status_code ret = I40E_SUCCESS; + int i; + uint32_t bw_max; + + /* Check if enabled_tc is same as existing or new TCs */ + if (veb->enabled_tc == tc_map) + return ret; + + /* configure tc bandwidth */ + memset(&veb_bw, 0, sizeof(veb_bw)); + veb_bw.tc_valid_bits = tc_map; + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (tc_map & BIT_ULL(i)) + veb_bw.tc_bw_share_credits[i] = 1; + } + ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid, + &veb_bw, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation" + " per TC failed = %d", + hw->aq.asq_last_status); + return ret; + } + + memset(&ets_query, 0, sizeof(ets_query)); + ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, + &ets_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS" + " configuration %u", hw->aq.asq_last_status); + return ret; + } + memset(&bw_query, 0, sizeof(bw_query)); + ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, + &bw_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth" + " configuration %u", hw->aq.asq_last_status); + return ret; + } + + /* store and print out BW info */ + veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit); + veb->bw_info.bw_max = ets_query.tc_bw_max; + PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit); + PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max); + bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) | + (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) << + I40E_16_BIT_WIDTH); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + 
veb->bw_info.bw_ets_share_credits[i] = + bw_query.tc_bw_share_credits[i]; + veb->bw_info.bw_ets_credits[i] = + rte_le_to_cpu_16(bw_query.tc_bw_limits[i]); + /* 4 bits per TC, 4th bit is reserved */ + veb->bw_info.bw_ets_max[i] = + (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) & + I40E_3_BIT_MASK); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i, + veb->bw_info.bw_ets_share_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i, + veb->bw_info.bw_ets_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i, + veb->bw_info.bw_ets_max[i]); + } + + veb->enabled_tc = tc_map; + + return ret; +} + + +/* * i40e_vsi_config_tc - Configure VSI tc setting for given TC map * @vsi: VSI to be configured * @tc_map: enabled TC bitmap @@ -8152,7 +8252,7 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, * Returns 0 on success, negative value on failure */ static enum i40e_status_code -i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map) +i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; struct i40e_vsi_context ctxt; @@ -8294,15 +8394,27 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->local_dcbx_config); + /* if Veb is created, need to update TC of it at first */ + if (main_vsi->veb) { + ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); + if (ret) + PMD_INIT_LOG(WARNING, + "Failed configuring TC for VEB seid=%d\n", + main_vsi->veb->seid); + } /* Update each VSI */ i40e_vsi_config_tc(main_vsi, tc_map); if (main_vsi->veb) { TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) { - /* Beside main VSI, only enable default + /* Beside main VSI and VMDQ VSIs, only enable default * TC for other VSIs */ - ret = i40e_vsi_config_tc(vsi_list->vsi, - I40E_DEFAULT_TCMAP); + if (vsi_list->vsi->type == I40E_VSI_VMDQ2) + ret = i40e_vsi_config_tc(vsi_list->vsi, + tc_map); + else + ret = i40e_vsi_config_tc(vsi_list->vsi, + I40E_DEFAULT_TCMAP); if (ret) 
PMD_INIT_LOG(WARNING, "Failed configuring TC for VSI seid=%d\n", @@ -8422,9 +8534,8 @@ i40e_dcb_setup(struct rte_eth_dev *dev) return -ENOTSUP; } - if (pf->vf_num != 0 || - (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)) - PMD_INIT_LOG(DEBUG, " DCB only works on main vsi."); + if (pf->vf_num != 0) + PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis."); ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map); if (ret) { @@ -8449,7 +8560,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, struct i40e_vsi *vsi = pf->main_vsi; struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config; uint16_t bsf, tc_mapping; - int i; + int i, j; if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1); @@ -8460,23 +8571,27 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, for (i = 0; i < dcb_info->nb_tcs; i++) dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i]; - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (vsi->enabled_tc & (1 << i)) { + j = 0; + do { + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); /* only main vsi support multi TCs */ - dcb_info->tc_queue.tc_rxq[0][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; - dcb_info->tc_queue.tc_txq[0][i].base = - dcb_info->tc_queue.tc_rxq[0][i].base; + dcb_info->tc_queue.tc_txq[j][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base; bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; - dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf; - dcb_info->tc_queue.tc_txq[0][i].nb_queue = - dcb_info->tc_queue.tc_rxq[0][i].nb_queue; + dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf; + dcb_info->tc_queue.tc_txq[j][i].nb_queue = + dcb_info->tc_queue.tc_rxq[j][i].nb_queue; } - } - + vsi = pf->vmdq[j].vsi; + j++; + } while (j < 
RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL)); return 0; } diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 1f9792b..d426784 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -199,6 +199,19 @@ struct i40e_vsi_list { struct i40e_rx_queue; struct i40e_tx_queue; +/* Bandwidth limit information */ +struct i40e_bw_info { + uint16_t bw_limit; /* BW Limit (0 = disabled) */ + uint8_t bw_max; /* Max BW limit if enabled */ + + /* Relative credits within same TC with respect to other VSIs or Comps */ + uint8_t bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS]; + /* Bandwidth limit per TC */ + uint8_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS]; + /* Max bandwidth limit per TC */ + uint8_t bw_ets_max[I40E_MAX_TRAFFIC_CLASS]; +}; + /* Structure that defines a VEB */ struct i40e_veb { struct i40e_vsi_list_head head; @@ -207,6 +220,8 @@ struct i40e_veb { uint16_t uplink_seid; /* The uplink seid of this VEB */ uint16_t stats_idx; struct i40e_eth_stats stats; + uint8_t enabled_tc; /* The traffic class enabled */ + struct i40e_bw_info bw_info; /* VEB bandwidth information */ }; /* i40e MACVLAN filter structure */ @@ -216,19 +231,6 @@ struct i40e_macvlan_filter { uint16_t vlan_id; }; -/* Bandwidth limit information */ -struct i40e_bw_info { - uint16_t bw_limit; /* BW Limit (0 = disabled) */ - uint8_t bw_max; /* Max BW limit if enabled */ - - /* Relative VSI credits within same TC with respect to other VSIs */ - uint8_t bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS]; - /* Bandwidth limit per TC */ - uint8_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS]; - /* Max bandwidth limit per TC */ - uint8_t bw_ets_max[I40E_MAX_TRAFFIC_CLASS]; -}; - /* * Structure that defines a VSI, associated with a adapter. */ -- 2.4.0