From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id EA322A0C51 for ; Thu, 15 Jul 2021 04:13:48 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id DA49F410F1; Thu, 15 Jul 2021 04:13:48 +0200 (CEST) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by mails.dpdk.org (Postfix) with ESMTP id 9932F40143; Thu, 15 Jul 2021 04:13:46 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10045"; a="210273172" X-IronPort-AV: E=Sophos;i="5.84,240,1620716400"; d="scan'208";a="210273172" Received: from orsmga008.jf.intel.com ([10.7.209.65]) by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 14 Jul 2021 19:13:45 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.84,240,1620716400"; d="scan'208";a="460200383" Received: from dpdk-xuting-second.sh.intel.com ([10.67.116.193]) by orsmga008.jf.intel.com with ESMTP; 14 Jul 2021 19:13:43 -0700 From: Ting Xu To: dev@dpdk.org Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com, Ting Xu , stable@dpdk.org Date: Thu, 15 Jul 2021 10:16:42 +0800 Message-Id: <20210715021642.17478-1-ting.xu@intel.com> X-Mailer: git-send-email 2.17.1 Subject: [dpdk-stable] [PATCH v1] net/ice: fix incorrect bandwidth clearing on DCF close X-BeenThere: stable@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: patches for DPDK stable branches List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: stable-bounces@dpdk.org Sender: "stable" When closing DCF, the bandwidth limit configured for VFs by DCF is not cleared correctly. The configuration will still take effect when DCF starts again, if VFs are not re-allocated. This patch clears the VFs' bandwidth limit when DCF closes; DCF then needs to re-configure the bandwidth for VFs the next time it starts. 
Fixes: 3a6bfc37eaf4 ("net/ice: support QoS config VF bandwidth in DCF") Cc: stable@dpdk.org Signed-off-by: Ting Xu --- drivers/net/ice/ice_dcf.c | 10 ++++++--- drivers/net/ice/ice_dcf.h | 1 + drivers/net/ice/ice_dcf_sched.c | 39 ++++++++++++++++++++++++++++++++- 3 files changed, 46 insertions(+), 4 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 045800a2d9..4c2e0c7216 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -706,6 +706,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) + if (hw->tm_conf.committed) { + ice_dcf_clear_bw(hw); + ice_dcf_tm_conf_uninit(eth_dev); + } + ice_dcf_disable_irq0(hw); rte_intr_disable(intr_handle); rte_intr_callback_unregister(intr_handle, @@ -714,14 +720,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) ice_dcf_mode_disable(hw); iavf_shutdown_adminq(&hw->avf); - if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) - ice_dcf_tm_conf_uninit(eth_dev); - rte_free(hw->arq_buf); rte_free(hw->vf_vsi_map); rte_free(hw->vf_res); rte_free(hw->rss_lut); rte_free(hw->rss_key); + rte_free(hw->qos_bw_cfg); } static int diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index 711c0cf3ad..36c2d0965e 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -133,5 +133,6 @@ int ice_dcf_link_update(struct rte_eth_dev *dev, void ice_dcf_tm_conf_init(struct rte_eth_dev *dev); void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev); int ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id); +int ice_dcf_clear_bw(struct ice_dcf_hw *hw); #endif /* _ICE_DCF_H_ */ diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c index 1e16654d90..090988c6e1 100644 --- a/drivers/net/ice/ice_dcf_sched.c +++ 
b/drivers/net/ice/ice_dcf_sched.c @@ -32,6 +32,9 @@ const struct rte_tm_ops ice_dcf_tm_ops = { .node_delete = ice_dcf_node_delete, }; +#define ICE_DCF_SCHED_TC_NODE 0xffff +#define ICE_DCF_VFID 0 + void ice_dcf_tm_conf_init(struct rte_eth_dev *dev) { @@ -709,6 +712,32 @@ ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id) return ICE_SUCCESS; } +int +ice_dcf_clear_bw(struct ice_dcf_hw *hw) +{ + uint16_t vf_id; + uint32_t tc; + int ret, size; + + size = sizeof(struct virtchnl_dcf_bw_cfg_list) + + sizeof(struct virtchnl_dcf_bw_cfg) * + (hw->tm_conf.nb_tc_node - 1); + + for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) { + for (tc = 0; tc < hw->tm_conf.nb_tc_node; tc++) { + hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.peak = 0; + hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.committed = 0; + } + ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size); + if (ret) { + PMD_DRV_LOG(DEBUG, "VF %u BW clear failed", vf_id); + return ICE_ERR_CFG; + } + } + + return ICE_SUCCESS; +} + static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, __rte_unused struct rte_tm_error *error) @@ -748,7 +777,6 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev, cir_total = 0; /* init tc bw configuration */ -#define ICE_DCF_SCHED_TC_NODE 0xffff tc_bw->vf_id = ICE_DCF_SCHED_TC_NODE; tc_bw->node_type = VIRTCHNL_DCF_TARGET_TC_BW; tc_bw->num_elem = hw->tm_conf.nb_tc_node; @@ -825,6 +853,15 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev, if (ret_val) goto fail_clear; + /* store TC node bw configuration */ + hw->qos_bw_cfg[ICE_DCF_VFID] = rte_zmalloc("tc_bw_cfg", size, 0); + if (!hw->qos_bw_cfg[ICE_DCF_VFID]) { + ret_val = ICE_ERR_NO_MEMORY; + goto fail_clear; + } + ice_memcpy(hw->qos_bw_cfg[ICE_DCF_VFID], tc_bw, sizeof(*tc_bw), + ICE_NONDMA_TO_NONDMA); + hw->tm_conf.committed = true; return ret_val; -- 2.17.1