From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com,
beilei.xing@intel.com, Ting Xu <ting.xu@intel.com>,
stable@dpdk.org
Subject: [dpdk-dev] [PATCH v1] net/ice: fix VF bandwidth not cleared on DCF close
Date: Thu, 15 Jul 2021 10:16:42 +0800
Message-ID: <20210715021642.17478-1-ting.xu@intel.com>

When DCF is closed, the bandwidth limits that DCF configured for the VFs
are not cleared correctly. If the VFs are not re-allocated, the stale
configuration still takes effect the next time DCF starts. This patch
clears the VF bandwidth limits when DCF closes, so DCF has to re-configure
the VF bandwidth the next time it starts.

Fixes: 3a6bfc37eaf4 ("net/ice: support QoS config VF bandwidth in DCF")
Cc: stable@dpdk.org
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
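Note for reviewers less familiar with the DCF QoS path: below is a small,
standalone sketch of the clear-on-close flow this patch implements. It is
illustrative only; NUM_VFS, NUM_TC, struct vf_bw_cfg, send_vf_bw() and
clear_all_vf_bw() are simplified stand-ins invented for the example, not
driver types. The real logic is ice_dcf_clear_bw() in the diff below.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_VFS 2	/* hypothetical VF count */
#define NUM_TC  3	/* hypothetical number of TC nodes */

/* Simplified stand-in for the per-TC shaper settings. */
struct shaper_cfg {
	uint32_t peak;		/* peak rate limit */
	uint32_t committed;	/* committed rate limit */
};

/* Simplified stand-in for one VF's bandwidth configuration. */
struct vf_bw_cfg {
	struct shaper_cfg cfg[NUM_TC];
};

/* Stand-in for the message that pushes one VF's config to hardware. */
static int send_vf_bw(uint16_t vf_id, const struct vf_bw_cfg *bw)
{
	(void)bw;
	printf("VF %u: bandwidth limits cleared for %d TCs\n",
	       (unsigned int)vf_id, NUM_TC);
	return 0;
}

/* Zero every per-TC shaper and push the cleared config, one VF at a time. */
static int clear_all_vf_bw(struct vf_bw_cfg bw[NUM_VFS])
{
	for (uint16_t vf = 0; vf < NUM_VFS; vf++) {
		for (uint32_t tc = 0; tc < NUM_TC; tc++) {
			bw[vf].cfg[tc].peak = 0;
			bw[vf].cfg[tc].committed = 0;
		}
		if (send_vf_bw(vf, &bw[vf]) != 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct vf_bw_cfg bw[NUM_VFS];

	memset(bw, 0x5a, sizeof(bw));	/* pretend limits were configured */
	return clear_all_vf_bw(bw) ? 1 : 0;
}

In the driver itself the cleared configuration is sent per VF via
ice_dcf_set_vf_bw(), and the hw->qos_bw_cfg cache is freed in
ice_dcf_uninit_hw(), so no stale limit survives a DCF restart.
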
 drivers/net/ice/ice_dcf.c       | 10 ++++++---
 drivers/net/ice/ice_dcf.h       |  1 +
 drivers/net/ice/ice_dcf_sched.c | 39 ++++++++++++++++++++++++++++++++-
 3 files changed, 46 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 045800a2d9..4c2e0c7216 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -706,6 +706,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		if (hw->tm_conf.committed) {
+			ice_dcf_clear_bw(hw);
+			ice_dcf_tm_conf_uninit(eth_dev);
+		}
+
 	ice_dcf_disable_irq0(hw);
 	rte_intr_disable(intr_handle);
 	rte_intr_callback_unregister(intr_handle,
@@ -714,14 +720,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	ice_dcf_mode_disable(hw);
 	iavf_shutdown_adminq(&hw->avf);
 
-	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
-		ice_dcf_tm_conf_uninit(eth_dev);
-
 	rte_free(hw->arq_buf);
 	rte_free(hw->vf_vsi_map);
 	rte_free(hw->vf_res);
 	rte_free(hw->rss_lut);
 	rte_free(hw->rss_key);
+	rte_free(hw->qos_bw_cfg);
 }
 
 static int
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 711c0cf3ad..36c2d0965e 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -133,5 +133,6 @@ int ice_dcf_link_update(struct rte_eth_dev *dev,
 void ice_dcf_tm_conf_init(struct rte_eth_dev *dev);
 void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev);
 int ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id);
+int ice_dcf_clear_bw(struct ice_dcf_hw *hw);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c
index 1e16654d90..090988c6e1 100644
--- a/drivers/net/ice/ice_dcf_sched.c
+++ b/drivers/net/ice/ice_dcf_sched.c
@@ -32,6 +32,9 @@ const struct rte_tm_ops ice_dcf_tm_ops = {
 	.node_delete = ice_dcf_node_delete,
 };
 
+#define ICE_DCF_SCHED_TC_NODE 0xffff
+#define ICE_DCF_VFID 0
+
 void
 ice_dcf_tm_conf_init(struct rte_eth_dev *dev)
 {
@@ -709,6 +712,32 @@ ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id)
 	return ICE_SUCCESS;
 }
 
+int
+ice_dcf_clear_bw(struct ice_dcf_hw *hw)
+{
+	uint16_t vf_id;
+	uint32_t tc;
+	int ret, size;
+
+	size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+	       sizeof(struct virtchnl_dcf_bw_cfg) *
+	       (hw->tm_conf.nb_tc_node - 1);
+
+	for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) {
+		for (tc = 0; tc < hw->tm_conf.nb_tc_node; tc++) {
+			hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.peak = 0;
+			hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.committed = 0;
+		}
+		ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
+		if (ret) {
+			PMD_DRV_LOG(DEBUG, "VF %u BW clear failed", vf_id);
+			return ICE_ERR_CFG;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
 static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 				    int clear_on_fail,
 				    __rte_unused struct rte_tm_error *error)
@@ -748,7 +777,6 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 	cir_total = 0;
 
 	/* init tc bw configuration */
-#define ICE_DCF_SCHED_TC_NODE 0xffff
 	tc_bw->vf_id = ICE_DCF_SCHED_TC_NODE;
 	tc_bw->node_type = VIRTCHNL_DCF_TARGET_TC_BW;
 	tc_bw->num_elem = hw->tm_conf.nb_tc_node;
@@ -825,6 +853,15 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 	if (ret_val)
 		goto fail_clear;
 
+	/* store TC node bw configuration */
+	hw->qos_bw_cfg[ICE_DCF_VFID] = rte_zmalloc("tc_bw_cfg", size, 0);
+	if (!hw->qos_bw_cfg[ICE_DCF_VFID]) {
+		ret_val = ICE_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+	ice_memcpy(hw->qos_bw_cfg[ICE_DCF_VFID], tc_bw, sizeof(*tc_bw),
+		   ICE_NONDMA_TO_NONDMA);
+
 	hw->tm_conf.committed = true;
 	return ret_val;
 
--
2.17.1