patches for DPDK stable branches
* [dpdk-stable] [PATCH v1] net/ice: fix not clear bandwidth correctly when DCF close
@ 2021-07-15  2:16 Ting Xu
  2021-07-18 10:27 ` Zhang, Qi Z
  0 siblings, 1 reply; 2+ messages in thread
From: Ting Xu @ 2021-07-15  2:16 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, jingjing.wu, beilei.xing, Ting Xu, stable

When closing DCF, the bandwidth limit configured for VFs by DCF is not
cleared correctly: the old configuration still takes effect the next time
DCF starts, if the VFs have not been re-allocated. This patch clears the
VFs' bandwidth limit when DCF closes, so DCF has to re-configure the
bandwidth for its VFs the next time it starts.

Fixes: 3a6bfc37eaf4 ("net/ice: support QoS config VF bandwidth in DCF")
Cc: stable@dpdk.org

Signed-off-by: Ting Xu <ting.xu@intel.com>
---
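As the commit message notes, DCF has to re-configure VF bandwidth the next
time it starts. Below is a minimal, hypothetical sketch (not part of this
patch) of that application-side flow through the generic rte_tm API; the
helper name, node IDs and level number are assumptions for illustration,
and the real hierarchy layout comes from the ice DCF TM capabilities.

#include <string.h>
#include <rte_tm.h>

/* Hypothetical helper: re-apply a peak-rate limit to one VF node on the
 * DCF port after DCF has restarted.
 */
static int
reapply_vf_bw(uint16_t dcf_port, uint32_t vf_node_id, uint32_t tc_node_id,
	      uint64_t peak_bytes_per_sec)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_node_params np;
	struct rte_tm_error err;
	uint32_t profile_id = vf_node_id; /* any unused profile ID works */
	int ret;

	/* Shaper profile carrying the peak rate (bytes per second). */
	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = peak_bytes_per_sec;
	ret = rte_tm_shaper_profile_add(dcf_port, profile_id, &sp, &err);
	if (ret != 0)
		return ret;

	/* Attach the profile to the VF node under its TC node. */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = profile_id;
	ret = rte_tm_node_add(dcf_port, vf_node_id, tc_node_id,
			      0 /* priority */, 1 /* weight */,
			      2 /* assumed VF level */, &np, &err);
	if (ret != 0)
		return ret;

	/* Nothing reaches the PMD until the hierarchy is committed. */
	return rte_tm_hierarchy_commit(dcf_port, 1 /* clear on fail */, &err);
}

The rte_tm_hierarchy_commit() step is what hands the shaper settings to the
driver, which is why ice_dcf_uninit_hw() below only clears bandwidth when
hw->tm_conf.committed is set.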
 drivers/net/ice/ice_dcf.c       | 10 ++++++---
 drivers/net/ice/ice_dcf.h       |  1 +
 drivers/net/ice/ice_dcf_sched.c | 39 ++++++++++++++++++++++++++++++++-
 3 files changed, 46 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 045800a2d9..4c2e0c7216 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -706,6 +706,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		if (hw->tm_conf.committed) {
+			ice_dcf_clear_bw(hw);
+			ice_dcf_tm_conf_uninit(eth_dev);
+		}
+
 	ice_dcf_disable_irq0(hw);
 	rte_intr_disable(intr_handle);
 	rte_intr_callback_unregister(intr_handle,
@@ -714,14 +720,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	ice_dcf_mode_disable(hw);
 	iavf_shutdown_adminq(&hw->avf);
 
-	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
-		ice_dcf_tm_conf_uninit(eth_dev);
-
 	rte_free(hw->arq_buf);
 	rte_free(hw->vf_vsi_map);
 	rte_free(hw->vf_res);
 	rte_free(hw->rss_lut);
 	rte_free(hw->rss_key);
+	rte_free(hw->qos_bw_cfg);
 }
 
 static int
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 711c0cf3ad..36c2d0965e 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -133,5 +133,6 @@ int ice_dcf_link_update(struct rte_eth_dev *dev,
 void ice_dcf_tm_conf_init(struct rte_eth_dev *dev);
 void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev);
 int ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id);
+int ice_dcf_clear_bw(struct ice_dcf_hw *hw);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c
index 1e16654d90..090988c6e1 100644
--- a/drivers/net/ice/ice_dcf_sched.c
+++ b/drivers/net/ice/ice_dcf_sched.c
@@ -32,6 +32,9 @@ const struct rte_tm_ops ice_dcf_tm_ops = {
 	.node_delete = ice_dcf_node_delete,
 };
 
+#define ICE_DCF_SCHED_TC_NODE 0xffff
+#define ICE_DCF_VFID	0
+
 void
 ice_dcf_tm_conf_init(struct rte_eth_dev *dev)
 {
@@ -709,6 +712,32 @@ ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id)
 	return ICE_SUCCESS;
 }
 
+int
+ice_dcf_clear_bw(struct ice_dcf_hw *hw)
+{
+	uint16_t vf_id;
+	uint32_t tc;
+	int ret, size;
+
+	size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+		sizeof(struct virtchnl_dcf_bw_cfg) *
+		(hw->tm_conf.nb_tc_node - 1);
+
+	for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) {
+		for (tc = 0; tc < hw->tm_conf.nb_tc_node; tc++) {
+			hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.peak = 0;
+			hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.committed = 0;
+		}
+		ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
+		if (ret) {
+			PMD_DRV_LOG(DEBUG, "VF %u BW clear failed", vf_id);
+			return ICE_ERR_CFG;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
 static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 				 int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error)
@@ -748,7 +777,6 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 	cir_total = 0;
 
 	/* init tc bw configuration */
-#define ICE_DCF_SCHED_TC_NODE 0xffff
 	tc_bw->vf_id = ICE_DCF_SCHED_TC_NODE;
 	tc_bw->node_type = VIRTCHNL_DCF_TARGET_TC_BW;
 	tc_bw->num_elem = hw->tm_conf.nb_tc_node;
@@ -825,6 +853,15 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 	if (ret_val)
 		goto fail_clear;
 
+	/* store TC node bw configuration */
+	hw->qos_bw_cfg[ICE_DCF_VFID] = rte_zmalloc("tc_bw_cfg", size, 0);
+	if (!hw->qos_bw_cfg[ICE_DCF_VFID]) {
+		ret_val = ICE_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+	ice_memcpy(hw->qos_bw_cfg[ICE_DCF_VFID], tc_bw, sizeof(*tc_bw),
+		   ICE_NONDMA_TO_NONDMA);
+
 	hw->tm_conf.committed = true;
 	return ret_val;
 
-- 
2.17.1

* Re: [dpdk-stable] [PATCH v1] net/ice: fix not clear bandwidth correctly when DCF close
  2021-07-15  2:16 [dpdk-stable] [PATCH v1] net/ice: fix not clear bandwidth correctly when DCF close Ting Xu
@ 2021-07-18 10:27 ` Zhang, Qi Z
  0 siblings, 0 replies; 2+ messages in thread
From: Zhang, Qi Z @ 2021-07-18 10:27 UTC (permalink / raw)
  To: Xu, Ting, dev; +Cc: Wu, Jingjing, Xing, Beilei, stable



> -----Original Message-----
> From: Xu, Ting <ting.xu@intel.com>
> Sent: Thursday, July 15, 2021 10:17 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; Xu, Ting <ting.xu@intel.com>;
> stable@dpdk.org
> Subject: [PATCH v1] net/ice: fix not clear bandwidth correctly when DCF close
> 
> When closing DCF, the bandwidth limit configured for VFs by DCF is not cleared
> correctly: the old configuration still takes effect the next time DCF starts, if
> the VFs have not been re-allocated. This patch clears the VFs' bandwidth limit
> when DCF closes, so DCF has to re-configure the bandwidth for its VFs the next
> time it starts.
> 
> Fixes: 3a6bfc37eaf4 ("net/ice: support QoS config VF bandwidth in DCF")
> Cc: stable@dpdk.org

No need to Cc stable, as the fix is in the same release as the original commit.
> 
> Signed-off-by: Ting Xu <ting.xu@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi
