From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com,
	beilei.xing@intel.com, Ting Xu <ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v2] net/ice: support QoS BW config after VF reset in DCF
Date: Thu,  8 Jul 2021 10:33:46 +0800	[thread overview]
Message-ID: <20210708023346.34436-1-ting.xu@intel.com> (raw)
In-Reply-To: <20210702150027.26294-1-ting.xu@intel.com>

When a VF reset happens, the QoS bandwidth configuration of that VF is
lost. If the reset is not caused by a DCB change, the DCF is expected
to replay the bandwidth configuration to the VF. With this patch, when
a VSI update event is received from the PF after a VF reset and it is
confirmed that the DCB configuration has not changed, the stored
bandwidth configuration is replayed.
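For context, a minimal, untested sketch of how an application might set
the per-VF bandwidth through the generic rte_tm API on the DCF port is
shown below. It is the hierarchy committed here that this patch caches
in hw->qos_bw_cfg[] and replays after a VF reset. The node IDs, levels
and rates are illustrative assumptions only, not part of this patch;
the real numbering depends on the capabilities reported by
rte_tm_capabilities_get().

    #include <rte_tm.h>

    /* Illustrative IDs: one TC node under the port root and one leaf
     * node for the VF's VSI.
     */
    #define ROOT_NODE_ID 1000
    #define TC0_NODE_ID   100
    #define VF0_NODE_ID     0

    static int
    setup_vf_bw(uint16_t dcf_port_id)
    {
        struct rte_tm_shaper_profile_params sp = {
            .committed = { .rate = 100 * 1000 * 1000 / 8 }, /* 100 Mbps CIR */
            .peak      = { .rate = 200 * 1000 * 1000 / 8 }, /* 200 Mbps PIR */
        };
        struct rte_tm_node_params np = {
            .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
        };
        struct rte_tm_error err;
        int ret;

        ret = rte_tm_shaper_profile_add(dcf_port_id, 1, &sp, &err);
        if (ret)
            return ret;

        /* root -> TC 0 -> VF 0 leaf */
        ret = rte_tm_node_add(dcf_port_id, ROOT_NODE_ID, RTE_TM_NODE_ID_NULL,
                              0, 1, 0, &np, &err);
        if (ret)
            return ret;
        ret = rte_tm_node_add(dcf_port_id, TC0_NODE_ID, ROOT_NODE_ID,
                              0, 1, 1, &np, &err);
        if (ret)
            return ret;

        np.shaper_profile_id = 1;
        ret = rte_tm_node_add(dcf_port_id, VF0_NODE_ID, TC0_NODE_ID,
                              0, 1, 2, &np, &err);
        if (ret)
            return ret;

        /* Committing applies the per-VF bandwidth in the DCF; with this
         * patch it is also stored in hw->qos_bw_cfg[] so it can be
         * replayed after a VF reset that does not change DCB.
         */
        return rte_tm_hierarchy_commit(dcf_port_id, 1 /* clear on fail */,
                                       &err);
    }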

Signed-off-by: Ting Xu <ting.xu@intel.com>

---
v1->v2: rebase
---
 drivers/net/ice/ice_dcf.c        | 11 +++++--
 drivers/net/ice/ice_dcf.h        |  2 ++
 drivers/net/ice/ice_dcf_ethdev.c |  1 -
 drivers/net/ice/ice_dcf_parent.c |  3 ++
 drivers/net/ice/ice_dcf_sched.c  | 52 +++++++++++++++++++++++++++++++-
 5 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 349d23ee4f..045800a2d9 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -577,7 +577,7 @@ int
 ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	int ret;
+	int ret, size;
 
 	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
 	hw->avf.back = hw;
@@ -669,8 +669,15 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 		}
 	}
 
-	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
 		ice_dcf_tm_conf_init(eth_dev);
+		size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
+		hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
+		if (!hw->qos_bw_cfg) {
+			PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
+			goto err_rss;
+		}
+	}
 
 	hw->eth_dev = eth_dev;
 	rte_intr_callback_register(&pci_dev->intr_handle,
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 1c7653de3d..711c0cf3ad 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -90,6 +90,7 @@ struct ice_dcf_hw {
 	uint16_t pf_vsi_id;
 
 	struct ice_dcf_tm_conf tm_conf;
+	struct virtchnl_dcf_bw_cfg_list **qos_bw_cfg;
 	struct ice_aqc_port_ets_elem *ets_config;
 	struct virtchnl_version_info virtchnl_version;
 	struct virtchnl_vf_resource *vf_res; /* VF resource */
@@ -131,5 +132,6 @@ int ice_dcf_link_update(struct rte_eth_dev *dev,
 		    __rte_unused int wait_to_complete);
 void ice_dcf_tm_conf_init(struct rte_eth_dev *dev);
 void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev);
+int ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 69fe6e63d1..cab7c4da87 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -622,7 +622,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
-	dcf_ad->real_hw.tm_conf.committed = false;
 
 	return 0;
 }
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index c59cd0bef9..03155c9df0 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -143,6 +143,9 @@ ice_dcf_vsi_update_service_handler(void *param)
 		}
 	}
 
+	if (hw->tm_conf.committed)
+		ice_dcf_replay_vf_bw(hw, reset_param->vf_id);
+
 	rte_spinlock_unlock(&vsi_update_lock);
 
 	free(param);
diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c
index 8a0529a3bc..1e16654d90 100644
--- a/drivers/net/ice/ice_dcf_sched.c
+++ b/drivers/net/ice/ice_dcf_sched.c
@@ -668,6 +668,47 @@ static int ice_dcf_commit_check(struct ice_dcf_hw *hw)
 	return ICE_SUCCESS;
 }
 
+int
+ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id)
+{
+	struct ice_aqc_port_ets_elem old_ets_config;
+	struct ice_dcf_adapter *adapter;
+	struct ice_hw *parent_hw;
+	int ret, size;
+
+	adapter = hw->eth_dev->data->dev_private;
+	parent_hw = &adapter->parent.hw;
+
+	/* store the old ets config */
+	old_ets_config = *hw->ets_config;
+
+	ice_memset(hw->ets_config, 0, sizeof(*hw->ets_config), ICE_NONDMA_MEM);
+	ret = ice_aq_query_port_ets(parent_hw->port_info,
+			hw->ets_config, sizeof(*hw->ets_config),
+			NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
+		return ret;
+	}
+
+	if (memcmp(&old_ets_config, hw->ets_config, sizeof(old_ets_config))) {
+		PMD_DRV_LOG(DEBUG, "ETS config changes, do not replay BW");
+		return ICE_SUCCESS;
+	}
+
+	size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+		sizeof(struct virtchnl_dcf_bw_cfg) *
+		(hw->tm_conf.nb_tc_node - 1);
+
+	ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
+	if (ret) {
+		PMD_DRV_LOG(DEBUG, "VF %u BW replay failed", vf_id);
+		return ICE_ERR_CFG;
+	}
+
+	return ICE_SUCCESS;
+}
+
 static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 				 int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error)
@@ -757,7 +798,16 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 		ret_val = ice_dcf_set_vf_bw(hw, vf_bw, size);
 		if (ret_val)
 			goto fail_clear;
-		memset(vf_bw, 0, size);
+
+		hw->qos_bw_cfg[vf_id] = rte_zmalloc("vf_bw_cfg", size, 0);
+		if (!hw->qos_bw_cfg[vf_id]) {
+			ret_val = ICE_ERR_NO_MEMORY;
+			goto fail_clear;
+		}
+		/* store the bandwidth information for replay */
+		ice_memcpy(hw->qos_bw_cfg[vf_id], vf_bw, sizeof(*vf_bw),
+			   ICE_NONDMA_TO_NONDMA);
+		ice_memset(vf_bw, 0, size, ICE_NONDMA_MEM);
 	}
 
 	/* check if total CIR is larger than port bandwidth */
-- 
2.17.1


Thread overview: 3+ messages
2021-07-02 15:00 [dpdk-dev] [PATCH v1] " Ting Xu
2021-07-08  2:33 ` Ting Xu [this message]
2021-07-08  5:28   ` [dpdk-dev] [PATCH v2] " Zhang, Qi Z
