DPDK patches and discussions
From: Qi Zhang <qi.z.zhang@intel.com>
To: qiming.yang@intel.com, wenjun1.wu@intel.com
Cc: dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH v4 1/2] net/ice: reset Tx sched node during commit
Date: Tue,  2 Jan 2024 05:28:59 -0500	[thread overview]
Message-ID: <20240102102900.3435496-1-qi.z.zhang@intel.com> (raw)
In-Reply-To: <20231226185428.3158880-1-qi.z.zhang@intel.com>

1. Always reset all Tx scheduler nodes at the beginning of a commit
   action. This prevents unexpected leftovers from a previous commit.
2. Reset all Tx scheduler nodes if a commit fails.

For leaf nodes, stop the queues, which removes their sched nodes from
the scheduler tree, then start the queues, which adds the sched nodes
back to the default topology.
For non-leaf nodes, simply reset them to default parameters.
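
In outline, the commit now tears down and rebuilds the tree as follows
(an illustrative pseudo-C sketch simplified from the diff below, not
part of the applied change; labels and helpers match the patch):

	ret = ice_remove_leaf_nodes(dev);   /* stop queues, drop leaf sched nodes */
	if (ret)
		goto fail_clear;
	ret = ice_reset_noleaf_nodes(dev);  /* VSI/queue group nodes back to defaults */
	if (ret)
		goto add_leaf;
	/* ... configure VSI, queue group and queue nodes;
	 * later failures jump to add_leaf or reset_leaf ...
	 */
	return 0;

reset_leaf:
	ice_remove_leaf_nodes(dev);         /* stop queues again */
add_leaf:
	ice_add_leaf_nodes(dev);            /* start queues: leaf nodes rejoin the default topology */
	ice_reset_noleaf_nodes(dev);        /* non-leaf nodes back to defaults */
fail_clear:
	if (clear_on_fail)
		/* clear all the traffic manager configuration */;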

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
v4:
- show node type in brief mode.

v3:
- fix incorrect parameter when querying RL profile

v2:
- fix CI build issue

 drivers/net/ice/ice_ethdev.h |   1 +
 drivers/net/ice/ice_tm.c     | 134 ++++++++++++++++++++++++++++-------
 2 files changed, 111 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1338c80d14..3b2db6aaa6 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -478,6 +478,7 @@ struct ice_tm_node {
 	struct ice_tm_node **children;
 	struct ice_tm_shaper_profile *shaper_profile;
 	struct rte_tm_node_params params;
+	struct ice_sched_node *sched_node;
 };
 
 /* node type of Traffic Manager */
diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c
index 1a30524b05..2ae55418b0 100644
--- a/drivers/net/ice/ice_tm.c
+++ b/drivers/net/ice/ice_tm.c
@@ -764,16 +764,94 @@ static int ice_cfg_hw_node(struct ice_hw *hw,
 	return 0;
 }
 
+static struct ice_sched_node *ice_get_vsi_node(struct ice_hw *hw)
+{
+	struct ice_sched_node *node = hw->port_info->root;
+	uint32_t vsi_layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+	uint32_t i;
+
+	for (i = 0; i < vsi_layer; i++)
+		node = node->children[0];
+
+	return node;
+}
+
+static int ice_reset_noleaf_nodes(struct rte_eth_dev *dev)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
+	struct ice_sched_node *vsi_node = ice_get_vsi_node(hw);
+	struct ice_tm_node *tm_node;
+	int ret;
+
+	/* reset vsi_node */
+	ret = ice_set_node_rate(hw, NULL, vsi_node->info.node_teid, ICE_AGG_TYPE_VSI);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "reset vsi node failed");
+		return ret;
+	}
+
+	/* reset queue group nodes */
+	TAILQ_FOREACH(tm_node, qgroup_list, node) {
+		if (tm_node->sched_node == NULL)
+			continue;
+
+		ret = ice_cfg_hw_node(hw, NULL,
+				      tm_node->sched_node,
+				      ICE_AGG_TYPE_Q);
+
+		if (ret) {
+			PMD_DRV_LOG(ERR, "reset queue group node %u failed", tm_node->id);
+			return ret;
+		}
+		tm_node->sched_node = NULL;
+	}
+
+	return 0;
+}
+
+static int ice_remove_leaf_nodes(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		ret = ice_tx_queue_stop(dev, i);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "stop queue %u failed", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int ice_add_leaf_nodes(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		ret = ice_tx_queue_start(dev, i);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "start queue %u failed", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
 static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 				 int clear_on_fail,
-				 __rte_unused struct rte_tm_error *error)
+				 struct rte_tm_error *error)
 {
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
 	struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
 	struct ice_tm_node *tm_node;
-	struct ice_sched_node *node;
 	struct ice_sched_node *vsi_node = NULL;
 	struct ice_sched_node *queue_node;
 	struct ice_tx_queue *txq;
@@ -785,23 +863,25 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 	uint32_t nb_qg;
 	uint32_t qid;
 	uint32_t q_teid;
-	uint32_t vsi_layer;
 
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		ret_val = ice_tx_queue_stop(dev, i);
-		if (ret_val) {
-			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
-			PMD_DRV_LOG(ERR, "stop queue %u failed", i);
-			goto fail_clear;
-		}
+	/* remove leaf nodes */
+	ret_val = ice_remove_leaf_nodes(dev);
+	if (ret_val) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		PMD_DRV_LOG(ERR, "remove leaf nodes failed");
+		goto fail_clear;
 	}
 
-	node = hw->port_info->root;
-	vsi_layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
-	for (i = 0; i < vsi_layer; i++)
-		node = node->children[0];
-	vsi_node = node;
+	/* reset no-leaf nodes. */
+	ret_val = ice_reset_noleaf_nodes(dev);
+	if (ret_val) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		PMD_DRV_LOG(ERR, "reset no-leaf nodes failed");
+		goto add_leaf;
+	}
 
+	/* config vsi node */
+	vsi_node = ice_get_vsi_node(hw);
 	tm_node = TAILQ_FIRST(&pf->tm_conf.vsi_list);
 
 	ret_val = ice_set_node_rate(hw, tm_node,
@@ -812,9 +892,10 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 		PMD_DRV_LOG(ERR,
 			    "configure vsi node %u bandwidth failed",
 			    tm_node->id);
-		goto reset_vsi;
+		goto add_leaf;
 	}
 
+	/* config queue group nodes */
 	nb_vsi_child = vsi_node->num_children;
 	nb_qg = vsi_node->children[0]->num_children;
 
@@ -833,7 +914,7 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 			if (ret_val) {
 				error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 				PMD_DRV_LOG(ERR, "start queue %u failed", qid);
-				goto reset_vsi;
+				goto reset_leaf;
 			}
 			txq = dev->data->tx_queues[qid];
 			q_teid = txq->q_teid;
@@ -841,7 +922,7 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 			if (queue_node == NULL) {
 				error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 				PMD_DRV_LOG(ERR, "get queue %u node failed", qid);
-				goto reset_vsi;
+				goto reset_leaf;
 			}
 			if (queue_node->info.parent_teid == qgroup_sched_node->info.node_teid)
 				continue;
@@ -849,19 +930,20 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 			if (ret_val) {
 				error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 				PMD_DRV_LOG(ERR, "move queue %u failed", qid);
-				goto reset_vsi;
+				goto reset_leaf;
 			}
 		}
 
 		ret_val = ice_cfg_hw_node(hw, tm_node,
 					  qgroup_sched_node,
 					  ICE_AGG_TYPE_Q);
+		tm_node->sched_node = qgroup_sched_node;
 		if (ret_val) {
 			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 			PMD_DRV_LOG(ERR,
 				    "configure queue group node %u failed",
 				    tm_node->id);
-			goto reset_vsi;
+			goto reset_leaf;
 		}
 
 		idx_qg++;
@@ -872,10 +954,11 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 		if (idx_vsi_child >= nb_vsi_child) {
 			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 			PMD_DRV_LOG(ERR, "too many queues");
-			goto reset_vsi;
+			goto reset_leaf;
 		}
 	}
 
+	/* config queue nodes */
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		qid = tm_node->id;
 		txq = dev->data->tx_queues[qid];
@@ -890,14 +973,17 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 			PMD_DRV_LOG(ERR,
 				    "configure queue group node %u failed",
 				    tm_node->id);
-			goto reset_vsi;
+			goto reset_leaf;
 		}
 	}
 
 	return ret_val;
 
-reset_vsi:
-	ice_set_node_rate(hw, NULL, vsi_node->info.node_teid, ICE_AGG_TYPE_VSI);
+reset_leaf:
+	ice_remove_leaf_nodes(dev);
+add_leaf:
+	ice_add_leaf_nodes(dev);
+	ice_reset_noleaf_nodes(dev);
 fail_clear:
 	/* clear all the traffic manager configuration */
 	if (clear_on_fail) {
-- 
2.31.1



Thread overview: 22+ messages
2023-12-26 18:54 [PATCH 1/2] net/ice: add Tx scheduling tree dump support Qi Zhang
2023-12-26 18:54 ` [PATCH 2/2] doc: add document for diagnostic utilities Qi Zhang
2023-12-27 12:31 ` [PATCH v2 1/2] net/ice: add Tx scheduling tree dump support Qi Zhang
2023-12-27 12:31   ` [PATCH v2 2/2] doc: add document for diagnostic utilities Qi Zhang
2023-12-28 10:10 ` [PATCH v3 1/2] net/ice: add Tx scheduling tree dump support Qi Zhang
2023-12-28 10:10   ` [PATCH v3 2/2] doc: add document for diagnostic utilities Qi Zhang
2024-01-02 10:28 ` Qi Zhang [this message]
2024-01-02 10:29   ` [PATCH v4 2/2] net/ice: support Tx sched commit before dev_start Qi Zhang
2024-01-02 10:32 ` [PATCH 1/2] net/ice: add Tx scheduling tree dump support Qi Zhang
2024-01-02 10:32   ` [PATCH 2/2] doc: add document for diagnostic utilities Qi Zhang
2024-01-02 10:33 ` [PATCH v4 1/2] net/ice: add Tx scheduling tree dump support Qi Zhang
2024-01-02 10:33   ` [PATCH v4 2/2] doc: add document for diagnostic utilities Qi Zhang
2024-01-02 12:24 ` [PATCH v5 1/2] net/ice: add Tx scheduling tree dump support Qi Zhang
2024-01-02 12:24   ` [PATCH v5 2/2] doc: add document for diagnostic utilities Qi Zhang
2024-01-02 23:30   ` [PATCH v5 1/2] net/ice: add Tx scheduling tree dump support Stephen Hemminger
2024-01-02 12:29 ` [PATCH v6 " Qi Zhang
2024-01-02 12:29   ` [PATCH v6 2/2] doc: add document for ice diagnostic utilities Qi Zhang
2024-01-02 17:05     ` Stephen Hemminger
2024-01-02 12:39 ` [PATCH v7 1/2] net/ice: add Tx scheduling tree dump support Qi Zhang
2024-01-02  6:22   ` Wu, Wenjun1
2024-01-02 12:39   ` [PATCH v7 2/2] doc: add document for ice diagnostic utilities Qi Zhang
2024-01-02  7:14     ` Yang, Qiming
