From: Qi Zhang <qi.z.zhang@intel.com>
To: qiming.yang@intel.com, wenjun1.wu@intel.com
Cc: dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH 4/6] net/ice: refactor hardware Tx sched node config
Date: Tue, 2 Jan 2024 14:42:30 -0500 [thread overview]
Message-ID: <20240102194232.3614305-5-qi.z.zhang@intel.com> (raw)
In-Reply-To: <20240102194232.3614305-1-qi.z.zhang@intel.com>
Consolidate Tx scheduler node configuration into a single function,
'ice_cfg_hw_node', where rate limit, weight and priority are
configured for the queue group level and the queue level.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/ice_tm.c | 97 ++++++++++++++++++++--------------------
1 file changed, 49 insertions(+), 48 deletions(-)
diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c
index 604d045e2c..20cc47fff1 100644
--- a/drivers/net/ice/ice_tm.c
+++ b/drivers/net/ice/ice_tm.c
@@ -713,6 +713,49 @@ static int ice_set_node_rate(struct ice_hw *hw,
return 0;
}
+static int ice_cfg_hw_node(struct ice_hw *hw,
+ struct ice_tm_node *tm_node,
+ struct ice_sched_node *sched_node)
+{
+ enum ice_status status;
+ uint8_t priority;
+ uint16_t weight;
+ int ret;
+
+ ret = ice_set_node_rate(hw, tm_node, sched_node);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "configure queue group %u bandwidth failed",
+ sched_node->info.node_teid);
+ return ret;
+ }
+
+ priority = tm_node ? (7 - tm_node->priority) : 0;
+ status = ice_sched_cfg_sibl_node_prio(hw->port_info,
+ sched_node,
+ priority);
+ if (status) {
+ PMD_DRV_LOG(ERR, "configure node %u priority %u failed",
+ sched_node->info.node_teid,
+ priority);
+ return -EINVAL;
+ }
+
+ weight = tm_node ? (uint16_t)tm_node->weight : 4;
+
+ status = ice_sched_cfg_node_bw_alloc(hw, sched_node,
+ ICE_MAX_BW,
+ weight);
+ if (status) {
+ PMD_DRV_LOG(ERR, "configure node %u weight %u failed",
+ sched_node->info.node_teid,
+ weight);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ice_hierarchy_commit(struct rte_eth_dev *dev,
int clear_on_fail,
__rte_unused struct rte_tm_error *error)
@@ -726,8 +769,7 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
struct ice_sched_node *vsi_node = NULL;
struct ice_sched_node *queue_node;
struct ice_tx_queue *txq;
- int ret_val = ICE_SUCCESS;
- uint8_t priority;
+ int ret_val = 0;
uint32_t i;
uint32_t idx_vsi_child;
uint32_t idx_qg;
@@ -801,36 +843,15 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
}
}
- ret_val = ice_set_node_rate(hw, tm_node, qgroup_sched_node);
+ ret_val = ice_cfg_hw_node(hw, tm_node, qgroup_sched_node);
if (ret_val) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
PMD_DRV_LOG(ERR,
- "configure queue group %u bandwidth failed",
+ "configure queue group node %u failed",
tm_node->id);
goto reset_vsi;
}
- priority = 7 - tm_node->priority;
- ret_val = ice_sched_cfg_sibl_node_prio_lock(hw->port_info, qgroup_sched_node,
- priority);
- if (ret_val) {
- error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
- PMD_DRV_LOG(ERR, "configure queue group %u priority failed",
- tm_node->priority);
- goto fail_clear;
- }
-
- ret_val = ice_sched_cfg_node_bw_alloc(hw, qgroup_sched_node,
- ICE_MAX_BW,
- (uint16_t)tm_node->weight);
- if (ret_val) {
- error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
- PMD_DRV_LOG(ERR, "configure queue group %u weight %u failed",
- tm_node->id,
- tm_node->weight);
- goto fail_clear;
- }
-
idx_qg++;
if (idx_qg >= nb_qg) {
idx_qg = 0;
@@ -847,36 +868,16 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
qid = tm_node->id;
txq = dev->data->tx_queues[qid];
q_teid = txq->q_teid;
-
queue_node = ice_sched_get_node(hw->port_info, q_teid);
- ret_val = ice_set_node_rate(hw, tm_node, queue_node);
+
+ ret_val = ice_cfg_hw_node(hw, tm_node, queue_node);
if (ret_val) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
PMD_DRV_LOG(ERR,
- "configure queue %u bandwidth failed",
+ "configure queue group node %u failed",
tm_node->id);
goto reset_vsi;
}
-
- priority = 7 - tm_node->priority;
- ret_val = ice_cfg_vsi_q_priority(hw->port_info, 1,
- &q_teid, &priority);
- if (ret_val) {
- error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
- PMD_DRV_LOG(ERR, "configure queue %u priority failed", tm_node->priority);
- goto fail_clear;
- }
-
- queue_node = ice_sched_get_node(hw->port_info, q_teid);
- ret_val = ice_sched_cfg_node_bw_alloc(hw, queue_node, ICE_MAX_BW,
- (uint16_t)tm_node->weight);
- if (ret_val) {
- error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
- PMD_DRV_LOG(ERR, "configure queue %u weight %u failed",
- tm_node->id,
- tm_node->weight);
- goto fail_clear;
- }
}
return ret_val;
--
2.31.1
next prev parent reply other threads:[~2024-01-02 11:21 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-02 19:42 [PATCH 0/6] net/ice improve qos Qi Zhang
2024-01-02 19:42 ` [PATCH 1/6] net/ice: remove redundent code Qi Zhang
2024-01-02 19:42 ` [PATCH 2/6] net/ice: support VSI level bandwidth config Qi Zhang
2024-01-02 19:42 ` [PATCH 3/6] net/ice: support queue group weight configure Qi Zhang
2024-01-02 19:42 ` Qi Zhang [this message]
2024-01-02 19:42 ` [PATCH 5/6] net/ice: reset Tx sched node during commit Qi Zhang
2024-01-02 19:42 ` [PATCH 6/6] net/ice: support Tx sched commit before device start Qi Zhang
2024-01-04 2:28 ` [PATCH 0/6] net/ice improve qos Wu, Wenjun1
2024-01-04 2:57 ` Zhang, Qi Z
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240102194232.3614305-5-qi.z.zhang@intel.com \
--to=qi.z.zhang@intel.com \
--cc=dev@dpdk.org \
--cc=qiming.yang@intel.com \
--cc=wenjun1.wu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).