From: Qi Zhang <qi.z.zhang@intel.com>
To: qiming.yang@intel.com, wenjun1.wu@intel.com
Cc: dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH 2/6] net/ice: support VSI level bandwidth config
Date: Tue, 2 Jan 2024 14:42:28 -0500 [thread overview]
Message-ID: <20240102194232.3614305-3-qi.z.zhang@intel.com> (raw)
In-Reply-To: <20240102194232.3614305-1-qi.z.zhang@intel.com>
Enable the configuration of peak and committed rates for a Tx scheduler
node at the VSI level. This patch also consolidates rate configuration
across various levels into a single function 'ice_set_node_rate'.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_sched.c | 2 +-
drivers/net/ice/base/ice_sched.h | 4 +-
drivers/net/ice/ice_tm.c | 142 +++++++++++++++++++------------
3 files changed, 91 insertions(+), 57 deletions(-)
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index a4d31647fe..23cc1ee50a 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -4429,7 +4429,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
* NOTE: Caller provides the correct SRL node in case of shared profile
* settings.
*/
-static enum ice_status
+enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
diff --git a/drivers/net/ice/base/ice_sched.h b/drivers/net/ice/base/ice_sched.h
index 4b68f3f535..a600ff9a24 100644
--- a/drivers/net/ice/base/ice_sched.h
+++ b/drivers/net/ice/base/ice_sched.h
@@ -237,5 +237,7 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
-
+enum ice_status
+ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c
index 9e2f981fa3..d9187af8af 100644
--- a/drivers/net/ice/ice_tm.c
+++ b/drivers/net/ice/ice_tm.c
@@ -663,6 +663,55 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev,
return ret;
}
+static int ice_set_node_rate(struct ice_hw *hw,
+ struct ice_tm_node *tm_node,
+ struct ice_sched_node *sched_node)
+{
+ enum ice_status status;
+ bool reset = false;
+ uint32_t peak = 0;
+ uint32_t committed = 0;
+ uint32_t rate;
+
+ if (tm_node == NULL || tm_node->shaper_profile == NULL) {
+ reset = true;
+ } else {
+ peak = (uint32_t)tm_node->shaper_profile->profile.peak.rate;
+ committed = (uint32_t)tm_node->shaper_profile->profile.committed.rate;
+ }
+
+ if (reset || peak == 0)
+ rate = ICE_SCHED_DFLT_BW;
+ else
+ rate = peak / 1000 * BITS_PER_BYTE;
+
+
+ status = ice_sched_set_node_bw_lmt(hw->port_info,
+ sched_node,
+ ICE_MAX_BW,
+ rate);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to set max bandwidth for node %u", tm_node->id);
+ return -EINVAL;
+ }
+
+ if (reset || committed == 0)
+ rate = ICE_SCHED_DFLT_BW;
+ else
+ rate = committed / 1000 * BITS_PER_BYTE;
+
+ status = ice_sched_set_node_bw_lmt(hw->port_info,
+ sched_node,
+ ICE_MIN_BW,
+ rate);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to set min bandwidth for node %u", tm_node->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ice_hierarchy_commit(struct rte_eth_dev *dev,
int clear_on_fail,
__rte_unused struct rte_tm_error *error)
@@ -673,13 +722,11 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
struct ice_tm_node *tm_node;
struct ice_sched_node *node;
- struct ice_sched_node *vsi_node;
+ struct ice_sched_node *vsi_node = NULL;
struct ice_sched_node *queue_node;
struct ice_tx_queue *txq;
struct ice_vsi *vsi;
int ret_val = ICE_SUCCESS;
- uint64_t peak = 0;
- uint64_t committed = 0;
uint8_t priority;
uint32_t i;
uint32_t idx_vsi_child;
@@ -704,6 +751,18 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
for (i = 0; i < vsi_layer; i++)
node = node->children[0];
vsi_node = node;
+
+ tm_node = TAILQ_FIRST(&pf->tm_conf.vsi_list);
+
+ ret_val = ice_set_node_rate(hw, tm_node, vsi_node);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR,
+ "configure vsi node %u bandwidth failed",
+ tm_node->id);
+ goto reset_vsi;
+ }
+
nb_vsi_child = vsi_node->num_children;
nb_qg = vsi_node->children[0]->num_children;
@@ -722,7 +781,7 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
if (ret_val) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
PMD_DRV_LOG(ERR, "start queue %u failed", qid);
- goto fail_clear;
+ goto reset_vsi;
}
txq = dev->data->tx_queues[qid];
q_teid = txq->q_teid;
@@ -730,7 +789,7 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
if (queue_node == NULL) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
PMD_DRV_LOG(ERR, "get queue %u node failed", qid);
- goto fail_clear;
+ goto reset_vsi;
}
if (queue_node->info.parent_teid == qgroup_sched_node->info.node_teid)
continue;
@@ -738,28 +797,19 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
if (ret_val) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
PMD_DRV_LOG(ERR, "move queue %u failed", qid);
- goto fail_clear;
+ goto reset_vsi;
}
}
- if (tm_node->reference_count != 0 && tm_node->shaper_profile) {
- uint32_t node_teid = qgroup_sched_node->info.node_teid;
- /* Transfer from Byte per seconds to Kbps */
- peak = tm_node->shaper_profile->profile.peak.rate;
- peak = peak / 1000 * BITS_PER_BYTE;
- ret_val = ice_sched_set_node_bw_lmt_per_tc(hw->port_info,
- node_teid,
- ICE_AGG_TYPE_Q,
- tm_node->tc,
- ICE_MAX_BW,
- (u32)peak);
- if (ret_val) {
- error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
- PMD_DRV_LOG(ERR,
- "configure queue group %u bandwidth failed",
- tm_node->id);
- goto fail_clear;
- }
+
+ ret_val = ice_set_node_rate(hw, tm_node, qgroup_sched_node);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR,
+ "configure queue group %u bandwidth failed",
+ tm_node->id);
+ goto reset_vsi;
}
+
priority = 7 - tm_node->priority;
ret_val = ice_sched_cfg_sibl_node_prio_lock(hw->port_info, qgroup_sched_node,
priority);
@@ -777,7 +827,7 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
if (idx_vsi_child >= nb_vsi_child) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
PMD_DRV_LOG(ERR, "too many queues");
- goto fail_clear;
+ goto reset_vsi;
}
}
@@ -786,37 +836,17 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
txq = dev->data->tx_queues[qid];
vsi = txq->vsi;
q_teid = txq->q_teid;
- if (tm_node->shaper_profile) {
- /* Transfer from Byte per seconds to Kbps */
- if (tm_node->shaper_profile->profile.peak.rate > 0) {
- peak = tm_node->shaper_profile->profile.peak.rate;
- peak = peak / 1000 * BITS_PER_BYTE;
- ret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,
- tm_node->tc, tm_node->id,
- ICE_MAX_BW, (u32)peak);
- if (ret_val) {
- error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
- PMD_DRV_LOG(ERR,
- "configure queue %u peak bandwidth failed",
- tm_node->id);
- goto fail_clear;
- }
- }
- if (tm_node->shaper_profile->profile.committed.rate > 0) {
- committed = tm_node->shaper_profile->profile.committed.rate;
- committed = committed / 1000 * BITS_PER_BYTE;
- ret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,
- tm_node->tc, tm_node->id,
- ICE_MIN_BW, (u32)committed);
- if (ret_val) {
- error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
- PMD_DRV_LOG(ERR,
- "configure queue %u committed bandwidth failed",
- tm_node->id);
- goto fail_clear;
- }
- }
+
+ queue_node = ice_sched_get_node(hw->port_info, q_teid);
+ ret_val = ice_set_node_rate(hw, tm_node, queue_node);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR,
+ "configure queue %u bandwidth failed",
+ tm_node->id);
+ goto reset_vsi;
}
+
priority = 7 - tm_node->priority;
ret_val = ice_cfg_vsi_q_priority(hw->port_info, 1,
&q_teid, &priority);
@@ -838,6 +868,8 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
return ret_val;
+reset_vsi:
+ ice_set_node_rate(hw, NULL, vsi_node);
fail_clear:
/* clear all the traffic manager configuration */
if (clear_on_fail) {
--
2.31.1
next prev parent reply other threads:[~2024-01-02 11:21 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-02 19:42 [PATCH 0/6] net/ice improve qos Qi Zhang
2024-01-02 19:42 ` [PATCH 1/6] net/ice: remove redundent code Qi Zhang
2024-01-02 19:42 ` Qi Zhang [this message]
2024-01-02 19:42 ` [PATCH 3/6] net/ice: support queue group weight configure Qi Zhang
2024-01-02 19:42 ` [PATCH 4/6] net/ice: refactor hardware Tx sched node config Qi Zhang
2024-01-02 19:42 ` [PATCH 5/6] net/ice: reset Tx sched node during commit Qi Zhang
2024-01-02 19:42 ` [PATCH 6/6] net/ice: support Tx sched commit before device start Qi Zhang
2024-01-04 2:28 ` [PATCH 0/6] net/ice improve qos Wu, Wenjun1
2024-01-04 2:57 ` Zhang, Qi Z
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240102194232.3614305-3-qi.z.zhang@intel.com \
--to=qi.z.zhang@intel.com \
--cc=dev@dpdk.org \
--cc=qiming.yang@intel.com \
--cc=wenjun1.wu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).