From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
Sachin Saxena <sachin.saxena@nxp.com>,
Gagandeep Singh <g.singh@nxp.com>
Cc: stable@dpdk.org
Subject: [v4 34/42] net/dpaa2: fix memory corruption in TM
Date: Wed, 23 Oct 2024 00:42:47 +0530
Message-ID: <20241022191256.516818-35-vanshika.shukla@nxp.com>
In-Reply-To: <20241022191256.516818-1-vanshika.shukla@nxp.com>
From: Gagandeep Singh <g.singh@nxp.com>
The driver was reserving memory in an array for only 8 queues, but it can
support configurations with many more queues.
This patch fixes the memory corruption by defining the queue array with
the correct size.
Fixes: 72100f0dee21 ("net/dpaa2: support level 2 in traffic management")
Cc: g.singh@nxp.com
Cc: stable@dpdk.org
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/dpaa2/dpaa2_tm.c | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
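A minimal standalone sketch of the overflow being fixed, assuming DPNI_MAX_TC
is 8 and that leaf node ids follow the Tx queue index, which can exceed 8; the
helper below is illustrative only, not the driver's code:

	#include <string.h>

	#define DPNI_MAX_TC 8

	static void mark_queue(unsigned int nb_tx_queues, unsigned int queue_id)
	{
		/* Before the fix the array was int conf[DPNI_MAX_TC]; with
		 * queue_id >= 8 the store below writes past the array and
		 * corrupts the stack. */
		int conf[nb_tx_queues];	/* sized to the configured queue count */

		memset(conf, 0, sizeof(conf));
		if (queue_id < nb_tx_queues)
			conf[queue_id] = 1;	/* always in bounds after the fix */
	}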
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index fb8c384ca4..ab3e355853 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -684,6 +684,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret, t;
+ bool conf_schedule = false;
/* Populate TCs */
LIST_FOREACH(channel_node, &priv->nodes, next) {
@@ -757,7 +758,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
LIST_FOREACH(channel_node, &priv->nodes, next) {
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues];
struct dpni_tx_priorities_cfg prio_cfg;
memset(&prio_cfg, 0, sizeof(prio_cfg));
@@ -767,6 +768,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (channel_node->level_id != CHANNEL_LEVEL)
continue;
+ conf_schedule = false;
LIST_FOREACH(leaf_node, &priv->nodes, next) {
struct dpaa2_queue *leaf_dpaa2_q;
uint8_t leaf_tc_id;
@@ -789,6 +791,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (leaf_node->parent != channel_node)
continue;
+ conf_schedule = true;
leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
leaf_tc_id = leaf_dpaa2_q->tc_index;
/* Process sibling leaf nodes */
@@ -829,8 +832,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
goto out;
}
is_wfq_grp = 1;
- conf[temp_leaf_node->id] = 1;
}
+ conf[temp_leaf_node->id] = 1;
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
@@ -851,6 +854,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
conf[leaf_node->id] = 1;
}
+ if (!conf_schedule)
+ continue;
+
if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
@@ -864,6 +870,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
prio_cfg.prio_group_A = 1;
prio_cfg.channel_idx = channel_node->channel_id;
+ DPAA2_PMD_DEBUG("########################################");
+ DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++)
+ DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d", t,
+ prio_cfg.tc_sched[t].mode,
+ prio_cfg.tc_sched[t].delta_bandwidth);
+
+ DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps"
+ " = %d", prio_cfg.prio_group_A,
+ prio_cfg.prio_group_B, prio_cfg.separate_groups);
ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
@@ -871,15 +887,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
"Scheduling Failed\n");
goto out;
}
- DPAA2_PMD_DEBUG("########################################");
- DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx);
- for (t = 0; t < DPNI_MAX_TC; t++) {
- DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
- DPAA2_PMD_DEBUG("delta = %d", prio_cfg.tc_sched[t].delta_bandwidth);
- }
- DPAA2_PMD_DEBUG("prioritya = %d", prio_cfg.prio_group_A);
- DPAA2_PMD_DEBUG("priorityb = %d", prio_cfg.prio_group_B);
- DPAA2_PMD_DEBUG("separate grps = %d", prio_cfg.separate_groups);
}
return 0;
--
2.25.1