* [v1 35/43] net/dpaa2: fix memory corruption in TM
[not found] <20240913055959.3246917-1-vanshika.shukla@nxp.com>
@ 2024-09-13 5:59 ` vanshika.shukla
[not found] ` <20240918075056.1838654-1-vanshika.shukla@nxp.com>
1 sibling, 0 replies; 2+ messages in thread
From: vanshika.shukla @ 2024-09-13 5:59 UTC (permalink / raw)
To: dev, Hemant Agrawal, Sachin Saxena, Gagandeep Singh; +Cc: stable
From: Gagandeep Singh <g.singh@nxp.com>
The driver was reserving memory in an array for 8 queues only,
but it can support configurations with many more queues.
This patch fixes the memory corruption issue by defining the
queue array with the correct size.
Fixes: 72100f0dee21 ("net/dpaa2: support level 2 in traffic management")
Cc: g.singh@nxp.com
Cc: stable@dpdk.org
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/dpaa2/dpaa2_tm.c | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index cb854964b4..83d0d669ce 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -684,6 +684,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret, t;
+ bool conf_schedule = false;
/* Populate TCs */
LIST_FOREACH(channel_node, &priv->nodes, next) {
@@ -757,7 +758,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
LIST_FOREACH(channel_node, &priv->nodes, next) {
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues];
struct dpni_tx_priorities_cfg prio_cfg;
memset(&prio_cfg, 0, sizeof(prio_cfg));
@@ -767,6 +768,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (channel_node->level_id != CHANNEL_LEVEL)
continue;
+ conf_schedule = false;
LIST_FOREACH(leaf_node, &priv->nodes, next) {
struct dpaa2_queue *leaf_dpaa2_q;
uint8_t leaf_tc_id;
@@ -789,6 +791,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (leaf_node->parent != channel_node)
continue;
+ conf_schedule = true;
leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
leaf_tc_id = leaf_dpaa2_q->tc_index;
/* Process sibling leaf nodes */
@@ -829,8 +832,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
goto out;
}
is_wfq_grp = 1;
- conf[temp_leaf_node->id] = 1;
}
+ conf[temp_leaf_node->id] = 1;
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
@@ -851,6 +854,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
conf[leaf_node->id] = 1;
}
+ if (!conf_schedule)
+ continue;
+
if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
@@ -864,6 +870,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
prio_cfg.prio_group_A = 1;
prio_cfg.channel_idx = channel_node->channel_id;
+ DPAA2_PMD_DEBUG("########################################\n");
+ DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++)
+ DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d\n", t,
+ prio_cfg.tc_sched[t].mode,
+ prio_cfg.tc_sched[t].delta_bandwidth);
+
+ DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps"
+ " = %d\n\n", prio_cfg.prio_group_A,
+ prio_cfg.prio_group_B, prio_cfg.separate_groups);
ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
@@ -871,15 +887,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
"Scheduling Failed\n");
goto out;
}
- DPAA2_PMD_DEBUG("########################################\n");
- DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
- for (t = 0; t < DPNI_MAX_TC; t++) {
- DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
- DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth);
- }
- DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A);
- DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B);
- DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups);
}
return 0;
--
2.25.1
^ permalink raw reply [flat|nested] 2+ messages in thread
* [v2 35/43] net/dpaa2: fix memory corruption in TM
[not found] ` <20240918075056.1838654-1-vanshika.shukla@nxp.com>
@ 2024-09-18 7:50 ` vanshika.shukla
0 siblings, 0 replies; 2+ messages in thread
From: vanshika.shukla @ 2024-09-18 7:50 UTC (permalink / raw)
To: dev, Hemant Agrawal, Sachin Saxena, Gagandeep Singh; +Cc: stable
From: Gagandeep Singh <g.singh@nxp.com>
The driver was reserving memory in an array for 8 queues only,
but it can support configurations with many more queues.
This patch fixes the memory corruption issue by defining the
queue array with the correct size.
Fixes: 72100f0dee21 ("net/dpaa2: support level 2 in traffic management")
Cc: g.singh@nxp.com
Cc: stable@dpdk.org
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/dpaa2/dpaa2_tm.c | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index cb854964b4..83d0d669ce 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -684,6 +684,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret, t;
+ bool conf_schedule = false;
/* Populate TCs */
LIST_FOREACH(channel_node, &priv->nodes, next) {
@@ -757,7 +758,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
LIST_FOREACH(channel_node, &priv->nodes, next) {
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues];
struct dpni_tx_priorities_cfg prio_cfg;
memset(&prio_cfg, 0, sizeof(prio_cfg));
@@ -767,6 +768,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (channel_node->level_id != CHANNEL_LEVEL)
continue;
+ conf_schedule = false;
LIST_FOREACH(leaf_node, &priv->nodes, next) {
struct dpaa2_queue *leaf_dpaa2_q;
uint8_t leaf_tc_id;
@@ -789,6 +791,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (leaf_node->parent != channel_node)
continue;
+ conf_schedule = true;
leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
leaf_tc_id = leaf_dpaa2_q->tc_index;
/* Process sibling leaf nodes */
@@ -829,8 +832,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
goto out;
}
is_wfq_grp = 1;
- conf[temp_leaf_node->id] = 1;
}
+ conf[temp_leaf_node->id] = 1;
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
@@ -851,6 +854,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
conf[leaf_node->id] = 1;
}
+ if (!conf_schedule)
+ continue;
+
if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
@@ -864,6 +870,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
prio_cfg.prio_group_A = 1;
prio_cfg.channel_idx = channel_node->channel_id;
+ DPAA2_PMD_DEBUG("########################################\n");
+ DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++)
+ DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d\n", t,
+ prio_cfg.tc_sched[t].mode,
+ prio_cfg.tc_sched[t].delta_bandwidth);
+
+ DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps"
+ " = %d\n\n", prio_cfg.prio_group_A,
+ prio_cfg.prio_group_B, prio_cfg.separate_groups);
ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
@@ -871,15 +887,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
"Scheduling Failed\n");
goto out;
}
- DPAA2_PMD_DEBUG("########################################\n");
- DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
- for (t = 0; t < DPNI_MAX_TC; t++) {
- DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
- DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth);
- }
- DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A);
- DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B);
- DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups);
}
return 0;
--
2.25.1
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2024-09-18 7:51 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <20240913055959.3246917-1-vanshika.shukla@nxp.com>
2024-09-13 5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
[not found] ` <20240918075056.1838654-1-vanshika.shukla@nxp.com>
2024-09-18 7:50 ` [v2 " vanshika.shukla
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).