* [v3 35/43] net/dpaa2: fix memory corruption in TM
From: vanshika.shukla @ 2024-10-14 12:01 UTC
To: dev, Hemant Agrawal, Sachin Saxena, Gagandeep Singh; +Cc: stable
From: Gagandeep Singh <g.singh@nxp.com>
The driver was reserving memory in an array for only 8 queues,
but it can support configurations with many more queues.
This patch fixes the memory corruption by defining the queue
array with the correct size.
Fixes: 72100f0dee21 ("net/dpaa2: support level 2 in traffic management")
Cc: g.singh@nxp.com
Cc: stable@dpdk.org
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
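To make the failure mode concrete, here is a minimal, hypothetical standalone
C sketch of the pattern this patch changes; it is illustrative only and does
not reuse the driver code. The names commit_hierarchy and nb_tx_queues are
placeholders mirroring the diff, and DPNI_MAX_TC is assumed to be 8, as
implied by the commit message.

/*
 * Hypothetical sketch of the overflow fixed in this patch -- not the
 * driver code itself.  Before the fix, a per-queue marker array was
 * declared as conf[DPNI_MAX_TC] (8 entries) but indexed by the Tx
 * queue id, so configurations with more than 8 Tx queues wrote past
 * the end of the array.  Sizing the array by the actual queue count
 * keeps every write in bounds.
 */
#include <stdio.h>
#include <string.h>

#define DPNI_MAX_TC 8	/* fixed bound used before the fix */

static void commit_hierarchy(unsigned int nb_tx_queues)
{
	/* Before: int conf[DPNI_MAX_TC];  overflows when nb_tx_queues > 8 */
	int conf[nb_tx_queues];	/* after: sized by the real queue count */
	unsigned int id;

	memset(conf, 0, sizeof(conf));
	for (id = 0; id < nb_tx_queues; id++)
		conf[id] = 1;	/* per-queue "configured" marker, in bounds */

	printf("marked %u Tx queues as configured\n", nb_tx_queues);
}

int main(void)
{
	commit_hierarchy(16);	/* e.g. 16 Tx queues, more than DPNI_MAX_TC */
	return 0;
}

In the driver itself, priv->nb_tx_queues plays the role of nb_tx_queues here,
and the marker array is the conf[] variable changed in the hunk below.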
---
drivers/net/dpaa2/dpaa2_tm.c | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index 97d65e7181..14c47b41be 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -684,6 +684,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret, t;
+ bool conf_schedule = false;
/* Populate TCs */
LIST_FOREACH(channel_node, &priv->nodes, next) {
@@ -757,7 +758,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
LIST_FOREACH(channel_node, &priv->nodes, next) {
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues];
struct dpni_tx_priorities_cfg prio_cfg;
memset(&prio_cfg, 0, sizeof(prio_cfg));
@@ -767,6 +768,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (channel_node->level_id != CHANNEL_LEVEL)
continue;
+ conf_schedule = false;
LIST_FOREACH(leaf_node, &priv->nodes, next) {
struct dpaa2_queue *leaf_dpaa2_q;
uint8_t leaf_tc_id;
@@ -789,6 +791,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (leaf_node->parent != channel_node)
continue;
+ conf_schedule = true;
leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
leaf_tc_id = leaf_dpaa2_q->tc_index;
/* Process sibling leaf nodes */
@@ -829,8 +832,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
goto out;
}
is_wfq_grp = 1;
- conf[temp_leaf_node->id] = 1;
}
+ conf[temp_leaf_node->id] = 1;
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
@@ -851,6 +854,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
conf[leaf_node->id] = 1;
}
+ if (!conf_schedule)
+ continue;
+
if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
@@ -864,6 +870,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
prio_cfg.prio_group_A = 1;
prio_cfg.channel_idx = channel_node->channel_id;
+ DPAA2_PMD_DEBUG("########################################");
+ DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++)
+ DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d", t,
+ prio_cfg.tc_sched[t].mode,
+ prio_cfg.tc_sched[t].delta_bandwidth);
+
+ DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps"
+ " = %d", prio_cfg.prio_group_A,
+ prio_cfg.prio_group_B, prio_cfg.separate_groups);
ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
@@ -871,15 +887,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
"Scheduling Failed\n");
goto out;
}
- DPAA2_PMD_DEBUG("########################################");
- DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx);
- for (t = 0; t < DPNI_MAX_TC; t++) {
- DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
- DPAA2_PMD_DEBUG("delta = %d", prio_cfg.tc_sched[t].delta_bandwidth);
- }
- DPAA2_PMD_DEBUG("prioritya = %d", prio_cfg.prio_group_A);
- DPAA2_PMD_DEBUG("priorityb = %d", prio_cfg.prio_group_B);
- DPAA2_PMD_DEBUG("separate grps = %d", prio_cfg.separate_groups);
}
return 0;
--
2.25.1
* [v4 19/42] bus/fslmc: fix coverity issue
From: vanshika.shukla @ 2024-10-22 19:12 UTC
To: dev, Hemant Agrawal, Sachin Saxena, Youri Querry, Nipun Gupta,
Roy Pledge
Cc: stable, Rohit Raj
From: Rohit Raj <rohit.raj@nxp.com>
Fix issues reported by the NXP internal Coverity scan: the query
helpers dereferenced the response pointer returned by
qbman_swp_mc_complete() before checking it (and the NULL check was
applied to the caller-supplied output pointer, which is never NULL),
and qbman_cgr_attr_wred_dp_decompose() could overflow 32-bit
arithmetic when computing the maximum threshold.
Fixes: 64f131a82fbe ("bus/fslmc: add qbman debug")
Cc: hemant.agrawal@nxp.com
Cc: stable@dpdk.org
Signed-off-by: Rohit Raj <rohit.raj@nxp.com>
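Every query hunk below fixes the same shape of bug: the old code copied
through the pointer returned by qbman_swp_mc_complete() before checking it,
and the subsequent if (!r) test looked at the caller-supplied output buffer,
which cannot be NULL at that point. The following is a minimal, hypothetical
C sketch of the before/after pattern with stand-in types and a stubbed
completion call; it is not the QBMAN code.

/*
 * Hypothetical before/after sketch of the NULL-check fix; query_rslt and
 * mc_complete_stub() are stand-ins, not the real QBMAN definitions.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct query_rslt { unsigned char verb; unsigned char data[63]; };

static struct query_rslt *mc_complete_stub(void)
{
	return NULL;	/* simulate "no response" from the management command */
}

/* Before: dereferences the returned pointer, then checks the wrong one. */
static int query_buggy(struct query_rslt *r)
{
	*r = *mc_complete_stub();	/* NULL dereference on a failed command */
	if (!r)				/* never true: r is the caller's buffer */
		return -EIO;
	return 0;
}

/* After: keep the returned pointer, check it, and only then copy it out. */
static int query_fixed(struct query_rslt *r)
{
	struct query_rslt *rslt = mc_complete_stub();

	if (!rslt) {
		fprintf(stderr, "query failed, no response\n");
		return -EIO;
	}
	*r = *rslt;
	return 0;
}

int main(void)
{
	struct query_rslt r;

	(void)query_buggy;	/* defined only for contrast; calling it would crash */
	memset(&r, 0, sizeof(r));
	return query_fixed(&r) == 0 ? 0 : 1;
}

The same reordering is applied to qbman_bp_query(), qbman_fq_query(),
qbman_cgr_query(), qbman_cgr_wred_query() and qbman_wqchan_query() in the
diff below.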
---
drivers/bus/fslmc/qbman/qbman_debug.c | 49 +++++++++++++++++----------
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/drivers/bus/fslmc/qbman/qbman_debug.c b/drivers/bus/fslmc/qbman/qbman_debug.c
index eea06988ff..0e471ec3fd 100644
--- a/drivers/bus/fslmc/qbman/qbman_debug.c
+++ b/drivers/bus/fslmc/qbman/qbman_debug.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2015 Freescale Semiconductor, Inc.
- * Copyright 2018-2020 NXP
+ * Copyright 2018-2020,2022 NXP
*/
#include "compat.h"
@@ -37,6 +37,7 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
struct qbman_bp_query_rslt *r)
{
struct qbman_bp_query_desc *p;
+ struct qbman_bp_query_rslt *bp_query_rslt;
/* Start the management command */
p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
@@ -47,14 +48,16 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
p->bpid = bpid;
/* Complete the management command */
- *r = *(struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_BP_QUERY);
- if (!r) {
+ bp_query_rslt = (struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s,
+ p, QBMAN_BP_QUERY);
+ if (!bp_query_rslt) {
pr_err("qbman: Query BPID %d failed, no response\n",
bpid);
return -EIO;
}
+ *r = *bp_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
@@ -202,20 +205,23 @@ int qbman_fq_query(struct qbman_swp *s, uint32_t fqid,
struct qbman_fq_query_rslt *r)
{
struct qbman_fq_query_desc *p;
+ struct qbman_fq_query_rslt *fq_query_rslt;
p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
if (!p)
return -EBUSY;
p->fqid = fqid;
- *r = *(struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_FQ_QUERY);
- if (!r) {
+ fq_query_rslt = (struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s,
+ p, QBMAN_FQ_QUERY);
+ if (!fq_query_rslt) {
pr_err("qbman: Query FQID %d failed, no response\n",
fqid);
return -EIO;
}
+ *r = *fq_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY);
@@ -398,20 +404,23 @@ int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid,
struct qbman_cgr_query_rslt *r)
{
struct qbman_cgr_query_desc *p;
+ struct qbman_cgr_query_rslt *cgr_query_rslt;
p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s);
if (!p)
return -EBUSY;
p->cgid = cgid;
- *r = *(struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_CGR_QUERY);
- if (!r) {
+ cgr_query_rslt = (struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s,
+ p, QBMAN_CGR_QUERY);
+ if (!cgr_query_rslt) {
pr_err("qbman: Query CGID %d failed, no response\n",
cgid);
return -EIO;
}
+ *r = *cgr_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_CGR_QUERY);
@@ -473,20 +482,23 @@ int qbman_cgr_wred_query(struct qbman_swp *s, uint32_t cgid,
struct qbman_wred_query_rslt *r)
{
struct qbman_cgr_query_desc *p;
+ struct qbman_wred_query_rslt *wred_query_rslt;
p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s);
if (!p)
return -EBUSY;
p->cgid = cgid;
- *r = *(struct qbman_wred_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_WRED_QUERY);
- if (!r) {
+ wred_query_rslt = (struct qbman_wred_query_rslt *)qbman_swp_mc_complete(
+ s, p, QBMAN_WRED_QUERY);
+ if (!wred_query_rslt) {
pr_err("qbman: Query CGID WRED %d failed, no response\n",
cgid);
return -EIO;
}
+ *r = *wred_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WRED_QUERY);
@@ -527,7 +539,7 @@ void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
if (mn == 0)
*maxth = ma;
else
- *maxth = ((ma+256) * (1<<(mn-1)));
+ *maxth = ((uint64_t)(ma+256) * (1<<(mn-1)));
if (step_s == 0)
*minth = *maxth - step_i;
@@ -630,6 +642,7 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid,
struct qbman_wqchan_query_rslt *r)
{
struct qbman_wqchan_query_desc *p;
+ struct qbman_wqchan_query_rslt *wqchan_query_rslt;
/* Start the management command */
p = (struct qbman_wqchan_query_desc *)qbman_swp_mc_start(s);
@@ -640,14 +653,16 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid,
p->chid = chanid;
/* Complete the management command */
- *r = *(struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_WQ_QUERY);
- if (!r) {
+ wqchan_query_rslt = (struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete(
+ s, p, QBMAN_WQ_QUERY);
+ if (!wqchan_query_rslt) {
pr_err("qbman: Query WQ Channel %d failed, no response\n",
chanid);
return -EIO;
}
+ *r = *wqchan_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WQ_QUERY);
--
2.25.1
* [v4 34/42] net/dpaa2: fix memory corruption in TM
From: vanshika.shukla @ 2024-10-22 19:12 UTC
To: dev, Hemant Agrawal, Sachin Saxena, Gagandeep Singh; +Cc: stable
From: Gagandeep Singh <g.singh@nxp.com>
The driver was reserving memory in an array for only 8 queues,
but it can support configurations with many more queues.
This patch fixes the memory corruption by defining the queue
array with the correct size.
Fixes: 72100f0dee21 ("net/dpaa2: support level 2 in traffic management")
Cc: g.singh@nxp.com
Cc: stable@dpdk.org
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/dpaa2/dpaa2_tm.c | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index fb8c384ca4..ab3e355853 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -684,6 +684,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret, t;
+ bool conf_schedule = false;
/* Populate TCs */
LIST_FOREACH(channel_node, &priv->nodes, next) {
@@ -757,7 +758,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
LIST_FOREACH(channel_node, &priv->nodes, next) {
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues];
struct dpni_tx_priorities_cfg prio_cfg;
memset(&prio_cfg, 0, sizeof(prio_cfg));
@@ -767,6 +768,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (channel_node->level_id != CHANNEL_LEVEL)
continue;
+ conf_schedule = false;
LIST_FOREACH(leaf_node, &priv->nodes, next) {
struct dpaa2_queue *leaf_dpaa2_q;
uint8_t leaf_tc_id;
@@ -789,6 +791,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (leaf_node->parent != channel_node)
continue;
+ conf_schedule = true;
leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
leaf_tc_id = leaf_dpaa2_q->tc_index;
/* Process sibling leaf nodes */
@@ -829,8 +832,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
goto out;
}
is_wfq_grp = 1;
- conf[temp_leaf_node->id] = 1;
}
+ conf[temp_leaf_node->id] = 1;
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
@@ -851,6 +854,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
conf[leaf_node->id] = 1;
}
+ if (!conf_schedule)
+ continue;
+
if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
@@ -864,6 +870,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
prio_cfg.prio_group_A = 1;
prio_cfg.channel_idx = channel_node->channel_id;
+ DPAA2_PMD_DEBUG("########################################");
+ DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++)
+ DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d", t,
+ prio_cfg.tc_sched[t].mode,
+ prio_cfg.tc_sched[t].delta_bandwidth);
+
+ DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps"
+ " = %d", prio_cfg.prio_group_A,
+ prio_cfg.prio_group_B, prio_cfg.separate_groups);
ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
@@ -871,15 +887,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
"Scheduling Failed\n");
goto out;
}
- DPAA2_PMD_DEBUG("########################################");
- DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx);
- for (t = 0; t < DPNI_MAX_TC; t++) {
- DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
- DPAA2_PMD_DEBUG("delta = %d", prio_cfg.tc_sched[t].delta_bandwidth);
- }
- DPAA2_PMD_DEBUG("prioritya = %d", prio_cfg.prio_group_A);
- DPAA2_PMD_DEBUG("priorityb = %d", prio_cfg.prio_group_B);
- DPAA2_PMD_DEBUG("separate grps = %d", prio_cfg.separate_groups);
}
return 0;
--
2.25.1
* [v5 19/42] bus/fslmc: fix coverity issue
From: vanshika.shukla @ 2024-10-23 11:59 UTC
To: dev, Hemant Agrawal, Sachin Saxena, Nipun Gupta, Roy Pledge,
Youri Querry
Cc: stable, Rohit Raj
From: Rohit Raj <rohit.raj@nxp.com>
Fix issues reported by the NXP internal Coverity scan: the query
helpers dereferenced the response pointer returned by
qbman_swp_mc_complete() before checking it (and the NULL check was
applied to the caller-supplied output pointer, which is never NULL),
and qbman_cgr_attr_wred_dp_decompose() could overflow 32-bit
arithmetic when computing the maximum threshold.
Fixes: 64f131a82fbe ("bus/fslmc: add qbman debug")
Cc: hemant.agrawal@nxp.com
Cc: stable@dpdk.org
Signed-off-by: Rohit Raj <rohit.raj@nxp.com>
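The other finding addressed by this patch is in
qbman_cgr_attr_wred_dp_decompose(): (ma + 256) * (1 << (mn - 1)) is evaluated
in 32-bit arithmetic before being stored into the 64-bit *maxth, so large
exponents overflow before the widening. Below is a minimal, hypothetical
sketch of that behaviour; the values of ma and mn are made up and do not
follow the real WRED encoding.

/*
 * Hypothetical sketch of the overflow-before-widen issue; ma and mn are
 * made-up values, not decoded from a real WRED dp word.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ma = 200, mn = 30;	/* large exponent for illustration */
	uint64_t maxth_buggy, maxth_fixed;

	/* 32-bit product wraps before it is widened to 64 bits. */
	maxth_buggy = ((ma + 256) * (1 << (mn - 1)));

	/* Casting one operand first makes the multiply 64-bit, as in the fix. */
	maxth_fixed = ((uint64_t)(ma + 256) * (1 << (mn - 1)));

	printf("buggy = %" PRIu64 ", fixed = %" PRIu64 "\n",
	       maxth_buggy, maxth_fixed);
	return 0;
}

Casting one operand to uint64_t before the multiplication, as the hunk below
does, performs the whole product in 64 bits.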
---
drivers/bus/fslmc/qbman/qbman_debug.c | 49 +++++++++++++++++----------
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/drivers/bus/fslmc/qbman/qbman_debug.c b/drivers/bus/fslmc/qbman/qbman_debug.c
index eea06988ff..0e471ec3fd 100644
--- a/drivers/bus/fslmc/qbman/qbman_debug.c
+++ b/drivers/bus/fslmc/qbman/qbman_debug.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2015 Freescale Semiconductor, Inc.
- * Copyright 2018-2020 NXP
+ * Copyright 2018-2020,2022 NXP
*/
#include "compat.h"
@@ -37,6 +37,7 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
struct qbman_bp_query_rslt *r)
{
struct qbman_bp_query_desc *p;
+ struct qbman_bp_query_rslt *bp_query_rslt;
/* Start the management command */
p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
@@ -47,14 +48,16 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
p->bpid = bpid;
/* Complete the management command */
- *r = *(struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_BP_QUERY);
- if (!r) {
+ bp_query_rslt = (struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s,
+ p, QBMAN_BP_QUERY);
+ if (!bp_query_rslt) {
pr_err("qbman: Query BPID %d failed, no response\n",
bpid);
return -EIO;
}
+ *r = *bp_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
@@ -202,20 +205,23 @@ int qbman_fq_query(struct qbman_swp *s, uint32_t fqid,
struct qbman_fq_query_rslt *r)
{
struct qbman_fq_query_desc *p;
+ struct qbman_fq_query_rslt *fq_query_rslt;
p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
if (!p)
return -EBUSY;
p->fqid = fqid;
- *r = *(struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_FQ_QUERY);
- if (!r) {
+ fq_query_rslt = (struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s,
+ p, QBMAN_FQ_QUERY);
+ if (!fq_query_rslt) {
pr_err("qbman: Query FQID %d failed, no response\n",
fqid);
return -EIO;
}
+ *r = *fq_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY);
@@ -398,20 +404,23 @@ int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid,
struct qbman_cgr_query_rslt *r)
{
struct qbman_cgr_query_desc *p;
+ struct qbman_cgr_query_rslt *cgr_query_rslt;
p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s);
if (!p)
return -EBUSY;
p->cgid = cgid;
- *r = *(struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_CGR_QUERY);
- if (!r) {
+ cgr_query_rslt = (struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s,
+ p, QBMAN_CGR_QUERY);
+ if (!cgr_query_rslt) {
pr_err("qbman: Query CGID %d failed, no response\n",
cgid);
return -EIO;
}
+ *r = *cgr_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_CGR_QUERY);
@@ -473,20 +482,23 @@ int qbman_cgr_wred_query(struct qbman_swp *s, uint32_t cgid,
struct qbman_wred_query_rslt *r)
{
struct qbman_cgr_query_desc *p;
+ struct qbman_wred_query_rslt *wred_query_rslt;
p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s);
if (!p)
return -EBUSY;
p->cgid = cgid;
- *r = *(struct qbman_wred_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_WRED_QUERY);
- if (!r) {
+ wred_query_rslt = (struct qbman_wred_query_rslt *)qbman_swp_mc_complete(
+ s, p, QBMAN_WRED_QUERY);
+ if (!wred_query_rslt) {
pr_err("qbman: Query CGID WRED %d failed, no response\n",
cgid);
return -EIO;
}
+ *r = *wred_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WRED_QUERY);
@@ -527,7 +539,7 @@ void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
if (mn == 0)
*maxth = ma;
else
- *maxth = ((ma+256) * (1<<(mn-1)));
+ *maxth = ((uint64_t)(ma+256) * (1<<(mn-1)));
if (step_s == 0)
*minth = *maxth - step_i;
@@ -630,6 +642,7 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid,
struct qbman_wqchan_query_rslt *r)
{
struct qbman_wqchan_query_desc *p;
+ struct qbman_wqchan_query_rslt *wqchan_query_rslt;
/* Start the management command */
p = (struct qbman_wqchan_query_desc *)qbman_swp_mc_start(s);
@@ -640,14 +653,16 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid,
p->chid = chanid;
/* Complete the management command */
- *r = *(struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete(s, p,
- QBMAN_WQ_QUERY);
- if (!r) {
+ wqchan_query_rslt = (struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete(
+ s, p, QBMAN_WQ_QUERY);
+ if (!wqchan_query_rslt) {
pr_err("qbman: Query WQ Channel %d failed, no response\n",
chanid);
return -EIO;
}
+ *r = *wqchan_query_rslt;
+
/* Decode the outcome */
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WQ_QUERY);
--
2.25.1
* [v5 34/42] net/dpaa2: fix memory corruption in TM
From: vanshika.shukla @ 2024-10-23 11:59 UTC
To: dev, Hemant Agrawal, Sachin Saxena, Gagandeep Singh; +Cc: stable
From: Gagandeep Singh <g.singh@nxp.com>
The driver was reserving memory in an array for only 8 queues,
but it can support configurations with many more queues.
This patch fixes the memory corruption by defining the queue
array with the correct size.
Fixes: 72100f0dee21 ("net/dpaa2: support level 2 in traffic management")
Cc: g.singh@nxp.com
Cc: stable@dpdk.org
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/dpaa2/dpaa2_tm.c | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index fb8c384ca4..ab3e355853 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -684,6 +684,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret, t;
+ bool conf_schedule = false;
/* Populate TCs */
LIST_FOREACH(channel_node, &priv->nodes, next) {
@@ -757,7 +758,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
LIST_FOREACH(channel_node, &priv->nodes, next) {
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues];
struct dpni_tx_priorities_cfg prio_cfg;
memset(&prio_cfg, 0, sizeof(prio_cfg));
@@ -767,6 +768,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (channel_node->level_id != CHANNEL_LEVEL)
continue;
+ conf_schedule = false;
LIST_FOREACH(leaf_node, &priv->nodes, next) {
struct dpaa2_queue *leaf_dpaa2_q;
uint8_t leaf_tc_id;
@@ -789,6 +791,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
if (leaf_node->parent != channel_node)
continue;
+ conf_schedule = true;
leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
leaf_tc_id = leaf_dpaa2_q->tc_index;
/* Process sibling leaf nodes */
@@ -829,8 +832,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
goto out;
}
is_wfq_grp = 1;
- conf[temp_leaf_node->id] = 1;
}
+ conf[temp_leaf_node->id] = 1;
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
@@ -851,6 +854,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
}
conf[leaf_node->id] = 1;
}
+ if (!conf_schedule)
+ continue;
+
if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
@@ -864,6 +870,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
prio_cfg.prio_group_A = 1;
prio_cfg.channel_idx = channel_node->channel_id;
+ DPAA2_PMD_DEBUG("########################################");
+ DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++)
+ DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d", t,
+ prio_cfg.tc_sched[t].mode,
+ prio_cfg.tc_sched[t].delta_bandwidth);
+
+ DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps"
+ " = %d", prio_cfg.prio_group_A,
+ prio_cfg.prio_group_B, prio_cfg.separate_groups);
ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
@@ -871,15 +887,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
"Scheduling Failed\n");
goto out;
}
- DPAA2_PMD_DEBUG("########################################");
- DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx);
- for (t = 0; t < DPNI_MAX_TC; t++) {
- DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
- DPAA2_PMD_DEBUG("delta = %d", prio_cfg.tc_sched[t].delta_bandwidth);
- }
- DPAA2_PMD_DEBUG("prioritya = %d", prio_cfg.prio_group_A);
- DPAA2_PMD_DEBUG("priorityb = %d", prio_cfg.prio_group_B);
- DPAA2_PMD_DEBUG("separate grps = %d", prio_cfg.separate_groups);
}
return 0;
--
2.25.1