* [PATCH 01/19] common/cnxk: add new TM tree for SDP interface
@ 2025-09-01 7:30 Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 02/19] net/cnxk: new " Nithin Dabilpuram
` (17 more replies)
0 siblings, 18 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
Create a new default tree for the SDP interface if more than one TX
queue is requested. This helps to backpressure each queue independently
when they are created with separate channels.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/common/cnxk/roc_nix.h | 2 +
drivers/common/cnxk/roc_nix_priv.h | 2 +
drivers/common/cnxk/roc_nix_tm.c | 158 ++++++++++++++++++
drivers/common/cnxk/roc_nix_tm_ops.c | 5 +-
drivers/common/cnxk/roc_nix_tm_utils.c | 2 +-
.../common/cnxk/roc_platform_base_symbols.c | 1 +
6 files changed, 168 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index a9cdc42617..a156d83200 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -596,6 +596,7 @@ enum roc_nix_tm_tree {
ROC_NIX_TM_DEFAULT = 0,
ROC_NIX_TM_RLIMIT,
ROC_NIX_TM_PFC,
+ ROC_NIX_TM_SDP,
ROC_NIX_TM_USER,
ROC_NIX_TM_TREE_MAX,
};
@@ -768,6 +769,7 @@ int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_sdp_prepare_tree(struct roc_nix *roc_nix);
bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
int __roc_api roc_nix_tm_mark_config(struct roc_nix *roc_nix,
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index dc3450a3d4..d7ea3c6be2 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -387,6 +387,8 @@ nix_tm_tree2str(enum roc_nix_tm_tree tree)
return "Rate Limit Tree";
else if (tree == ROC_NIX_TM_PFC)
return "PFC Tree";
+ else if (tree == ROC_NIX_TM_SDP)
+ return "SDP Tree";
else if (tree == ROC_NIX_TM_USER)
return "User Tree";
return "???";
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index abfe80978b..2771fd8fc4 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -1890,6 +1890,164 @@ roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
return rc;
}
+int
+roc_nix_tm_sdp_prepare_tree(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint32_t nonleaf_id = nix->nb_tx_queues;
+ uint32_t tl2_node_id, tl3_node_id;
+ uint8_t leaf_lvl, lvl, lvl_start;
+ struct nix_tm_node *node = NULL;
+ uint32_t parent, i;
+ int rc = -ENOMEM;
+
+ parent = ROC_NIX_TM_NODE_ID_INVALID;
+ leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE : ROC_TM_LVL_SCH4);
+
+ /* TL1 node */
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_ROOT;
+ node->tree = ROC_NIX_TM_SDP;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ lvl_start = ROC_TM_LVL_SCH1;
+ if (roc_nix_is_pf(roc_nix)) {
+ /* TL2 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_SCH1;
+ node->tree = ROC_NIX_TM_SDP;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ lvl_start = ROC_TM_LVL_SCH2;
+ tl2_node_id = nonleaf_id;
+ nonleaf_id++;
+ } else {
+ tl2_node_id = parent;
+ }
+
+ /* Allocate TL3 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = tl2_node_id;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl_start;
+ node->tree = ROC_NIX_TM_SDP;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ tl3_node_id = nonleaf_id;
+ nonleaf_id++;
+ lvl_start++;
+
+ for (i = 0; i < nix->nb_tx_queues; i++) {
+ parent = tl3_node_id;
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl_start;
+ node->tree = ROC_NIX_TM_SDP;
+ /* For SDP, if BP enabled use channel to PAUSE the corresponding queue */
+ node->rel_chan = (i % nix->tx_chan_cnt);
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_SDP;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = i;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = leaf_lvl;
+ node->tree = ROC_NIX_TM_SDP;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+error:
+ nix_tm_node_free(node);
+ return rc;
+}
+
int
nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
{
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index b89f08ac66..951c310a56 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -1035,7 +1035,10 @@ roc_nix_tm_init(struct roc_nix *roc_nix)
}
/* Prepare default tree */
- rc = nix_tm_prepare_default_tree(roc_nix);
+ if (roc_nix_is_sdp(roc_nix) && (nix->nb_tx_queues > 1))
+ rc = roc_nix_tm_sdp_prepare_tree(roc_nix);
+ else
+ rc = nix_tm_prepare_default_tree(roc_nix);
if (rc) {
plt_err("failed to prepare default tm tree, rc=%d", rc);
return rc;
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 4a09cc2aae..eaf6f9e4c7 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -582,7 +582,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
/* Configure TL4 to send to SDP channel instead of CGX/LBK */
if (nix->sdp_link) {
- relchan = nix->tx_chan_base & 0xff;
+ relchan = (nix->tx_chan_base & 0xff) + node->rel_chan;
plt_tm_dbg("relchan=%u schq=%u tx_chan_cnt=%u", relchan, schq,
nix->tx_chan_cnt);
reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
diff --git a/drivers/common/cnxk/roc_platform_base_symbols.c b/drivers/common/cnxk/roc_platform_base_symbols.c
index 7f0fe601ad..cc35c46456 100644
--- a/drivers/common/cnxk/roc_platform_base_symbols.c
+++ b/drivers/common/cnxk/roc_platform_base_symbols.c
@@ -221,6 +221,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_rq_dump)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_cq_dump)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_dump)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_tm_dump)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_tm_sdp_prepare_tree)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_dump)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_dev_dump)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_outb_cpt_lfs_dump)
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 02/19] net/cnxk: new tree for SDP interface
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 03/19] net/cnxk: disable CQ when SQ stopped Nithin Dabilpuram
` (16 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
Create a new default tree for the SDP interface if more than one TX
queue is requested. This helps to backpressure each queue independently
when they are created with separate channels.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/net/cnxk/cnxk_ethdev.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 6c723c9cec..bfef06ae4a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1451,7 +1451,10 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
goto free_nix_lf;
}
- rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
+ if (roc_nix_is_sdp(&dev->nix) && nb_txq > 1)
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_SDP, false);
+ else
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
if (rc) {
plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
goto tm_fini;
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 03/19] net/cnxk: disable CQ when SQ stopped
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 02/19] net/cnxk: new " Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 04/19] net/cnxk: update scatter check as warning for SDP Nithin Dabilpuram
` (15 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
Drain all CQ buffers and close the CQ when an SQ that has completion
enabled is about to stop.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/net/cnxk/cn10k_ethdev.c | 16 ++++++++--------
drivers/net/cnxk/cn20k_ethdev.c | 11 ++++++-----
drivers/net/cnxk/cnxk_ethdev.c | 11 ++++++++++-
3 files changed, 24 insertions(+), 14 deletions(-)
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 9c1621dbfa..23a2341c8b 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -198,21 +198,21 @@ cn10k_nix_tx_compl_setup(struct cnxk_eth_dev *dev,
static void
cn10k_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
+ struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qid];
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *nix = &dev->nix;
- struct cn10k_eth_txq *txq;
- cnxk_nix_tx_queue_release(eth_dev, qid);
- txq = eth_dev->data->tx_queues[qid];
-
- if (nix->tx_compl_ena)
+ if (nix->tx_compl_ena) {
+ /* First process all CQ entries */
+ handle_tx_completion_pkts(txq, 0);
plt_free(txq->tx_compl.ptr);
+ }
+ cnxk_nix_tx_queue_release(eth_dev, qid);
}
static int
-cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
- uint16_t nb_desc, unsigned int socket,
- const struct rte_eth_txconf *tx_conf)
+cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t nb_desc,
+ unsigned int socket, const struct rte_eth_txconf *tx_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *nix = &dev->nix;
diff --git a/drivers/net/cnxk/cn20k_ethdev.c b/drivers/net/cnxk/cn20k_ethdev.c
index 376e334588..a7ef1dd386 100644
--- a/drivers/net/cnxk/cn20k_ethdev.c
+++ b/drivers/net/cnxk/cn20k_ethdev.c
@@ -192,15 +192,16 @@ cn20k_nix_tx_compl_setup(struct cnxk_eth_dev *dev, struct cn20k_eth_txq *txq, st
static void
cn20k_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
+ struct cn20k_eth_txq *txq = eth_dev->data->tx_queues[qid];
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *nix = &dev->nix;
- struct cn20k_eth_txq *txq;
- cnxk_nix_tx_queue_release(eth_dev, qid);
- txq = eth_dev->data->tx_queues[qid];
-
- if (nix->tx_compl_ena)
+ if (nix->tx_compl_ena) {
+ /* First process all CQ entries */
+ handle_tx_completion_pkts(txq, 0);
plt_free(txq->tx_compl.ptr);
+ }
+ cnxk_nix_tx_queue_release(eth_dev, qid);
}
static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index bfef06ae4a..4550fca3b5 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -570,6 +570,7 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
struct cnxk_eth_txq_sp *txq_sp;
struct cnxk_eth_dev *dev;
struct roc_nix_sq *sq;
+ struct roc_nix_cq *cq;
int rc;
if (!txq)
@@ -578,11 +579,19 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
txq_sp = cnxk_eth_txq_to_sp(txq);
dev = txq_sp->dev;
+ sq = &dev->sqs[qid];
plt_nix_dbg("Releasing txq %u", qid);
+ if (dev->nix.tx_compl_ena) {
+ /* Cleanup ROC CQ */
+ cq = &dev->cqs[sq->cqid];
+ rc = roc_nix_cq_fini(cq);
+ if (rc)
+ plt_err("Failed to cleanup cq, rc=%d", rc);
+ }
+
/* Cleanup ROC SQ */
- sq = &dev->sqs[qid];
rc = roc_nix_sq_fini(sq);
if (rc)
plt_err("Failed to cleanup sq, rc=%d", rc);
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 04/19] net/cnxk: update scatter check as warning for SDP
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 02/19] net/cnxk: new " Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 03/19] net/cnxk: disable CQ when SQ stopped Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 05/19] common/cnxk: fix inline device API Nithin Dabilpuram
` (14 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
Update scatter check as warning for SDP interfaces instead of error
to support cases where the host application is already aware of the
max buf size.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/net/cnxk/cnxk_ethdev_ops.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 3921c38649..db1d583fd0 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -613,8 +613,11 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
*/
if (data->dev_started && frame_size > buffsz &&
!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
- plt_err("Scatter offload is not enabled for mtu");
- goto exit;
+ if (!roc_nix_is_sdp(nix)) {
+ plt_err("Scatter offload is not enabled for mtu");
+ goto exit;
+ }
+ plt_warn("Scatter offload is not enabled for mtu on SDP interface");
}
/* Check <seg size> * <max_seg> >= max_frame */
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 05/19] common/cnxk: fix inline device API
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (2 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 04/19] net/cnxk: update scatter check as warning for SDP Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 06/19] common/cnxk: add new mailbox to configure LSO alt flags Nithin Dabilpuram
` (13 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev, Monendra Singh Kushwaha
From: Monendra Singh Kushwaha <kmonendra@marvell.com>
This patch fixes the inline device functions to work
when roc_nix is NULL.
Fixes: f81ee7133b48 ("common/cnxk: support inline SA context invalidate")
Signed-off-by: Monendra Singh Kushwaha <kmonendra@marvell.com>
---
drivers/common/cnxk/roc_nix_inl.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index c7637ddbdc..a5fc33b5c9 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -2324,7 +2324,7 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
if (outb_lf == NULL)
goto exit;
- if (roc_model_is_cn10k() || roc_nix->use_write_sa) {
+ if (roc_model_is_cn10k() || (roc_nix && roc_nix->use_write_sa)) {
rbase = outb_lf->rbase;
flush.u = 0;
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 06/19] common/cnxk: add new mailbox to configure LSO alt flags
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (3 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 05/19] common/cnxk: fix inline device API Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 07/19] common/cnxk: add IPv4 fragmentation offload Nithin Dabilpuram
` (12 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
LSO is enhanced to support flags modification. Added a new mailbox to
enable this feature.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/common/cnxk/hw/nix.h | 46 ++++++++++++++++++++++++++++--------
1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index d16fa3b3ec..f5811ee1ab 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2508,18 +2508,44 @@ struct nix_lso_format {
uint64_t sizem1 : 2;
uint64_t rsvd_14_15 : 2;
uint64_t alg : 3;
- uint64_t rsvd_19_63 : 45;
+ uint64_t alt_flags : 1;
+ uint64_t alt_flags_index : 2;
+ uint64_t shift : 3;
+ uint64_t rsvd_25_63 : 39;
};
-#define NIX_LSO_FIELD_MAX (8)
-#define NIX_LSO_FIELD_ALG_MASK GENMASK(18, 16)
-#define NIX_LSO_FIELD_SZ_MASK GENMASK(13, 12)
-#define NIX_LSO_FIELD_LY_MASK GENMASK(9, 8)
-#define NIX_LSO_FIELD_OFF_MASK GENMASK(7, 0)
-
-#define NIX_LSO_FIELD_MASK \
- (NIX_LSO_FIELD_OFF_MASK | NIX_LSO_FIELD_LY_MASK | \
- NIX_LSO_FIELD_SZ_MASK | NIX_LSO_FIELD_ALG_MASK)
+/* NIX LSO ALT_FLAGS field structure */
+typedef union nix_lso_alt_flg_format {
+ uint64_t u[2];
+
+ struct nix_lso_alt_flg_cfg {
+ /* NIX_AF_LSO_ALT_FLAGS_CFG */
+ uint64_t alt_msf_set : 16;
+ uint64_t alt_msf_mask : 16;
+ uint64_t alt_fsf_set : 16;
+ uint64_t alt_fsf_mask : 16;
+
+ /* NIX_AF_LSO_ALT_FLAGS_CFG1 */
+ uint64_t alt_lsf_set : 16;
+ uint64_t alt_lsf_mask : 16;
+ uint64_t alt_ssf_set : 16;
+ uint64_t alt_ssf_mask : 16;
+ } s;
+} nix_lso_alt_flg_format_t;
+
+#define NIX_LSO_FIELD_MAX (8)
+#define NIX_LSO_FIELD_SHIFT_MASK GENMASK(24, 22)
+#define NIX_LSO_FIELD_ALT_FLG_IDX_MASK GENMASK(21, 20)
+#define NIX_LSO_FIELD_ALT_FLG_MASK BIT_ULL(19)
+#define NIX_LSO_FIELD_ALG_MASK GENMASK(18, 16)
+#define NIX_LSO_FIELD_SZ_MASK GENMASK(13, 12)
+#define NIX_LSO_FIELD_LY_MASK GENMASK(9, 8)
+#define NIX_LSO_FIELD_OFF_MASK GENMASK(7, 0)
+
+#define NIX_LSO_FIELD_MASK \
+ (NIX_LSO_FIELD_OFF_MASK | NIX_LSO_FIELD_LY_MASK | NIX_LSO_FIELD_SZ_MASK | \
+ NIX_LSO_FIELD_ALG_MASK | NIX_LSO_FIELD_ALT_FLG_MASK | NIX_LSO_FIELD_ALT_FLG_IDX_MASK | \
+ NIX_LSO_FIELD_SHIFT_MASK)
#define NIX_CN9K_MAX_HW_FRS 9212UL
#define NIX_LBK_MAX_HW_FRS 65535UL
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 07/19] common/cnxk: add IPv4 fragmentation offload
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (4 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 06/19] common/cnxk: add new mailbox to configure LSO alt flags Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 08/19] common/cnxk: update DF flag in IPv4 fragments Nithin Dabilpuram
` (11 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
Extend LSO offload to support IPv4 fragmentation.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/common/cnxk/hw/nix.h | 2 +-
drivers/common/cnxk/roc_nix.h | 8 ++
drivers/common/cnxk/roc_nix_ops.c | 113 +++++++++++++++++-
drivers/common/cnxk/roc_nix_priv.h | 1 +
.../common/cnxk/roc_platform_base_symbols.c | 2 +
5 files changed, 124 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index f5811ee1ab..8956b95040 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2692,7 +2692,7 @@ typedef union nix_lso_alt_flg_format {
#define NIX_LSO_SEG_MAX 256
#define NIX_LSO_MPS_MAX (BIT_ULL(14) - 1)
-/* Software defined LSO base format IDX */
+/* Kernel defined LSO base format IDX */
#define NIX_LSO_FORMAT_IDX_TSOV4 0
#define NIX_LSO_FORMAT_IDX_TSOV6 1
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index a156d83200..35eb855986 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -28,6 +28,11 @@
#define ROC_NIX_INTF_TYPE_CPT_NIX 254
#define ROC_NIX_INTF_TYPE_SSO 253
+/* Software defined LSO base format IDX */
+#define ROC_NIX_LSO_FORMAT_IDX_TSOV4 0
+#define ROC_NIX_LSO_FORMAT_IDX_TSOV6 1
+#define ROC_NIX_LSO_FORMAT_IDX_IPV4 2
+
enum roc_nix_rss_reta_sz {
ROC_NIX_RSS_RETA_SZ_64 = 64,
ROC_NIX_RSS_RETA_SZ_128 = 128,
@@ -878,9 +883,12 @@ int __roc_api roc_nix_lso_fmt_setup(struct roc_nix *roc_nix);
int __roc_api roc_nix_lso_fmt_get(struct roc_nix *roc_nix,
uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX],
uint8_t tun[ROC_NIX_LSO_TUN_MAX]);
+int __roc_api roc_nix_lso_fmt_ipv4_frag_get(struct roc_nix *roc_nix);
int __roc_api roc_nix_lso_custom_fmt_setup(struct roc_nix *roc_nix,
struct nix_lso_format *fields,
uint16_t nb_fields);
+int __roc_api roc_nix_lso_alt_flags_profile_setup(struct roc_nix *roc_nix,
+ nix_lso_alt_flg_format_t *fmt);
int __roc_api roc_nix_eeprom_info_get(struct roc_nix *roc_nix,
struct roc_nix_eeprom_info *info);
diff --git a/drivers/common/cnxk/roc_nix_ops.c b/drivers/common/cnxk/roc_nix_ops.c
index efb0a41d07..138090317a 100644
--- a/drivers/common/cnxk/roc_nix_ops.c
+++ b/drivers/common/cnxk/roc_nix_ops.c
@@ -5,6 +5,8 @@
#include "roc_api.h"
#include "roc_priv.h"
+#define NIX_LSO_FRMT_IPV4_OFFSET_SHFT 3
+
static void
nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
{
@@ -159,6 +161,34 @@ nix_lso_tun_tcp(struct nix_lso_format_cfg *req, bool outer_v4, bool inner_v4)
field++;
}
+int
+roc_nix_lso_alt_flags_profile_setup(struct roc_nix *roc_nix, nix_lso_alt_flg_format_t *fmt)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct dev *dev = &nix->dev;
+ struct mbox *mbox = mbox_get(dev->mbox);
+ struct nix_lso_alt_flags_cfg_rsp *rsp;
+ struct nix_lso_alt_flags_cfg_req *req;
+ int rc = -ENOSPC;
+
+ req = mbox_alloc_msg_nix_lso_alt_flags_cfg(mbox);
+ if (req == NULL)
+ goto exit;
+
+ req->cfg = fmt->u[0];
+ req->cfg1 = fmt->u[1];
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto exit;
+
+ plt_nix_dbg("Setup alt flags format %u", rsp->lso_alt_flags_idx);
+ rc = rsp->lso_alt_flags_idx;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
int
roc_nix_lso_custom_fmt_setup(struct roc_nix *roc_nix,
struct nix_lso_format *fields, uint16_t nb_fields)
@@ -194,6 +224,74 @@ roc_nix_lso_custom_fmt_setup(struct roc_nix *roc_nix,
return rc;
}
+static int
+nix_lso_ipv4(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_lso_format_cfg_rsp *rsp;
+ nix_lso_alt_flg_format_t alt_flags;
+
+ __io struct nix_lso_format *field;
+ struct nix_lso_format_cfg *req;
+ int flag_idx = 0, rc = -ENOSPC;
+ struct dev *dev = &nix->dev;
+ struct mbox *mbox;
+
+ /* First get flags profile to update v4 flags */
+ memset(&alt_flags, 0, sizeof(alt_flags));
+ alt_flags.s.alt_fsf_set = 0x2000;
+ alt_flags.s.alt_fsf_mask = 0x1FFF;
+ alt_flags.s.alt_msf_set = 0x2000;
+ alt_flags.s.alt_msf_mask = 0x1FFF;
+ alt_flags.s.alt_lsf_set = 0x0000;
+ alt_flags.s.alt_lsf_mask = 0x1FFF;
+ flag_idx = roc_nix_lso_alt_flags_profile_setup(roc_nix, &alt_flags);
+ if (flag_idx < 0)
+ return rc;
+
+ mbox = mbox_get(dev->mbox);
+
+ /*
+ * IPv4 Fragmentation
+ */
+ req = mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ if (req == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ /* Format works only with TCP packet marked by OL3/OL4 */
+ field = (__io struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Update Payload Length */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 2;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* Update fragment offset and flags */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 6;
+ field->sizem1 = 1;
+ field->shift = NIX_LSO_FRMT_IPV4_OFFSET_SHFT;
+ field->alt_flags_index = flag_idx;
+ field->alt_flags = 1;
+ /* Cumulative length of previous segments */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto exit;
+
+ /* IPv4 fragment offset shifted by 3 bits, store this value in profile ID */
+ nix->lso_ipv4_idx = (NIX_LSO_FRMT_IPV4_OFFSET_SHFT << 8) | (rsp->lso_format_idx & 0x1F);
+ plt_nix_dbg("ipv4 fmt=%u", rsp->lso_format_idx);
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
int
roc_nix_lso_fmt_setup(struct roc_nix *roc_nix)
{
@@ -370,12 +468,25 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix)
nix->lso_tun_idx[ROC_NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
plt_nix_dbg("tun v6v6 fmt=%u", rsp->lso_format_idx);
- rc = 0;
+
exit:
mbox_put(mbox);
+
+ nix->lso_ipv4_idx = 0; /* IPv4 fragmentation not supported */
+ if (!rc && roc_model_is_cn20k())
+ return nix_lso_ipv4(roc_nix);
+
return rc;
}
+int
+roc_nix_lso_fmt_ipv4_frag_get(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ return nix->lso_ipv4_idx;
+}
+
int
roc_nix_lso_fmt_get(struct roc_nix *roc_nix,
uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX],
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index d7ea3c6be2..dc61a55d1b 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -152,6 +152,7 @@ struct nix {
uint8_t lso_tsov4_idx;
uint8_t lso_udp_tun_idx[ROC_NIX_LSO_TUN_MAX];
uint8_t lso_tun_idx[ROC_NIX_LSO_TUN_MAX];
+ uint16_t lso_ipv4_idx;
uint8_t lf_rx_stats;
uint8_t lf_tx_stats;
uint8_t rx_chan_cnt;
diff --git a/drivers/common/cnxk/roc_platform_base_symbols.c b/drivers/common/cnxk/roc_platform_base_symbols.c
index cc35c46456..7174e5fe08 100644
--- a/drivers/common/cnxk/roc_platform_base_symbols.c
+++ b/drivers/common/cnxk/roc_platform_base_symbols.c
@@ -323,9 +323,11 @@ RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_npc_mac_addr_set)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_npc_mac_addr_get)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_npc_rx_ena_dis)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_npc_mcast_config)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_lso_alt_flags_profile_setup)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_lso_custom_fmt_setup)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_lso_fmt_setup)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_lso_fmt_get)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_lso_fmt_ipv4_frag_get)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_switch_hdr_set)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_eeprom_info_get)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_rx_drop_re_set)
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 08/19] common/cnxk: update DF flag in IPv4 fragments
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (5 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 07/19] common/cnxk: add IPv4 fragmentation offload Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 09/19] common/cnxk: add support for per packet SQ count update Nithin Dabilpuram
` (10 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
While performing IPv4 fragmentation, consider the DF flag from the
original packet header instead of setting it to zero.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/common/cnxk/roc_nix_ops.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix_ops.c b/drivers/common/cnxk/roc_nix_ops.c
index 138090317a..12a12c6e35 100644
--- a/drivers/common/cnxk/roc_nix_ops.c
+++ b/drivers/common/cnxk/roc_nix_ops.c
@@ -240,11 +240,11 @@ nix_lso_ipv4(struct roc_nix *roc_nix)
/* First get flags profile to update v4 flags */
memset(&alt_flags, 0, sizeof(alt_flags));
alt_flags.s.alt_fsf_set = 0x2000;
- alt_flags.s.alt_fsf_mask = 0x1FFF;
+ alt_flags.s.alt_fsf_mask = 0x5FFF;
alt_flags.s.alt_msf_set = 0x2000;
- alt_flags.s.alt_msf_mask = 0x1FFF;
+ alt_flags.s.alt_msf_mask = 0x5FFF;
alt_flags.s.alt_lsf_set = 0x0000;
- alt_flags.s.alt_lsf_mask = 0x1FFF;
+ alt_flags.s.alt_lsf_mask = 0x5FFF;
flag_idx = roc_nix_lso_alt_flags_profile_setup(roc_nix, &alt_flags);
if (flag_idx < 0)
return rc;
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 09/19] common/cnxk: add support for per packet SQ count update
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (6 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 08/19] common/cnxk: update DF flag in IPv4 fragments Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 10/19] common/cnxk: feature fn to check 16B alignment Nithin Dabilpuram
` (9 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
SQ context extended with a new feature: if enabled, the counter is updated
when a packet is processed, whether it is transmitted or dropped.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/common/cnxk/hw/nix.h | 47 +++++++------
drivers/common/cnxk/roc_features.h | 6 ++
drivers/common/cnxk/roc_nix.h | 3 +
drivers/common/cnxk/roc_nix_queue.c | 70 ++++++++++++++++++-
drivers/common/cnxk/roc_nix_tm.c | 2 +-
drivers/common/cnxk/roc_nix_tm_ops.c | 8 ++-
drivers/common/cnxk/roc_platform.h | 6 ++
.../common/cnxk/roc_platform_base_symbols.c | 1 +
8 files changed, 118 insertions(+), 25 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 8956b95040..314beb9e0b 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2092,21 +2092,25 @@ struct nix_cn20k_sq_ctx_hw_s {
uint64_t default_chan : 12;
uint64_t sdp_mcast : 1;
uint64_t sso_ena : 1;
- uint64_t dse_rsvd1 : 28;
+ uint64_t dse_rsvd1 : 10;
+ uint64_t update_sq_count : 2;
+ uint64_t seb_count : 16;
uint64_t sqb_enqueue_count : 16; /* W4 */
uint64_t tail_offset : 6;
uint64_t lmt_dis : 1;
uint64_t smq_rr_weight : 14;
- uint64_t dnq_rsvd1 : 27;
+ uint64_t dnq_rsvd1 : 4;
+ uint64_t sq_count_iova_lo : 23;
uint64_t tail_sqb : 64; /* W5 */
uint64_t next_sqb : 64; /* W6 */
- uint64_t smq : 11; /* W7 */
+ uint64_t smq : 11; /* W7 */
uint64_t smq_pend : 1;
uint64_t smq_next_sq : 20;
uint64_t smq_next_sq_vld : 1;
uint64_t mnq_dis : 1;
- uint64_t scm1_rsvd2 : 30;
- uint64_t smenq_sqb : 64; /* W8 */
+ uint64_t scm1_rsvd2 : 7;
+ uint64_t sq_count_iova_hi : 23;
+ uint64_t smenq_sqb : 64; /* W8 */
uint64_t smenq_offset : 6; /* W9 */
uint64_t cq_limit : 8;
uint64_t smq_rr_count : 32;
@@ -2122,7 +2126,7 @@ struct nix_cn20k_sq_ctx_hw_s {
uint64_t smenq_next_sqb_vld : 1;
uint64_t scm_dq_rsvd1 : 9;
uint64_t smenq_next_sqb : 64; /* W11 */
- uint64_t age_drop_octs : 32; /* W12 */
+ uint64_t age_drop_octs : 32; /* W12 */
uint64_t age_drop_pkts : 32;
uint64_t drop_pkts : 48; /* W13 */
uint64_t drop_octs_lsw : 16;
@@ -2160,19 +2164,20 @@ struct nix_cn20k_sq_ctx_s {
uint64_t lmt_dis : 1;
uint64_t mnq_dis : 1;
uint64_t smq_next_sq : 20;
- uint64_t smq_lso_segnum : 8;
- uint64_t tail_offset : 6;
- uint64_t smenq_offset : 6;
- uint64_t head_offset : 6;
- uint64_t smenq_next_sqb_vld : 1;
- uint64_t smq_pend : 1;
- uint64_t smq_next_sq_vld : 1;
- uint64_t reserved_253_255 : 3;
- uint64_t next_sqb : 64; /* W4 */
- uint64_t tail_sqb : 64; /* W5 */
- uint64_t smenq_sqb : 64; /* W6 */
- uint64_t smenq_next_sqb : 64; /* W7 */
- uint64_t head_sqb : 64; /* W8 */
+ uint64_t smq_lso_segnum : 8;
+ uint64_t tail_offset : 6;
+ uint64_t smenq_offset : 6;
+ uint64_t head_offset : 6;
+ uint64_t smenq_next_sqb_vld : 1;
+ uint64_t smq_pend : 1;
+ uint64_t smq_next_sq_vld : 1;
+ uint64_t update_sq_count : 2;
+ uint64_t reserved_255_255 : 1;
+ uint64_t next_sqb : 64; /* W4 */
+ uint64_t tail_sqb : 64; /* W5 */
+ uint64_t smenq_sqb : 64; /* W6 */
+ uint64_t smenq_next_sqb : 64; /* W7 */
+ uint64_t head_sqb : 64; /* W8 */
uint64_t reserved_576_583 : 8; /* W9 */
uint64_t vfi_lso_total : 18;
uint64_t vfi_lso_sizem1 : 3;
@@ -2183,7 +2188,7 @@ struct nix_cn20k_sq_ctx_s {
uint64_t vfi_lso_vld : 1;
uint64_t reserved_630_639 : 10;
uint64_t scm_lso_rem : 18; /* W10 */
- uint64_t reserved_658_703 : 46;
+ uint64_t sq_count_iova : 46;
uint64_t octs : 48; /* W11 */
uint64_t reserved_752_767 : 16;
uint64_t pkts : 48; /* W12 */
@@ -2193,7 +2198,7 @@ struct nix_cn20k_sq_ctx_s {
uint64_t drop_octs : 48; /* W14 */
uint64_t reserved_944_959 : 16;
uint64_t drop_pkts : 48; /* W15 */
- uint64_t reserved_1008_1023 : 16;
+ uint64_t seb_count : 16;
};
/* [CN10K, .) NIX sq context hardware structure */
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 48ba2fade7..62a1b9e0b2 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -120,4 +120,10 @@ roc_feature_nix_has_plain_pkt_reassembly(void)
return roc_model_is_cn20k();
}
+static inline bool
+roc_feature_nix_has_sq_cnt_update(void)
+{
+ return roc_model_is_cn20k();
+}
+
#endif
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 35eb855986..e070db1baa 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -409,6 +409,8 @@ struct roc_nix_sq {
void *lmt_addr;
void *sqe_mem;
void *fc;
+ void *sq_cnt_ptr;
+ uint8_t update_sq_cnt;
uint8_t tc;
bool enable;
};
@@ -989,6 +991,7 @@ int __roc_api roc_nix_sq_fini(struct roc_nix_sq *sq);
int __roc_api roc_nix_sq_ena_dis(struct roc_nix_sq *sq, bool enable);
void __roc_api roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid,
uint32_t *head, uint32_t *tail);
+int __roc_api roc_nix_sq_cnt_update(struct roc_nix_sq *sq, bool enable);
/* PTP */
int __roc_api roc_nix_ptp_rx_ena_dis(struct roc_nix *roc_nix, int enable);
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index e19a6877e6..356367624f 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1464,7 +1464,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
if (roc_nix->sqb_slack)
nb_sqb_bufs += roc_nix->sqb_slack;
- else
+ else if (!sq->sq_cnt_ptr)
nb_sqb_bufs += PLT_MAX((int)thr, (int)ROC_NIX_SQB_SLACK_DFLT);
/* Explicitly set nat_align alone as by default pool is with both
* nat_align and buf_offset = 1 which we don't want for SQB.
@@ -1473,7 +1473,9 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
pool.nat_align = 1;
memset(&aura, 0, sizeof(aura));
- aura.fc_ena = 1;
+ /* Disable SQ pool FC updates when SQ count updates are used */
+ if (!sq->sq_cnt_ptr)
+ aura.fc_ena = 1;
if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp())
aura.fc_stype = 0x0; /* STF */
else
@@ -1827,6 +1829,11 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum, uint16_t sm
aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
+ /* HW atomic update of SQ count */
+ if (sq->sq_cnt_ptr) {
+ aq->sq.sq_count_iova = ((uintptr_t)sq->sq_cnt_ptr) >> 3;
+ aq->sq.update_sq_count = sq->update_sq_cnt;
+ }
/* Many to one reduction */
aq->sq.qint_idx = sq->qid % nix->qints;
if (roc_errata_nix_assign_incorrect_qint()) {
@@ -2133,3 +2140,62 @@ roc_nix_q_err_cb_unregister(struct roc_nix *roc_nix)
dev->ops->q_err_cb = NULL;
}
+
+int
+roc_nix_sq_cnt_update(struct roc_nix_sq *sq, bool enable)
+{
+ struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
+ struct mbox *mbox = mbox_get((&nix->dev)->mbox);
+ int64_t __rte_atomic *sq_cntm = (int64_t __rte_atomic *)sq->sq_cnt_ptr;
+ struct nix_cn20k_aq_enq_rsp *rsp;
+ struct nix_cn20k_aq_enq_req *aq;
+ int rc;
+
+ aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
+ if (!aq) {
+ mbox_put(mbox);
+ return -ENOSPC;
+ }
+
+ aq->qidx = sq->qid;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ mbox_put(mbox);
+ return rc;
+ }
+
+ /* Check if sq is already in same state */
+ if ((enable && rsp->sq.update_sq_count) || (!enable && !rsp->sq.update_sq_count)) {
+ mbox_put(mbox);
+ return 0;
+ }
+
+ /* Disable sq */
+ aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
+ if (!aq) {
+ mbox_put(mbox);
+ return -ENOSPC;
+ }
+
+ aq->qidx = sq->qid;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+ aq->sq_mask.update_sq_count = ~aq->sq_mask.update_sq_count;
+ aq->sq.update_sq_count = enable;
+ if (enable)
+ aq->sq.update_sq_count = sq->update_sq_cnt;
+ rc = mbox_process(mbox);
+ if (rc) {
+ mbox_put(mbox);
+ return rc;
+ }
+ if (enable)
+ plt_atomic_store_explicit(sq_cntm, sq->nb_desc, plt_memory_order_relaxed);
+ else
+ plt_atomic_store_explicit(sq_cntm, 0, plt_memory_order_relaxed);
+
+ mbox_put(mbox);
+ return 0;
+}
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 2771fd8fc4..76c0f01884 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -601,7 +601,7 @@ roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
/* SQ reached quiescent state */
if (sqb_cnt <= 1 && head_off == tail_off &&
- (*(volatile uint64_t *)sq->fc == sq->aura_sqb_bufs)) {
+ (sq->sq_cnt_ptr || (*(volatile uint64_t *)sq->fc == sq->aura_sqb_bufs))) {
break;
}
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 951c310a56..09d014a276 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -19,6 +19,12 @@ roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
enable ? "enable" : "disable");
+ /* For cn20K, enable/disable SQ count updates if the SQ count pointer
+ * was allocated based on the enable field.
+ */
+ if (sq->sq_cnt_ptr)
+ return roc_nix_sq_cnt_update(sq, enable);
+
lf = idev_npa_obj_get();
if (!lf)
return NPA_ERR_DEVICE_NOT_BOUNDED;
@@ -554,7 +560,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
tail_off = (val >> 28) & 0x3F;
if (sqb_cnt > 1 || head_off != tail_off ||
- (*(uint64_t *)sq->fc != sq->aura_sqb_bufs))
+ (!sq->sq_cnt_ptr && (*(uint64_t *)sq->fc != sq->aura_sqb_bufs)))
plt_err("Failed to gracefully flush sq %u", sq->qid);
}
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index ff3a25e57f..e22a50d47a 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -212,6 +212,12 @@ plt_thread_is_valid(plt_thread_t thr)
#define plt_io_rmb() rte_io_rmb()
#define plt_atomic_thread_fence rte_atomic_thread_fence
+#define plt_atomic_store_explicit rte_atomic_store_explicit
+#define plt_atomic_load_explicit rte_atomic_load_explicit
+#define plt_memory_order_release rte_memory_order_release
+#define plt_memory_order_acquire rte_memory_order_acquire
+#define plt_memory_order_relaxed rte_memory_order_relaxed
+
#define plt_bit_relaxed_get32 rte_bit_relaxed_get32
#define plt_bit_relaxed_set32 rte_bit_relaxed_set32
#define plt_bit_relaxed_clear32 rte_bit_relaxed_clear32
diff --git a/drivers/common/cnxk/roc_platform_base_symbols.c b/drivers/common/cnxk/roc_platform_base_symbols.c
index 7174e5fe08..5f75d11e24 100644
--- a/drivers/common/cnxk/roc_platform_base_symbols.c
+++ b/drivers/common/cnxk/roc_platform_base_symbols.c
@@ -361,6 +361,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_rss_reta_get)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_rss_flowkey_set)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_rss_default_setup)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_num_xstats_get)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_cnt_update)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_stats_get)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_stats_reset)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_stats_queue_get)
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 10/19] common/cnxk: feature fn to check 16B alignment
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (7 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 09/19] common/cnxk: add support for per packet SQ count update Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 11/19] common/cnxk: add API to configure backpressure on pool Nithin Dabilpuram
` (8 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev, Rakesh Kudurumalla
From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
Added a function to check whether the board supports
16B alignment.
Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
drivers/common/cnxk/roc_features.h | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 62a1b9e0b2..0663aef4b0 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -126,4 +126,9 @@ roc_feature_nix_has_sq_cnt_update(void)
return roc_model_is_cn20k();
}
+static inline bool
+roc_feature_nix_has_16b_align(void)
+{
+ return roc_model_is_cn20k();
+}
#endif
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 11/19] common/cnxk: add API to configure backpressure on pool
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (8 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 10/19] common/cnxk: feature fn to check 16B alignment Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 12/19] common/cnxk: fix max number of SQB bufs in clean up Nithin Dabilpuram
` (7 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Sunil Kumar Kori <skori@marvell.com>
On CN20K platform, backpressure can be configured for eight
different traffic classes per pool along with threshold and
BPIDs.
RoC API is added to configure the same.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
drivers/common/cnxk/roc_nix.h | 1 +
drivers/common/cnxk/roc_nix_fc.c | 60 +++++++
drivers/common/cnxk/roc_npa.c | 155 +++++++++++++++++-
drivers/common/cnxk/roc_npa.h | 2 +
drivers/common/cnxk/roc_npa_priv.h | 5 +-
.../common/cnxk/roc_platform_base_symbols.c | 1 +
6 files changed, 222 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index e070db1baa..274ece68a9 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -507,6 +507,7 @@ struct roc_nix {
uint16_t rep_cnt;
uint16_t rep_pfvf_map[MAX_PFVF_REP];
bool reass_ena;
+ bool use_multi_bpids;
TAILQ_ENTRY(roc_nix) next;
#define ROC_NIX_MEM_SZ (6 * 1112)
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index e35c993f96..ddabd15a5d 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -549,6 +549,61 @@ nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid,
#define NIX_BPID_INVALID 0xFFFF
+static void
+nix_fc_npa_multi_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_handle, uint8_t ena, uint8_t force,
+ uint8_t tc, uint64_t drop_percent)
+{
+ uint32_t pool_id = roc_npa_aura_handle_to_aura(pool_handle);
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aura_attr *aura_attr;
+ uint8_t bp_thresh, bp_ena;
+ uint16_t bpid;
+ int i;
+
+ if (!lf)
+ return;
+
+ aura_attr = &lf->aura_attr[pool_id];
+
+ bp_thresh = NIX_RQ_AURA_BP_THRESH(drop_percent, aura_attr->limit, aura_attr->shift);
+ bpid = aura_attr->nix0_bpid;
+ bp_ena = aura_attr->bp_ena;
+
+ /* BP is already enabled. */
+ if ((bp_ena & (0x1 << tc)) && ena) {
+ if (bp_thresh != aura_attr->bp_thresh[tc]) {
+ if (roc_npa_pool_bp_configure(pool_id, nix->bpid[0], bp_thresh, tc, true))
+ plt_err("Enabling backpressue failed on pool 0x%" PRIx32, pool_id);
+ } else {
+ aura_attr->ref_count++;
+ }
+
+ return;
+ }
+
+ if (ena) {
+ if (roc_npa_pool_bp_configure(pool_id, nix->bpid[0], bp_thresh, tc, true))
+ plt_err("Enabling backpressue failed on pool 0x%" PRIx32, pool_id);
+ else
+ aura_attr->ref_count++;
+ } else {
+ bool found = !!force;
+
+ /* Don't disable if existing BPID is not within this port's list */
+ for (i = 0; i < nix->chan_cnt; i++)
+ if (bpid == nix->bpid[i])
+ found = true;
+ if (!found)
+ return;
+ else if ((aura_attr->ref_count > 0) && --(aura_attr->ref_count))
+ return;
+
+ if (roc_npa_pool_bp_configure(pool_id, 0, 0, 0, false))
+ plt_err("Disabling backpressue failed on pool 0x%" PRIx32, pool_id);
+ }
+}
+
void
roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, uint8_t force,
uint8_t tc, uint64_t drop_percent)
@@ -567,6 +622,11 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
if (!lf)
return;
+ if (roc_model_is_cn20k() && roc_nix->use_multi_bpids) {
+ nix_fc_npa_multi_bp_cfg(roc_nix, pool_id, ena, force, tc, drop_percent);
+ return;
+ }
+
aura_attr = &lf->aura_attr[aura_id];
bp_intf = 1 << nix->is_nix1;
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index d5ebfbfc11..f9824f6656 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -172,10 +172,47 @@ npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
return rc;
}
+static inline void
+npa_pool_multi_bp_reset(struct npa_cn20k_aq_enq_req *pool_req)
+{
+ pool_req->pool.bp_0 = 0;
+ pool_req->pool.bp_1 = 0;
+ pool_req->pool.bp_2 = 0;
+ pool_req->pool.bp_3 = 0;
+ pool_req->pool.bp_4 = 0;
+ pool_req->pool.bp_5 = 0;
+ pool_req->pool.bp_6 = 0;
+ pool_req->pool.bp_7 = 0;
+ pool_req->pool.bp_ena_0 = 0;
+ pool_req->pool.bp_ena_1 = 0;
+ pool_req->pool.bp_ena_2 = 0;
+ pool_req->pool.bp_ena_3 = 0;
+ pool_req->pool.bp_ena_4 = 0;
+ pool_req->pool.bp_ena_5 = 0;
+ pool_req->pool.bp_ena_6 = 0;
+ pool_req->pool.bp_ena_7 = 0;
+ pool_req->pool_mask.bp_0 = ~(pool_req->pool_mask.bp_0);
+ pool_req->pool_mask.bp_1 = ~(pool_req->pool_mask.bp_1);
+ pool_req->pool_mask.bp_2 = ~(pool_req->pool_mask.bp_2);
+ pool_req->pool_mask.bp_3 = ~(pool_req->pool_mask.bp_3);
+ pool_req->pool_mask.bp_4 = ~(pool_req->pool_mask.bp_4);
+ pool_req->pool_mask.bp_5 = ~(pool_req->pool_mask.bp_5);
+ pool_req->pool_mask.bp_6 = ~(pool_req->pool_mask.bp_6);
+ pool_req->pool_mask.bp_7 = ~(pool_req->pool_mask.bp_7);
+ pool_req->pool_mask.bp_ena_0 = ~(pool_req->pool_mask.bp_ena_0);
+ pool_req->pool_mask.bp_ena_1 = ~(pool_req->pool_mask.bp_ena_1);
+ pool_req->pool_mask.bp_ena_2 = ~(pool_req->pool_mask.bp_ena_2);
+ pool_req->pool_mask.bp_ena_3 = ~(pool_req->pool_mask.bp_ena_3);
+ pool_req->pool_mask.bp_ena_4 = ~(pool_req->pool_mask.bp_ena_4);
+ pool_req->pool_mask.bp_ena_5 = ~(pool_req->pool_mask.bp_ena_5);
+ pool_req->pool_mask.bp_ena_6 = ~(pool_req->pool_mask.bp_ena_6);
+ pool_req->pool_mask.bp_ena_7 = ~(pool_req->pool_mask.bp_ena_7);
+}
+
static int
npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
{
- struct npa_cn20k_aq_enq_req *aura_req_cn20k, *pool_req_cn20k;
+ struct npa_cn20k_aq_enq_req *aura_req_cn20k, *pool_req_cn20k = NULL;
struct npa_aq_enq_req *aura_req, *pool_req;
struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
struct mbox_dev *mdev = &m_box->dev[0];
@@ -201,6 +238,10 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
}
if (pool_req == NULL)
goto exit;
+
+ /* Disable backpressure on pool on CN20K */
+ if (roc_model_is_cn20k())
+ npa_pool_multi_bp_reset(pool_req_cn20k);
pool_req->aura_id = aura_id;
pool_req->ctype = NPA_AQ_CTYPE_POOL;
pool_req->op = NPA_AQ_INSTOP_WRITE;
@@ -983,6 +1024,118 @@ roc_npa_zero_aura_handle(void)
return 0;
}
+int
+roc_npa_pool_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_thresh, uint8_t bp_class,
+ bool enable)
+{
+ uint32_t pool_id = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_cn20k_aq_enq_req *aq;
+ uint8_t bp, bp_ena;
+ struct mbox *mbox;
+ int rc = 0;
+
+ plt_npa_dbg("Setting BPID %u BP_CLASS %u enable %u on pool %" PRIx64, bpid, bp_class,
+ bp_thresh, aura_handle);
+
+ if (lf == NULL)
+ return NPA_ERR_PARAM;
+
+ mbox = mbox_get(lf->mbox);
+ aq = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+ if (aq == NULL) {
+ rc = -ENOSPC;
+ goto fail;
+ }
+
+ aq->aura_id = pool_id;
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_WRITE;
+
+ if (enable) {
+ aq->pool.bpid_0 = bpid;
+ aq->pool_mask.bpid_0 = ~(aq->pool_mask.bpid_0);
+
+ bp = bp_thresh;
+ } else {
+ bp = 0;
+ }
+
+ switch (bp_class) {
+ case 0:
+ aq->pool.bp_0 = bp;
+ aq->pool_mask.bp_0 = ~(aq->pool_mask.bp_0);
+ aq->pool.bp_ena_0 = enable;
+ aq->pool_mask.bp_ena_0 = ~(aq->pool_mask.bp_ena_0);
+ break;
+ case 1:
+ aq->pool.bp_1 = bp;
+ aq->pool_mask.bp_1 = ~(aq->pool_mask.bp_1);
+ aq->pool.bp_ena_1 = enable;
+ aq->pool_mask.bp_ena_1 = ~(aq->pool_mask.bp_ena_1);
+ break;
+ case 2:
+ aq->pool.bp_2 = bp;
+ aq->pool_mask.bp_2 = ~(aq->pool_mask.bp_2);
+ aq->pool.bp_ena_2 = enable;
+ aq->pool_mask.bp_ena_2 = ~(aq->pool_mask.bp_ena_2);
+ break;
+ case 3:
+ aq->pool.bp_3 = bp;
+ aq->pool_mask.bp_3 = ~(aq->pool_mask.bp_3);
+ aq->pool.bp_ena_3 = enable;
+ aq->pool_mask.bp_ena_3 = ~(aq->pool_mask.bp_ena_3);
+ break;
+ case 4:
+ aq->pool.bp_4 = bp;
+ aq->pool_mask.bp_4 = ~(aq->pool_mask.bp_4);
+ aq->pool.bp_ena_4 = enable;
+ aq->pool_mask.bp_ena_4 = ~(aq->pool_mask.bp_ena_4);
+ break;
+ case 5:
+ aq->pool.bp_5 = bp;
+ aq->pool_mask.bp_5 = ~(aq->pool_mask.bp_5);
+ aq->pool.bp_ena_5 = enable;
+ aq->pool_mask.bp_ena_5 = ~(aq->pool_mask.bp_ena_5);
+ break;
+ case 6:
+ aq->pool.bp_6 = bp;
+ aq->pool_mask.bp_6 = ~(aq->pool_mask.bp_6);
+ aq->pool.bp_ena_6 = enable;
+ aq->pool_mask.bp_ena_6 = ~(aq->pool_mask.bp_ena_6);
+ break;
+ case 7:
+ aq->pool.bp_7 = bp;
+ aq->pool_mask.bp_7 = ~(aq->pool_mask.bp_7);
+ aq->pool.bp_ena_7 = enable;
+ aq->pool_mask.bp_ena_7 = ~(aq->pool_mask.bp_ena_7);
+ break;
+ default:
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ rc = mbox_process(mbox);
+ if (rc)
+ goto fail;
+
+ bp_ena = lf->aura_attr[pool_id].bp_ena;
+ bp_ena &= ~(1 << bp_class);
+ bp_ena |= (enable << bp_class);
+
+ if (enable && !lf->aura_attr[pool_id].bp_ena)
+ lf->aura_attr[pool_id].nix0_bpid = bpid;
+ else if (!enable && !lf->aura_attr[pool_id].bp_ena)
+ lf->aura_attr[pool_id].nix0_bpid = 0;
+
+ lf->aura_attr[pool_id].bp_ena = bp_ena;
+ lf->aura_attr[pool_id].bp_thresh[bp_class] = bp;
+
+fail:
+ mbox_put(mbox);
+ return rc;
+}
+
int
roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf, uint8_t bp_thresh,
bool enable)
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 853c0fed43..336a43f95c 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -816,6 +816,8 @@ uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
uint64_t __roc_api roc_npa_buf_type_limit_get(uint64_t type_mask);
int __roc_api roc_npa_aura_bp_configure(uint64_t aura_id, uint16_t bpid, uint8_t bp_intf,
uint8_t bp_thresh, bool enable);
+int __roc_api roc_npa_pool_bp_configure(uint64_t pool_id, uint16_t bpid, uint8_t bp_thresh,
+ uint8_t bp_class, bool enable);
/* Init callbacks */
typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/roc_npa_priv.h b/drivers/common/cnxk/roc_npa_priv.h
index 060df9ab04..0223e4a438 100644
--- a/drivers/common/cnxk/roc_npa_priv.h
+++ b/drivers/common/cnxk/roc_npa_priv.h
@@ -55,7 +55,10 @@ struct npa_aura_attr {
uint64_t shift;
uint64_t limit;
uint8_t bp_ena;
- uint8_t bp;
+ union {
+ uint8_t bp; /* CN9K, CN10K */
+ uint8_t bp_thresh[8]; /* CN20K */
+ };
};
struct dev;
diff --git a/drivers/common/cnxk/roc_platform_base_symbols.c b/drivers/common/cnxk/roc_platform_base_symbols.c
index 5f75d11e24..f8d6fdd8df 100644
--- a/drivers/common/cnxk/roc_platform_base_symbols.c
+++ b/drivers/common/cnxk/roc_platform_base_symbols.c
@@ -427,6 +427,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(roc_npa_aura_op_range_set)
RTE_EXPORT_INTERNAL_SYMBOL(roc_npa_aura_op_range_get)
RTE_EXPORT_INTERNAL_SYMBOL(roc_npa_pool_op_pc_reset)
RTE_EXPORT_INTERNAL_SYMBOL(roc_npa_aura_drop_set)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_npa_pool_bp_configure)
RTE_EXPORT_INTERNAL_SYMBOL(roc_npa_pool_create)
RTE_EXPORT_INTERNAL_SYMBOL(roc_npa_aura_create)
RTE_EXPORT_INTERNAL_SYMBOL(roc_npa_aura_limit_modify)
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 12/19] common/cnxk: fix max number of SQB bufs in clean up
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (9 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 11/19] common/cnxk: add API to configure backpressure on pool Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 13/19] common/cnxk: add support for SQ resize Nithin Dabilpuram
` (6 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Sunil Kumar Kori <skori@marvell.com>
By default, SQB pool is created with max (512 buffers) +
extra threshold buffers and aura limit is set to 512 + thr.
But during cleanup, the aura limit is reset to only MAX (512 buffers)
before destroying the pool.
Hence, while destroying the pool, only 512 buffers are cleaned
from the aura and the extra threshold buffers are left as they are.
At a later stage, if the same SQB pool is created, the hardware
reports an error for the extra threshold buffers because they are
already in the pool.
Fixes: 780f90e951a5 ("common/cnxk: restore NIX SQB pool limit before destroy")
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
drivers/common/cnxk/roc_nix_queue.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 356367624f..8737728dd5 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -2057,7 +2057,7 @@ roc_nix_sq_fini(struct roc_nix_sq *sq)
/* Restore limit to max SQB count that the pool was created
* for aura drain to succeed.
*/
- roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
+ roc_npa_aura_limit_modify(sq->aura_handle, sq->aura_sqb_bufs);
rc |= roc_npa_pool_destroy(sq->aura_handle);
plt_free(sq->fc);
plt_free(sq->sqe_mem);
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 13/19] common/cnxk: add support for SQ resize
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (10 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 12/19] common/cnxk: fix max number of SQB bufs in clean up Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 14/19] common/cnxk: increase Tx schedular count Nithin Dabilpuram
` (5 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
Add support for SQ resize by making SQB mem allocated
in chunks of SQB size.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_nix.h | 3 +
drivers/common/cnxk/roc_nix_queue.c | 389 ++++++++++++++++--
.../common/cnxk/roc_platform_base_symbols.c | 1 +
3 files changed, 366 insertions(+), 27 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 274ece68a9..41a8576fca 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -399,6 +399,7 @@ struct roc_nix_sq {
bool cq_ena;
uint8_t fc_hyst_bits;
/* End of Input parameters */
+ uint16_t sqes_per_sqb;
uint16_t sqes_per_sqb_log2;
struct roc_nix *roc_nix;
uint64_t aura_handle;
@@ -495,6 +496,7 @@ struct roc_nix {
uint16_t inb_cfg_param2;
bool force_tail_drop;
bool dis_xqe_drop;
+ bool sq_resize_ena;
/* End of input parameters */
/* LMT line base for "Per Core Tx LMT line" mode*/
uintptr_t lmt_base;
@@ -993,6 +995,7 @@ int __roc_api roc_nix_sq_ena_dis(struct roc_nix_sq *sq, bool enable);
void __roc_api roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid,
uint32_t *head, uint32_t *tail);
int __roc_api roc_nix_sq_cnt_update(struct roc_nix_sq *sq, bool enable);
+int __roc_api roc_nix_sq_resize(struct roc_nix_sq *sq, uint32_t nb_desc);
/* PTP */
int __roc_api roc_nix_ptp_rx_ena_dis(struct roc_nix *roc_nix, int enable);
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 8737728dd5..3f11aa89fc 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1430,42 +1430,77 @@ roc_nix_cq_fini(struct roc_nix_cq *cq)
return 0;
}
-static int
-sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
+static uint16_t
+sqes_per_sqb_calc(uint16_t sqb_size, enum roc_nix_sq_max_sqe_sz max_sqe_sz)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
- uint16_t sqes_per_sqb, count, nb_sqb_bufs, thr;
- struct npa_pool_s pool;
- struct npa_aura_s aura;
- uint64_t blk_sz;
- uint64_t iova;
- int rc;
+ uint16_t sqes_per_sqb;
- blk_sz = nix->sqb_size;
- if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
- sqes_per_sqb = (blk_sz / 8) / 16;
+ if (max_sqe_sz == roc_nix_maxsqesz_w16)
+ sqes_per_sqb = (sqb_size / 8) / 16;
else
- sqes_per_sqb = (blk_sz / 8) / 8;
+ sqes_per_sqb = (sqb_size / 8) / 8;
/* Reserve One SQE in each SQB to hold pointer for next SQB */
sqes_per_sqb -= 1;
+ return sqes_per_sqb;
+}
+
+static uint16_t
+sq_desc_to_sqb(struct nix *nix, uint16_t sqes_per_sqb, uint32_t nb_desc)
+{
+ struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
+ uint16_t nb_sqb_bufs;
+
+ nb_desc = PLT_MAX(512U, nb_desc);
+ nb_sqb_bufs = PLT_DIV_CEIL(nb_desc, sqes_per_sqb);
- sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
- nb_sqb_bufs = PLT_DIV_CEIL(sq->nb_desc, sqes_per_sqb);
- thr = PLT_DIV_CEIL((nb_sqb_bufs * ROC_NIX_SQB_THRESH), 100);
nb_sqb_bufs += NIX_SQB_PREFETCH;
/* Clamp up the SQB count */
nb_sqb_bufs = PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs);
nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count, (uint16_t)nb_sqb_bufs);
- sq->nb_sqb_bufs = nb_sqb_bufs;
- sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
- sq->nb_sqb_bufs_adj = nb_sqb_bufs;
+ return nb_sqb_bufs;
+}
+static uint16_t
+sqb_slack_adjust(struct nix *nix, uint16_t nb_sqb_bufs, bool sq_cnt_ena)
+{
+ struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
+ uint16_t thr;
+
+ thr = PLT_DIV_CEIL((nb_sqb_bufs * ROC_NIX_SQB_THRESH), 100);
if (roc_nix->sqb_slack)
nb_sqb_bufs += roc_nix->sqb_slack;
- else if (!sq->sq_cnt_ptr)
+ else if (!sq_cnt_ena)
nb_sqb_bufs += PLT_MAX((int)thr, (int)ROC_NIX_SQB_SLACK_DFLT);
+ return nb_sqb_bufs;
+}
+
+static int
+sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint16_t sqes_per_sqb, count, nb_sqb_bufs;
+ struct npa_pool_s pool;
+ struct npa_aura_s aura;
+ uint64_t blk_sz;
+ uint64_t iova;
+ int rc;
+
+ blk_sz = nix->sqb_size;
+ sqes_per_sqb = sqes_per_sqb_calc(blk_sz, sq->max_sqe_sz);
+
+ /* Translate desc count to SQB count */
+ nb_sqb_bufs = sq_desc_to_sqb(nix, sqes_per_sqb, sq->nb_desc);
+
+ sq->sqes_per_sqb = sqes_per_sqb;
+ sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
+ sq->nb_sqb_bufs_adj = nb_sqb_bufs;
+ sq->nb_sqb_bufs = nb_sqb_bufs;
+
+ /* Add slack to SQB's */
+ nb_sqb_bufs = sqb_slack_adjust(nix, nb_sqb_bufs, !!sq->sq_cnt_ptr);
+
/* Explicitly set nat_align alone as by default pool is with both
* nat_align and buf_offset = 1 which we don't want for SQB.
*/
@@ -1520,6 +1555,96 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
return rc;
}
+static int
+sqb_pool_dyn_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint16_t count, nb_sqb_bufs;
+ uint16_t max_sqb_count;
+ struct npa_pool_s pool;
+ struct npa_aura_s aura;
+ uint16_t sqes_per_sqb;
+ uint64_t blk_sz;
+ uint64_t iova;
+ int rc;
+
+ blk_sz = nix->sqb_size;
+ sqes_per_sqb = sqes_per_sqb_calc(blk_sz, sq->max_sqe_sz);
+
+ /* Translate desc count to SQB count */
+ nb_sqb_bufs = sq_desc_to_sqb(nix, sqes_per_sqb, sq->nb_desc);
+
+ sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
+ sq->sqes_per_sqb = sqes_per_sqb;
+ sq->nb_sqb_bufs_adj = nb_sqb_bufs;
+ sq->nb_sqb_bufs = nb_sqb_bufs;
+
+ /* Add slack to SQB's */
+ nb_sqb_bufs = sqb_slack_adjust(nix, nb_sqb_bufs, !!sq->sq_cnt_ptr);
+
+ /* Explicitly set nat_align alone as by default pool is with both
+ * nat_align and buf_offset = 1 which we don't want for SQB.
+ */
+ memset(&pool, 0, sizeof(struct npa_pool_s));
+ pool.nat_align = 0;
+
+ memset(&aura, 0, sizeof(aura));
+ if (!sq->sq_cnt_ptr)
+ aura.fc_ena = 1;
+ if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp())
+ aura.fc_stype = 0x0; /* STF */
+ else
+ aura.fc_stype = 0x3; /* STSTP */
+ aura.fc_addr = (uint64_t)sq->fc;
+ aura.fc_hyst_bits = sq->fc_hyst_bits & 0xF;
+ max_sqb_count = sqb_slack_adjust(nix, roc_nix->max_sqb_count, false);
+ rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, max_sqb_count, &aura, &pool, 0);
+ if (rc)
+ goto fail;
+
+ roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
+ roc_npa_aura_op_cnt_set(sq->aura_handle, 0, nb_sqb_bufs);
+
+ /* Fill the initial buffers */
+ for (count = 0; count < nb_sqb_bufs; count++) {
+ iova = (uint64_t)plt_zmalloc(blk_sz, ROC_ALIGN);
+ if (!iova) {
+ rc = -ENOMEM;
+ goto nomem;
+ }
+ plt_io_wmb();
+
+ roc_npa_aura_op_free(sq->aura_handle, 0, iova);
+ }
+
+ if (roc_npa_aura_op_available_wait(sq->aura_handle, nb_sqb_bufs, 0) != nb_sqb_bufs) {
+ plt_err("Failed to free all pointers to the pool");
+ rc = NIX_ERR_NO_MEM;
+ goto npa_fail;
+ }
+
+ /* Update aura count */
+ roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
+ roc_npa_pool_op_range_set(sq->aura_handle, 0, UINT64_MAX);
+ sq->aura_sqb_bufs = nb_sqb_bufs;
+
+ return rc;
+npa_fail:
+nomem:
+ while (count) {
+ iova = roc_npa_aura_op_alloc(sq->aura_handle, 0);
+ if (!iova)
+ break;
+ plt_free((uint64_t *)iova);
+ count--;
+ }
+ if (count)
+ plt_err("Failed to recover %u SQB's", count);
+ roc_npa_pool_destroy(sq->aura_handle);
+fail:
+ return rc;
+}
+
static int
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
uint16_t smq)
@@ -1768,10 +1893,10 @@ sq_cn10k_fini(struct nix *nix, struct roc_nix_sq *sq)
return rc;
}
- if (aq->sq.smq_pend)
+ if (rsp->sq.smq_pend)
plt_err("SQ has pending SQE's");
- count = aq->sq.sqb_count;
+ count = rsp->sq.sqb_count;
sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
/* Free SQB's that are used */
sqb_buf = (void *)rsp->sq.head_sqb;
@@ -1939,6 +2064,7 @@ int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ bool sq_resize_ena = roc_nix->sq_resize_ena;
struct mbox *m_box = (&nix->dev)->mbox;
uint16_t qid, smq = UINT16_MAX;
uint32_t rr_quantum = 0;
@@ -1964,7 +2090,10 @@ roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
goto fail;
}
- rc = sqb_pool_populate(roc_nix, sq);
+ if (sq_resize_ena)
+ rc = sqb_pool_dyn_populate(roc_nix, sq);
+ else
+ rc = sqb_pool_populate(roc_nix, sq);
if (rc)
goto nomem;
@@ -2014,19 +2143,38 @@ roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
return rc;
}
+static void
+nix_sqb_mem_dyn_free(uint64_t aura_handle, uint16_t count)
+{
+ uint64_t iova;
+
+ /* Recover SQB's and free them back */
+ while (count) {
+ iova = roc_npa_aura_op_alloc(aura_handle, 0);
+ if (!iova)
+ break;
+ plt_free((uint64_t *)iova);
+ count--;
+ }
+ if (count)
+ plt_err("Failed to recover %u SQB's", count);
+}
+
int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
- struct nix *nix;
- struct mbox *mbox;
+ struct roc_nix *roc_nix = sq->roc_nix;
+ bool sq_resize_ena = roc_nix->sq_resize_ena;
struct ndc_sync_op *ndc_req;
+ struct mbox *mbox;
+ struct nix *nix;
uint16_t qid;
int rc = 0;
if (sq == NULL)
return NIX_ERR_PARAM;
- nix = roc_nix_to_nix_priv(sq->roc_nix);
+ nix = roc_nix_to_nix_priv(roc_nix);
mbox = (&nix->dev)->mbox;
qid = sq->qid;
@@ -2058,14 +2206,201 @@ roc_nix_sq_fini(struct roc_nix_sq *sq)
* for aura drain to succeed.
*/
roc_npa_aura_limit_modify(sq->aura_handle, sq->aura_sqb_bufs);
+
+ if (sq_resize_ena)
+ nix_sqb_mem_dyn_free(sq->aura_handle, sq->aura_sqb_bufs);
+
rc |= roc_npa_pool_destroy(sq->aura_handle);
plt_free(sq->fc);
- plt_free(sq->sqe_mem);
+ if (!sq_resize_ena)
+ plt_free(sq->sqe_mem);
nix->sqs[qid] = NULL;
return rc;
}
+static int
+sqb_aura_dyn_expand(struct roc_nix_sq *sq, uint16_t count)
+{
+ struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
+ uint64_t *sqbs = NULL;
+ uint16_t blk_sz;
+ int i;
+
+ blk_sz = nix->sqb_size;
+ sqbs = calloc(1, count * sizeof(uint64_t *));
+ if (!sqbs)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ sqbs[i] = (uint64_t)plt_zmalloc(blk_sz, ROC_ALIGN);
+ if (!sqbs[i])
+ break;
+ }
+
+ if (i != count) {
+ i = i - 1;
+ for (; i >= 0; i--)
+ plt_free((void *)sqbs[i]);
+ free(sqbs);
+ return -ENOMEM;
+ }
+
+ plt_io_wmb();
+
+ /* Add new buffers to sqb aura */
+ for (i = 0; i < count; i++)
+ roc_npa_aura_op_free(sq->aura_handle, 0, sqbs[i]);
+ free(sqbs);
+
+ /* Adjust SQ info */
+ sq->nb_sqb_bufs += count;
+ sq->nb_sqb_bufs_adj += count;
+ sq->aura_sqb_bufs += count;
+ return 0;
+}
+
+static int
+sqb_aura_dyn_contract(struct roc_nix_sq *sq, uint16_t count)
+{
+ struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
+ struct dev *dev = &nix->dev;
+ struct ndc_sync_op *ndc_req;
+ uint64_t *sqbs = NULL;
+ struct mbox *mbox;
+ uint64_t timeout; /* 10's of usec */
+ uint64_t cycles;
+ int i, rc;
+
+ mbox = dev->mbox;
+ /* Sync NDC-NIX-TX for LF */
+ ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
+ if (ndc_req == NULL) {
+ mbox_put(mbox);
+ return -EFAULT;
+ }
+
+ ndc_req->nix_lf_tx_sync = 1;
+ rc = mbox_process(mbox);
+ if (rc) {
+ mbox_put(mbox);
+ return rc;
+ }
+ mbox_put(mbox);
+
+ /* Wait for enough time based on shaper min rate */
+ timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
+ /* Wait for worst case scenario of this SQ being last priority
+ * and so have to wait for all other SQ's drain out by their own.
+ */
+ timeout = timeout * nix->nb_tx_queues;
+ timeout = timeout / nix->tm_rate_min;
+ if (!timeout)
+ timeout = 10000;
+ cycles = (timeout * 10 * plt_tsc_hz()) / (uint64_t)1E6;
+ cycles += plt_tsc_cycles();
+
+ sqbs = calloc(1, count * sizeof(uint64_t *));
+ if (!sqbs)
+ return -ENOMEM;
+
+ i = 0;
+ while (i < count && plt_tsc_cycles() < cycles) {
+ sqbs[i] = roc_npa_aura_op_alloc(sq->aura_handle, 0);
+ if (sqbs[i])
+ i++;
+ else
+ plt_delay_us(1);
+ }
+
+ if (i != count) {
+ plt_warn("SQ %u busy, unable to recover %u SQB's(%u desc)", sq->qid, count,
+ count * sq->sqes_per_sqb);
+
+ /* Restore the SQB aura state and return */
+ i--;
+ for (; i >= 0; i--)
+ roc_npa_aura_op_free(sq->aura_handle, 0, sqbs[i]);
+ free(sqbs);
+ return -EAGAIN;
+ }
+
+ /* Extracted necessary SQB's, now free them */
+ for (i = 0; i < count; i++)
+ plt_free((void *)sqbs[i]);
+ free(sqbs);
+
+ /* Adjust SQ info */
+ sq->nb_sqb_bufs -= count;
+ sq->nb_sqb_bufs_adj -= count;
+ sq->aura_sqb_bufs -= count;
+ return 0;
+}
+
+int
+roc_nix_sq_resize(struct roc_nix_sq *sq, uint32_t nb_desc)
+{
+ struct roc_nix *roc_nix = sq->roc_nix;
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint16_t aura_sqb_bufs, nb_sqb_bufs, sqes_per_sqb;
+ int64_t *regaddr;
+ uint64_t wdata;
+ uint16_t diff;
+ int rc;
+
+ if (!roc_nix->sq_resize_ena)
+ return -ENOTSUP;
+
+ sqes_per_sqb = sq->sqes_per_sqb;
+
+ /* Calculate new nb_sqb_bufs */
+ nb_sqb_bufs = sq_desc_to_sqb(nix, sqes_per_sqb, nb_desc);
+ aura_sqb_bufs = sqb_slack_adjust(nix, nb_sqb_bufs, !!sq->sq_cnt_ptr);
+
+ if (aura_sqb_bufs == sq->aura_sqb_bufs)
+ return 0;
+
+ /* Issue atomic op to make sure all inflight LMTST's are complete
+ * assuming no new submissions will take place.
+ */
+ wdata = ((uint64_t)sq->qid) << 32;
+ regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
+ roc_atomic64_add_nosync(wdata, regaddr);
+
+ /* Expand or Contract SQB aura */
+ if (aura_sqb_bufs > sq->aura_sqb_bufs) {
+ /* Increase the limit */
+ roc_npa_aura_limit_modify(sq->aura_handle, aura_sqb_bufs);
+ diff = aura_sqb_bufs - sq->aura_sqb_bufs;
+ roc_npa_aura_op_cnt_set(sq->aura_handle, 1, diff);
+
+ rc = sqb_aura_dyn_expand(sq, diff);
+ } else {
+ diff = sq->aura_sqb_bufs - aura_sqb_bufs;
+ rc = sqb_aura_dyn_contract(sq, diff);
+
+ /* Decrease the limit */
+ if (!rc) {
+ roc_npa_aura_limit_modify(sq->aura_handle, aura_sqb_bufs);
+ roc_npa_aura_op_cnt_set(sq->aura_handle, 1, -(int64_t)diff);
+ }
+ }
+
+ plt_io_wmb();
+ if (!rc) {
+ sq->nb_desc = nb_desc;
+ if (sq->sq_cnt_ptr)
+ plt_atomic_store_explicit((uint64_t __rte_atomic *)sq->sq_cnt_ptr, nb_desc,
+ plt_memory_order_release);
+ *(uint64_t *)sq->fc = roc_npa_aura_op_cnt_get(sq->aura_handle);
+ } else {
+ roc_npa_aura_limit_modify(sq->aura_handle, sq->aura_sqb_bufs);
+ }
+
+ plt_io_wmb();
+ return rc;
+}
+
void
roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
uint32_t *tail)
diff --git a/drivers/common/cnxk/roc_platform_base_symbols.c b/drivers/common/cnxk/roc_platform_base_symbols.c
index f8d6fdd8df..138009198e 100644
--- a/drivers/common/cnxk/roc_platform_base_symbols.c
+++ b/drivers/common/cnxk/roc_platform_base_symbols.c
@@ -348,6 +348,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_rq_fini)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_cq_init)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_cq_fini)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_init)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_resize)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_fini)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_cq_head_tail_get)
RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_head_tail_get)
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 14/19] common/cnxk: increase Tx schedular count
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (11 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 13/19] common/cnxk: add support for SQ resize Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 15/19] common/cnxk: resolve klocwork issues Nithin Dabilpuram
` (4 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
CN10K platform supports Tx schedulers up to 2K.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/common/cnxk/roc_nix_priv.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index dc61a55d1b..b5553223cd 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -41,7 +41,7 @@ struct nix_qint {
};
/* Traffic Manager */
-#define NIX_TM_MAX_HW_TXSCHQ 1024
+#define NIX_TM_MAX_HW_TXSCHQ 2048
#define NIX_TM_HW_ID_INVALID UINT32_MAX
#define NIX_TM_CHAN_INVALID UINT16_MAX
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 15/19] common/cnxk: resolve klocwork issues
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (12 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 14/19] common/cnxk: increase Tx schedular count Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 16/19] common/cnxk: avoid null SQ access Nithin Dabilpuram
` (3 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev, Rakesh Kudurumalla
From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
Fix Klocwork-reported NULL pointer dereference issues
Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
drivers/common/cnxk/roc_nix_queue.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 3f11aa89fc..6e4079302b 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -112,6 +112,11 @@ roc_nix_sq_ena_dis(struct roc_nix_sq *sq, bool enable)
{
int rc = 0;
+ if (!sq) {
+ rc = NIX_ERR_PARAM;
+ goto done;
+ }
+
rc = roc_nix_tm_sq_aura_fc(sq, enable);
if (rc)
goto done;
@@ -2163,9 +2168,9 @@ nix_sqb_mem_dyn_free(uint64_t aura_handle, uint16_t count)
int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
- struct roc_nix *roc_nix = sq->roc_nix;
- bool sq_resize_ena = roc_nix->sq_resize_ena;
struct ndc_sync_op *ndc_req;
+ struct roc_nix *roc_nix;
+ bool sq_resize_ena;
struct mbox *mbox;
struct nix *nix;
uint16_t qid;
@@ -2174,6 +2179,9 @@ roc_nix_sq_fini(struct roc_nix_sq *sq)
if (sq == NULL)
return NIX_ERR_PARAM;
+ roc_nix = sq->roc_nix;
+ sq_resize_ena = roc_nix->sq_resize_ena;
+
nix = roc_nix_to_nix_priv(roc_nix);
mbox = (&nix->dev)->mbox;
@@ -2228,7 +2236,7 @@ sqb_aura_dyn_expand(struct roc_nix_sq *sq, uint16_t count)
int i;
blk_sz = nix->sqb_size;
- sqbs = calloc(1, count * sizeof(uint64_t *));
+ sqbs = calloc(1, count * sizeof(uint64_t));
if (!sqbs)
return -ENOMEM;
@@ -2300,7 +2308,7 @@ sqb_aura_dyn_contract(struct roc_nix_sq *sq, uint16_t count)
cycles = (timeout * 10 * plt_tsc_hz()) / (uint64_t)1E6;
cycles += plt_tsc_cycles();
- sqbs = calloc(1, count * sizeof(uint64_t *));
+ sqbs = calloc(1, count * sizeof(uint64_t));
if (!sqbs)
return -ENOMEM;
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 16/19] common/cnxk: avoid null SQ access
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (13 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 15/19] common/cnxk: resolve klocwork issues Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 17/19] common/cnxk: change in aura field width Nithin Dabilpuram
` (2 subsequent siblings)
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
From: Satha Rao <skoteshwar@marvell.com>
Add a condition to check that the SQ is non-NULL before access. Also, pktio locks
are simplified while doing threshold_profile config.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/common/cnxk/roc_nix_tm_ops.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 09d014a276..230e9b72f6 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -624,6 +624,13 @@ roc_nix_tm_hierarchy_xmit_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree t
sq_id = node->id;
sq = nix->sqs[sq_id];
+ if (!sq) {
+ plt_err("nb_rxq %d nb_txq %d sq_id %d lvl %d", nix->nb_rx_queues,
+ nix->nb_tx_queues, sq_id, node->lvl);
+ roc_nix_tm_dump(roc_nix, NULL);
+ roc_nix_dump(roc_nix, NULL);
+ return NIX_ERR_TM_INVALID_NODE;
+ }
rc = roc_nix_sq_ena_dis(sq, true);
if (rc) {
plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 17/19] common/cnxk: change in aura field width
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (14 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 16/19] common/cnxk: avoid null SQ access Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 18/19] common/cnxk: fix error handling on inline inbound setup Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 19/19] drivers: fix Klocwork issues Nithin Dabilpuram
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Aura field width has changed from 20 bits to 17 bits for
cn20k.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
drivers/common/cnxk/roc_npa_type.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/roc_npa_type.c b/drivers/common/cnxk/roc_npa_type.c
index ed90138944..4c794972c0 100644
--- a/drivers/common/cnxk/roc_npa_type.c
+++ b/drivers/common/cnxk/roc_npa_type.c
@@ -60,7 +60,7 @@ roc_npa_buf_type_mask(uint64_t aura_handle)
uint64_t
roc_npa_buf_type_limit_get(uint64_t type_mask)
{
- uint64_t wdata, reg;
+ uint64_t wdata, reg, shift;
uint64_t limit = 0;
struct npa_lf *lf;
uint64_t aura_id;
@@ -72,6 +72,7 @@ roc_npa_buf_type_limit_get(uint64_t type_mask)
if (lf == NULL)
return NPA_ERR_PARAM;
+ shift = roc_model_is_cn20k() ? 47 : 44;
for (aura_id = 0; aura_id < lf->nr_pools; aura_id++) {
if (plt_bitmap_get(lf->npa_bmp, aura_id))
continue;
@@ -87,7 +88,7 @@ roc_npa_buf_type_limit_get(uint64_t type_mask)
continue;
}
- wdata = aura_id << 44;
+ wdata = aura_id << shift;
addr = (int64_t *)(lf->base + NPA_LF_AURA_OP_LIMIT);
reg = roc_atomic64_add_nosync(wdata, addr);
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 18/19] common/cnxk: fix error handling on inline inbound setup
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (15 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 17/19] common/cnxk: change in aura field width Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 19/19] drivers: fix Klocwork issues Nithin Dabilpuram
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Harman Kalra
Cc: jerinj, dev
Fix issue reported by Klocwork.
Fixes: f410059baac6 ("common/cnxk: support inline inbound queue")
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_nix_inl_dev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 75d03c1077..38ff96334e 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -244,7 +244,7 @@ nix_inl_inb_queue_setup(struct nix_inl_dev *inl_dev, uint8_t slot_id)
if (!cpt_req) {
rc |= -ENOSPC;
} else {
- nix_req->enable = false;
+ cpt_req->enable = 0;
rc |= mbox_process(mbox);
}
cpt_cfg_fail:
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 19/19] drivers: fix Klocwork issues
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
` (16 preceding siblings ...)
2025-09-01 7:30 ` [PATCH 18/19] common/cnxk: fix error handling on inline inbound setup Nithin Dabilpuram
@ 2025-09-01 7:30 ` Nithin Dabilpuram
17 siblings, 0 replies; 19+ messages in thread
From: Nithin Dabilpuram @ 2025-09-01 7:30 UTC (permalink / raw)
To: Thomas Monjalon, Nithin Dabilpuram, Kiran Kumar K,
Sunil Kumar Kori, Satha Rao, Harman Kalra
Cc: jerinj, dev, Aarnav JP
From: Aarnav JP <ajp@marvell.com>
Fixed Klocwork-suggested issues in the
cnxk component of the drivers module.
Fixes: db5744d3cd23 ("common/cnxk: support NIX debug for CN20K")
Fixes: 3c31a7485172 ("common/cnxk: config CPT result address for CN20K")
Fixes: 4b8eb5bd6627 ("common/cnxk: reserve CPT LF for Rx inject")
Fixes: f410059baac6 ("common/cnxk: support inline inbound queue")
Fixes: 47cca253d605 ("net/cnxk: support Rx inject")
Fixes: ac35d4bf4cd6 ("net/cnxk: support ingress meter pre-color")
Signed-off-by: Aarnav JP <ajp@marvell.com>
---
.mailmap | 1 +
drivers/common/cnxk/roc_nix_debug.c | 4 ++--
drivers/common/cnxk/roc_nix_inl.c | 13 ++++++-------
drivers/net/cnxk/cn10k_ethdev_sec.c | 2 ++
drivers/net/cnxk/cnxk_ethdev_mtr.c | 8 +++++++-
5 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/.mailmap b/.mailmap
index 34a99f93a1..309551d007 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1,4 +1,5 @@
Aakash Sasidharan <asasidharan@marvell.com>
+Aarnav JP <ajp@marvell.com>
Aaro Koskinen <aaro.koskinen@nsn.com>
Aaron Campbell <aaron@arbor.net>
Aaron Conole <aconole@redhat.com>
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index f9294e693b..11994bf131 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -769,8 +769,8 @@ nix_lf_rq_dump(__io struct nix_cn20k_rq_ctx_s *ctx, FILE *file)
nix_dump(file, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
ctx->xqe_hdr_split, ctx->xqe_imm_copy);
- nix_dump(file, "W2: band_prof_id\t\t%d\n",
- ((ctx->band_prof_id_h << 10) | ctx->band_prof_id_l));
+ nix_dump(file, "W2: band_prof_id\t\t0x%" PRIx64 "\n",
+ (uint64_t)((ctx->band_prof_id_h << 10) | ctx->band_prof_id_l));
nix_dump(file, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
ctx->xqe_imm_size, ctx->later_skip);
nix_dump(file, "W2: sso_bp_ena\t\t%d\n", ctx->sso_bp_ena);
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index a5fc33b5c9..9135c1c172 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -581,7 +581,7 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
struct nix_inl_dev *inl_dev = NULL;
uint64_t max_sa = 1, sa_pow2_sz;
uint64_t sa_idx_w, lenm1_max;
- uint64_t res_addr_offset;
+ uint64_t res_addr_offset = 0;
uint64_t def_cptq = 0;
size_t inb_sa_sz = 1;
uint8_t profile_id;
@@ -626,12 +626,11 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
inl_dev = idev->nix_inl_dev;
if (inl_dev->nb_inb_cptlfs)
def_cptq = inl_dev->nix_inb_qids[inl_dev->inb_cpt_lf_id];
+ res_addr_offset = (uint64_t)(inl_dev->res_addr_offset & 0xFF) << 48;
+ if (res_addr_offset)
+ res_addr_offset |= (1UL << 56);
}
- res_addr_offset = (uint64_t)(inl_dev->res_addr_offset & 0xFF) << 48;
- if (res_addr_offset)
- res_addr_offset |= (1UL << 56);
-
lf_cfg->enable = 1;
lf_cfg->profile_id = profile_id;
lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
@@ -850,12 +849,12 @@ roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inb_inl_dev)
if (inb_inl_dev) {
inl_dev = idev->nix_inl_dev;
- if (inl_dev && inl_dev->attach_cptlf && inl_dev->rx_inj_ena &&
+ if (inl_dev && inl_dev->attach_cptlf && inl_dev->rx_inj_ena && roc_nix &&
roc_nix->rx_inj_ena)
return true;
}
- return roc_nix->rx_inj_ena;
+ return roc_nix ? roc_nix->rx_inj_ena : 0;
}
uint32_t
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 110630596e..5ecdc2b463 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -1336,6 +1336,8 @@ cn10k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
roc_idev_nix_rx_inject_set(port_id, enable);
inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
+ if (!inl_lf)
+ return -ENOTSUP;
sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
inj_cfg = &dev->inj_cfg;
diff --git a/drivers/net/cnxk/cnxk_ethdev_mtr.c b/drivers/net/cnxk/cnxk_ethdev_mtr.c
index edeca6dcc3..992e2d446e 100644
--- a/drivers/net/cnxk/cnxk_ethdev_mtr.c
+++ b/drivers/net/cnxk/cnxk_ethdev_mtr.c
@@ -1261,7 +1261,13 @@ nix_mtr_config_map(struct cnxk_meter_node *mtr, struct roc_nix_bpf_cfg *cfg)
cfg->alg = alg_map[profile->profile.alg];
cfg->lmode = profile->profile.packet_mode;
- cfg->icolor = color_map[mtr->params.default_input_color];
+ int idx = mtr->params.default_input_color;
+
+ /* Index validation */
+ if (idx >= RTE_COLORS)
+ cfg->icolor = ROC_NIX_BPF_COLOR_GREEN;
+ else
+ cfg->icolor = color_map[idx];
switch (RTE_MTR_COLOR_IN_PROTO_OUTER_IP) {
case RTE_MTR_COLOR_IN_PROTO_OUTER_IP:
--
2.34.1
^ permalink raw reply [flat|nested] 19+ messages in thread
end of thread, other threads:[~2025-09-01 7:33 UTC | newest]
Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-09-01 7:30 [PATCH 01/19] common/cnxk: add new TM tree for SDP interface Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 02/19] net/cnxk: new " Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 03/19] net/cnxk: disable CQ when SQ stopped Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 04/19] net/cnxk: update scatter check as warning for SDP Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 05/19] common/cnxk: fix inline device API Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 06/19] common/cnxk: add new mailbox to configure LSO alt flags Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 07/19] common/cnxk: add IPv4 fragmentation offload Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 08/19] common/cnxk: update DF flag in IPv4 fragments Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 09/19] common/cnxk: add support for per packet SQ count update Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 10/19] common/cnxk: feature fn to check 16B alignment Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 11/19] common/cnxk: add API to configure backpressure on pool Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 12/19] common/cnxk: fix max number of SQB bufs in clean up Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 13/19] common/cnxk: add support for SQ resize Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 14/19] common/cnxk: increase Tx schedular count Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 15/19] common/cnxk: resolve klocwork issues Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 16/19] common/cnxk: avoid null SQ access Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 17/19] common/cnxk: change in aura field width Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 18/19] common/cnxk: fix error handling on inline inbound setup Nithin Dabilpuram
2025-09-01 7:30 ` [PATCH 19/19] drivers: fix Klocwork issues Nithin Dabilpuram
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).