From: <skori@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>, Ray Kinsella <mdr@ashroe.eu>
Cc: <dev@dpdk.org>
Subject: [PATCH v9 1/2] common/cnxk: support priority flow ctrl config API
Date: Mon, 14 Feb 2022 15:40:00 +0530
Message-ID: <20220214101001.498992-1-skori@marvell.com>
In-Reply-To: <20220214090247.493995-2-skori@marvell.com>
From: Sunil Kumar Kori <skori@marvell.com>
CNXK platforms support priority flow control (802.1Qbb) to pause the
corresponding traffic class on the link.
Add a RoC interface to configure priority flow control on the MAC
block, i.e. CGX on cn9k and RPM on cn10k.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
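Usage note (illustrative only, not part of this patch): below is a minimal
sketch of how a PMD might drive the new RoC calls. The roc_* names and
struct fields are taken from the diff; the wrapper example_pfc_enable(),
its parameters, the header choice and the omitted steps (TM hierarchy
enable, error recovery) are assumptions made purely for illustration.

#include <stdbool.h>
#include <string.h>
#include "roc_api.h"    /* assumed RoC umbrella header */

static int
example_pfc_enable(struct roc_nix *roc_nix, uint16_t qid, uint16_t tc)
{
    struct roc_nix_pfc_cfg pfc_cfg;
    struct roc_nix_fc_cfg fc_cfg;
    int rc;

    /* Build the PFC-capable TM hierarchy added by this patch.
     * A real driver would also enable the hierarchy afterwards.
     */
    rc = roc_nix_tm_pfc_prepare_tree(roc_nix);
    if (rc)
        return rc;

    /* Map backpressure for the given SQ to the traffic class. */
    memset(&fc_cfg, 0, sizeof(fc_cfg));
    fc_cfg.type = ROC_NIX_FC_TM_CFG;
    fc_cfg.tm_cfg.sq = qid;
    fc_cfg.tm_cfg.tc = tc;
    fc_cfg.tm_cfg.enable = true;
    rc = roc_nix_fc_config_set(roc_nix, &fc_cfg);
    if (rc)
        return rc;

    /* Program PFC on the MAC block (CGX on cn9k, RPM on cn10k). */
    memset(&pfc_cfg, 0, sizeof(pfc_cfg));
    pfc_cfg.mode = ROC_NIX_FC_FULL;
    pfc_cfg.tc = tc;    /* class index for SET; GET returns a bitmap */
    return roc_nix_pfc_mode_set(roc_nix, &pfc_cfg);
}
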
v1..v2:
- fix RoC API naming convention.
v2..v3:
- fix pause quanta configuration for cn10k.
- remove unnecessary code
v3..v4:
- fix PFC configuration with other types of TM tree,
i.e. default, user and rate limit trees.
v4..v5:
- rebase on top of tree
- fix review comments
- fix initialization error for LBK devices
v5..v6:
- fix review comments
v6..v7:
- no change
v7..v8:
- rebase on top of 22.03-rc1
v8..v9:
- no change
drivers/common/cnxk/roc_mbox.h | 19 ++-
drivers/common/cnxk/roc_nix.h | 21 ++++
drivers/common/cnxk/roc_nix_fc.c | 95 +++++++++++++--
drivers/common/cnxk/roc_nix_priv.h | 6 +-
drivers/common/cnxk/roc_nix_tm.c | 171 ++++++++++++++++++++++++++-
drivers/common/cnxk/roc_nix_tm_ops.c | 14 ++-
drivers/common/cnxk/version.map | 4 +
7 files changed, 310 insertions(+), 20 deletions(-)
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 8967858914..b608f58357 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
msg_rsp) \
M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+ M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req, \
npa_lf_alloc_rsp) \
@@ -551,6 +553,19 @@ struct cgx_pause_frm_cfg {
uint8_t __io tx_pause;
};
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ uint8_t __io rx_pause;
+ uint8_t __io tx_pause;
+ uint16_t __io pfc_en; /* bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __io rx_pause;
+ uint8_t __io tx_pause;
+};
+
struct sfp_eeprom_s {
#define SFP_EEPROM_SIZE 256
uint16_t __io sff_id;
@@ -1125,7 +1140,9 @@ struct nix_bp_cfg_req {
/* PF can be mapped to either CGX or LBK interface,
* so maximum 64 channels are possible.
*/
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
struct nix_bp_cfg_rsp {
struct mbox_msghdr hdr;
/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 755212c8f9..680a34cdcd 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
struct {
uint32_t rq;
+ uint16_t tc;
uint16_t cq_drop;
bool enable;
} cq_cfg;
struct {
+ uint32_t sq;
+ uint16_t tc;
bool enable;
} tm_cfg;
};
};
+struct roc_nix_pfc_cfg {
+ enum roc_nix_fc_mode mode;
+ /* For SET, tc must be in [0, 15].
+ * For GET, tc represents a bitmap of enabled traffic classes.
+ */
+ uint16_t tc;
+};
+
struct roc_nix_eeprom_info {
#define ROC_NIX_EEPROM_SIZE 256
uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
enum roc_nix_tm_tree {
ROC_NIX_TM_DEFAULT = 0,
ROC_NIX_TM_RLIMIT,
+ ROC_NIX_TM_PFC,
ROC_NIX_TM_USER,
ROC_NIX_TM_TREE_MAX,
};
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
@@ -739,6 +752,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
enum roc_nix_fc_mode mode);
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+ struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+ struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index d31137188e..8e31443b8f 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
struct mbox *mbox = get_mbox(roc_nix);
struct nix_bp_cfg_req *req;
struct nix_bp_cfg_rsp *rsp;
- int rc = -ENOSPC;
+ int rc = -ENOSPC, i;
if (roc_nix_is_sdp(roc_nix))
return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
req = mbox_alloc_msg_nix_bp_enable(mbox);
if (req == NULL)
return rc;
+
req->chan_base = 0;
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
+ if (roc_nix_is_lbk(roc_nix))
+ req->chan_cnt = NIX_LBK_MAX_CHAN;
+ else
+ req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+ req->bpid_per_chan = true;
rc = mbox_process_msg(mbox, (void *)&rsp);
if (rc || (req->chan_cnt != rsp->chan_cnt))
goto exit;
- nix->bpid[0] = rsp->chan_bpid[0];
nix->chan_cnt = rsp->chan_cnt;
+ for (i = 0; i < rsp->chan_cnt; i++)
+ nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
} else {
req = mbox_alloc_msg_nix_bp_disable(mbox);
if (req == NULL)
return rc;
req->chan_base = 0;
- req->chan_cnt = 1;
+ req->chan_cnt = nix->chan_cnt;
rc = mbox_process(mbox);
if (rc)
@@ -161,7 +167,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
aq->op = NIX_AQ_INSTOP_WRITE;
if (fc_cfg->cq_cfg.enable) {
- aq->cq.bpid = nix->bpid[0];
+ aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -181,7 +187,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
aq->op = NIX_AQ_INSTOP_WRITE;
if (fc_cfg->cq_cfg.enable) {
- aq->cq.bpid = nix->bpid[0];
+ aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -222,7 +228,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
return nix_fc_rxchan_bpid_set(roc_nix,
fc_cfg->rxchan_cfg.enable);
else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
- return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+ return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+ fc_cfg->tm_cfg.tc,
+ fc_cfg->tm_cfg.enable);
return -EINVAL;
}
@@ -403,3 +411,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
mbox_process(mbox);
}
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct mbox *mbox = get_mbox(roc_nix);
+ uint8_t tx_pause, rx_pause;
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int rc = -ENOSPC;
+
+ if (roc_nix_is_lbk(roc_nix))
+ return NIX_ERR_OP_NOTSUP;
+
+ rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+ (pfc_cfg->mode == ROC_NIX_FC_RX);
+ tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+ (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+ req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+ if (req == NULL)
+ goto exit;
+
+ req->pfc_en = pfc_cfg->tc;
+ req->rx_pause = rx_pause;
+ req->tx_pause = tx_pause;
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto exit;
+
+ nix->rx_pause = rsp->rx_pause;
+ nix->tx_pause = rsp->tx_pause;
+ if (rsp->tx_pause)
+ nix->cev |= BIT(pfc_cfg->tc);
+ else
+ nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+ return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if (roc_nix_is_lbk(roc_nix))
+ return NIX_ERR_OP_NOTSUP;
+
+ pfc_cfg->tc = nix->cev;
+
+ if (nix->rx_pause && nix->tx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_FULL;
+ else if (nix->rx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_RX;
+ else if (nix->tx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_TX;
+ else
+ pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+ return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index deb2a6ba11..f3889424c4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
/* Traffic Manager */
#define NIX_TM_MAX_HW_TXSCHQ 512
#define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
/* TM flags */
#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
uint32_t priority;
uint32_t weight;
uint16_t lvl;
+ uint16_t rel_chan;
uint32_t parent_id;
uint32_t shaper_profile_id;
void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
uint16_t msixoff;
uint8_t rx_pause;
uint8_t tx_pause;
+ uint16_t cev;
uint64_t rx_cfg;
struct dev dev;
uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
bool ena);
int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ bool enable);
void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
/*
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index a0448bec61..670cf66db4 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
if (is_pf_or_lbk && !skip_bp &&
node->hw_lvl == nix->tm_link_cfg_lvl) {
node->bp_capa = 1;
- skip_bp = true;
+ skip_bp = false;
}
rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
}
int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ bool enable)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
enum roc_nix_tm_tree tree = nix->tm_tree;
struct mbox *mbox = (&nix->dev)->mbox;
struct nix_txschq_config *req = NULL;
struct nix_tm_node_list *list;
+ struct nix_tm_node *sq_node;
+ struct nix_tm_node *parent;
struct nix_tm_node *node;
uint8_t k = 0;
uint16_t link;
int rc = 0;
+ sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+ parent = sq_node->parent;
+ while (parent) {
+ if (parent->lvl == ROC_TM_LVL_SCH2)
+ break;
+
+ parent = parent->parent;
+ }
+
list = nix_tm_node_list(nix, tree);
link = nix->tx_link;
+ if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+ rc = -EINVAL;
+ goto err;
+ }
+
TAILQ_FOREACH(node, list, node) {
if (node->hw_lvl != nix->tm_link_cfg_lvl)
continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
continue;
+ if (node->hw_id != parent->hw_id)
+ continue;
+
if (!req) {
req = mbox_alloc_msg_nix_txschq_cfg(mbox);
req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
}
req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
- req->regval[k] = enable ? BIT_ULL(13) : 0;
- req->regval_mask[k] = ~BIT_ULL(13);
+ req->regval[k] = enable ? tc : 0;
+ req->regval[k] |= enable ? BIT_ULL(13) : 0;
+ req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
k++;
if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
goto err;
}
+ parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
return 0;
err:
plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
}
/* Disable backpressure */
- rc = nix_tm_bp_config_set(roc_nix, false);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
if (rc) {
plt_err("Failed to disable backpressure for flush, rc=%d", rc);
return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
return 0;
/* Restore backpressure */
- rc = nix_tm_bp_config_set(roc_nix, true);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
if (rc) {
plt_err("Failed to restore backpressure, rc=%d", rc);
return rc;
@@ -1299,6 +1321,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = lvl;
node->tree = ROC_NIX_TM_DEFAULT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
@@ -1325,6 +1348,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = leaf_lvl;
node->tree = ROC_NIX_TM_DEFAULT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
@@ -1365,6 +1389,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = lvl;
node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
@@ -1390,6 +1415,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = lvl;
node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
@@ -1414,6 +1440,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = leaf_lvl;
node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+error:
+ nix_tm_node_free(node);
+ return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint32_t nonleaf_id = nix->nb_tx_queues;
+ struct nix_tm_node *node = NULL;
+ uint8_t leaf_lvl, lvl, lvl_end;
+ uint32_t tl2_node_id;
+ uint32_t parent, i;
+ int rc = -ENOMEM;
+
+ parent = ROC_NIX_TM_NODE_ID_INVALID;
+ lvl_end = ROC_TM_LVL_SCH3;
+ leaf_lvl = ROC_TM_LVL_QUEUE;
+
+ /* TL1 node */
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_ROOT;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ /* TL2 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_SCH1;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ tl2_node_id = nonleaf_id;
+ nonleaf_id++;
+
+ for (i = 0; i < nix->nb_tx_queues; i++) {
+ parent = tl2_node_id;
+ for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id =
+ ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+ }
+
+ lvl = ROC_TM_LVL_SCH4;
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = i;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = leaf_lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
/* Disable backpressure, it will be enabled back if needed on
* hierarchy enable
*/
- rc = nix_tm_bp_config_set(roc_nix, false);
- if (rc) {
- plt_err("Failed to disable backpressure for flush, rc=%d", rc);
- goto cleanup;
+ for (i = 0; i < sq_cnt; i++) {
+ sq = nix->sqs[i];
+ if (!sq)
+ continue;
+
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ if (rc) {
+ plt_err("Failed to disable backpressure, rc=%d", rc);
+ goto cleanup;
+ }
}
/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index ad1b5e8476..37ec100451 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -107,6 +107,7 @@ INTERNAL {
roc_nix_bpf_stats_reset;
roc_nix_bpf_stats_to_idx;
roc_nix_bpf_timeunit_get;
+ roc_nix_chan_count_get;
roc_nix_cq_dump;
roc_nix_cq_fini;
roc_nix_cq_head_tail_get;
@@ -198,6 +199,8 @@ INTERNAL {
roc_nix_npc_promisc_ena_dis;
roc_nix_npc_rx_ena_dis;
roc_nix_npc_mcast_config;
+ roc_nix_pfc_mode_get;
+ roc_nix_pfc_mode_set;
roc_nix_ptp_clock_read;
roc_nix_ptp_info_cb_register;
roc_nix_ptp_info_cb_unregister;
@@ -263,6 +266,7 @@ INTERNAL {
roc_nix_tm_node_stats_get;
roc_nix_tm_node_suspend_resume;
roc_nix_tm_prealloc_res;
+ roc_nix_tm_pfc_prepare_tree;
roc_nix_tm_prepare_rate_limited_tree;
roc_nix_tm_rlimit_sq;
roc_nix_tm_root_has_sp;
--
2.25.1
Thread overview: 29+ messages
2022-01-09 11:11 [PATCH v1 " skori
2022-01-09 11:11 ` [PATCH v1 2/2] net/cnxk: support priority flow control skori
2022-01-09 11:18 ` Sunil Kumar Kori
2022-01-11 8:18 ` [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-11 8:18 ` [PATCH v2 2/2] net/cnxk: support priority flow control skori
2022-01-18 13:28 ` [PATCH v3 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-18 13:28 ` [PATCH v3 2/2] net/cnxk: support priority flow control skori
2022-01-20 16:59 ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-20 16:59 ` [PATCH v4 2/2] net/cnxk: support priority flow control skori
2022-01-25 10:02 ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API Ray Kinsella
2022-01-25 10:19 ` [EXT] " Sunil Kumar Kori
2022-01-25 11:23 ` [PATCH v5 " skori
2022-01-25 11:23 ` [PATCH v5 2/2] net/cnxk: support priority flow control skori
2022-01-28 13:28 ` [PATCH v6 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-28 13:29 ` [PATCH v6 2/2] net/cnxk: support priority flow control skori
2022-02-07 17:21 ` [PATCH v7 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-07 17:21 ` [PATCH v7 2/2] net/cnxk: support priority flow control skori
2022-02-14 9:02 ` [PATCH v8 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-14 9:02 ` [PATCH v8 2/2] net/cnxk: support priority flow control skori
2022-02-14 10:10 ` skori [this message]
2022-02-14 10:10 ` [PATCH v9 " skori
2022-02-18 6:11 ` Jerin Jacob
2022-02-22 8:06 ` [EXT] " Sunil Kumar Kori
2022-02-22 8:58 ` [PATCH v10 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-22 8:58 ` [PATCH v10 2/2] net/cnxk: support priority flow control skori
2022-02-22 10:37 ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-22 10:37 ` [PATCH v11 2/2] net/cnxk: support priority flow control skori
2022-02-23 11:36 ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API Jerin Jacob
2022-02-14 10:06 ` [PATCH v8 " Ray Kinsella