From: <skori@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>, Ray Kinsella <mdr@ashroe.eu>
Cc: <dev@dpdk.org>
Subject: [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API
Date: Tue, 11 Jan 2022 13:48:30 +0530
Message-ID: <20220111081831.881374-1-skori@marvell.com>
In-Reply-To: <20220109111130.751933-2-skori@marvell.com>
From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
the respective traffic per class on that link.

This patch adds a RoC interface to configure priority flow control
on the MAC block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v2:
- fix RoC API naming convention.
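
For reference, a minimal usage sketch of the new mode API from the
caller's side (illustrative only, not part of the patch; assumes a
valid struct roc_nix handle):

    struct roc_nix_pfc_cfg pfc_cfg;
    int rc;

    /* Enable PFC pause in both directions for traffic class 3. */
    pfc_cfg.mode = ROC_NIX_FC_FULL;
    pfc_cfg.tc = 3; /* for SET, a class index in [0, 15] */
    rc = roc_nix_pfc_mode_set(roc_nix, &pfc_cfg);
    if (rc)
        return rc;

    /* Read the state back; pfc_cfg.tc now holds a bitmap of the
     * enabled classes, e.g. BIT(3) after the call above.
     */
    rc = roc_nix_pfc_mode_get(roc_nix, &pfc_cfg);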
drivers/common/cnxk/roc_mbox.h | 17 +++
drivers/common/cnxk/roc_nix.h | 21 ++++
drivers/common/cnxk/roc_nix_fc.c | 95 ++++++++++++++--
drivers/common/cnxk/roc_nix_priv.h | 4 +-
drivers/common/cnxk/roc_nix_tm.c | 162 ++++++++++++++++++++++++++-
drivers/common/cnxk/roc_nix_tm_ops.c | 14 ++-
drivers/common/cnxk/version.map | 4 +
7 files changed, 298 insertions(+), 19 deletions(-)
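
Likewise, a sketch of how per-class backpressure could be driven
through the extended roc_nix_fc_cfg (rq_id, sq_id and cq_drop_level
are placeholder variables; the actual driver integration is part of
patch 2/2):

    struct roc_nix_fc_cfg fc_cfg;

    /* Prepare the PFC-specific TM hierarchy, one leaf per SQ. */
    rc = roc_nix_tm_pfc_prepare_tree(roc_nix);

    /* Point the RQ's CQ at the bpid of traffic class 3. */
    memset(&fc_cfg, 0, sizeof(fc_cfg));
    fc_cfg.type = ROC_NIX_FC_CQ_CFG;
    fc_cfg.cq_cfg.rq = rq_id;
    fc_cfg.cq_cfg.tc = 3;
    fc_cfg.cq_cfg.cq_drop = cq_drop_level;
    fc_cfg.cq_cfg.enable = true;
    rc = roc_nix_fc_config_set(roc_nix, &fc_cfg);

    /* Enable TM-level backpressure for the SQ on the same class. */
    memset(&fc_cfg, 0, sizeof(fc_cfg));
    fc_cfg.type = ROC_NIX_FC_TM_CFG;
    fc_cfg.tm_cfg.sq = sq_id;
    fc_cfg.tm_cfg.tc = 3;
    fc_cfg.tm_cfg.enable = true;
    rc = roc_nix_fc_config_set(roc_nix, &fc_cfg);
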
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index b63fe108c9..12a5922229 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
msg_rsp) \
M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+ M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req, \
npa_lf_alloc_rsp) \
@@ -540,6 +542,19 @@ struct cgx_pause_frm_cfg {
uint8_t __io tx_pause;
};
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ uint8_t __io rx_pause;
+ uint8_t __io tx_pause;
+ uint16_t __io pfc_en; /* bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __io rx_pause;
+ uint8_t __io tx_pause;
+};
+
struct sfp_eeprom_s {
#define SFP_EEPROM_SIZE 256
uint16_t __io sff_id;
@@ -1115,6 +1130,8 @@ struct nix_bp_cfg_req {
* so maximum 64 channels are possible.
*/
#define NIX_MAX_CHAN 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN NIX_MAX_CHAN
struct nix_bp_cfg_rsp {
struct mbox_msghdr hdr;
/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 69a5e8e7b4..e05b7b7dd8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
struct {
uint32_t rq;
+ uint16_t tc;
uint16_t cq_drop;
bool enable;
} cq_cfg;
struct {
+ uint32_t sq;
+ uint16_t tc;
bool enable;
} tm_cfg;
};
};
+struct roc_nix_pfc_cfg {
+ enum roc_nix_fc_mode mode;
+ /* For SET, tc must be in [0, 15].
+ * For GET, tc holds a bitmap of the enabled traffic classes.
+ */
+ uint16_t tc;
+};
+
struct roc_nix_eeprom_info {
#define ROC_NIX_EEPROM_SIZE 256
uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
enum roc_nix_tm_tree {
ROC_NIX_TM_DEFAULT = 0,
ROC_NIX_TM_RLIMIT,
+ ROC_NIX_TM_PFC,
ROC_NIX_TM_USER,
ROC_NIX_TM_TREE_MAX,
};
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
@@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
enum roc_nix_fc_mode mode);
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+ struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+ struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ca29cd2bf9..95f466242a 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
struct mbox *mbox = get_mbox(roc_nix);
struct nix_bp_cfg_req *req;
struct nix_bp_cfg_rsp *rsp;
- int rc = -ENOSPC;
+ int rc = -ENOSPC, i;
if (roc_nix_is_sdp(roc_nix))
return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
req = mbox_alloc_msg_nix_bp_enable(mbox);
if (req == NULL)
return rc;
+
req->chan_base = 0;
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
+ if (roc_nix_is_lbk(roc_nix))
+ req->chan_cnt = NIX_LBK_MAX_CHAN;
+ else
+ req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+ req->bpid_per_chan = true;
rc = mbox_process_msg(mbox, (void *)&rsp);
if (rc || (req->chan_cnt != rsp->chan_cnt))
goto exit;
- nix->bpid[0] = rsp->chan_bpid[0];
nix->chan_cnt = rsp->chan_cnt;
+ for (i = 0; i < rsp->chan_cnt; i++)
+ nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
} else {
req = mbox_alloc_msg_nix_bp_disable(mbox);
if (req == NULL)
return rc;
req->chan_base = 0;
- req->chan_cnt = 1;
+ req->chan_cnt = nix->chan_cnt;
rc = mbox_process(mbox);
if (rc)
@@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
aq->op = NIX_AQ_INSTOP_WRITE;
if (fc_cfg->cq_cfg.enable) {
- aq->cq.bpid = nix->bpid[0];
+ aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
aq->op = NIX_AQ_INSTOP_WRITE;
if (fc_cfg->cq_cfg.enable) {
- aq->cq.bpid = nix->bpid[0];
+ aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
return nix_fc_rxchan_bpid_set(roc_nix,
fc_cfg->rxchan_cfg.enable);
else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
- return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+ return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+ fc_cfg->tm_cfg.tc,
+ fc_cfg->tm_cfg.enable);
return -EINVAL;
}
@@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
mbox_process(mbox);
}
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct mbox *mbox = get_mbox(roc_nix);
+ uint8_t tx_pause, rx_pause;
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int rc = -ENOSPC;
+
+ if (roc_nix_is_lbk(roc_nix))
+ return NIX_ERR_OP_NOTSUP;
+
+ rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+ (pfc_cfg->mode == ROC_NIX_FC_RX);
+ tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+ (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+ req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+ if (req == NULL)
+ goto exit;
+
+ req->pfc_en = BIT(pfc_cfg->tc);
+ req->rx_pause = rx_pause;
+ req->tx_pause = tx_pause;
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto exit;
+
+ nix->rx_pause = rsp->rx_pause;
+ nix->tx_pause = rsp->tx_pause;
+ if (rsp->tx_pause)
+ nix->cev |= BIT(pfc_cfg->tc);
+ else
+ nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+ return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if (roc_nix_is_lbk(roc_nix))
+ return NIX_ERR_OP_NOTSUP;
+
+ pfc_cfg->tc = nix->cev;
+
+ if (nix->rx_pause && nix->tx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_FULL;
+ else if (nix->rx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_RX;
+ else if (nix->tx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_TX;
+ else
+ pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+ return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 04575af295..f5bb09dc3b 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -139,6 +139,7 @@ struct nix {
uint16_t msixoff;
uint8_t rx_pause;
uint8_t tx_pause;
+ uint16_t cev;
uint64_t rx_cfg;
struct dev dev;
uint16_t cints;
@@ -376,7 +377,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
bool ena);
int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ bool enable);
/*
* TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index b3d8ebd3c2..0775bca7b0 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
if (is_pf_or_lbk && !skip_bp &&
node->hw_lvl == nix->tm_link_cfg_lvl) {
node->bp_capa = 1;
- skip_bp = true;
+ skip_bp = false;
}
rc = nix_tm_node_reg_conf(nix, node);
@@ -317,18 +317,31 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
}
int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ bool enable)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
enum roc_nix_tm_tree tree = nix->tm_tree;
struct mbox *mbox = (&nix->dev)->mbox;
struct nix_txschq_config *req = NULL;
struct nix_tm_node_list *list;
+ struct nix_tm_node *sq_node;
+ struct nix_tm_node *parent;
struct nix_tm_node *node;
uint8_t k = 0;
uint16_t link;
int rc = 0;
+ sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+ parent = sq_node->parent;
+ while (parent) {
+ if (parent->lvl == ROC_TM_LVL_SCH2)
+ break;
+
+ parent = parent->parent;
+ }
+
+
list = nix_tm_node_list(nix, tree);
link = nix->tx_link;
@@ -339,6 +352,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
continue;
+ if (node->hw_id != parent->hw_id)
+ continue;
+
if (!req) {
req = mbox_alloc_msg_nix_txschq_cfg(mbox);
req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +362,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
}
req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
- req->regval[k] = enable ? BIT_ULL(13) : 0;
- req->regval_mask[k] = ~BIT_ULL(13);
+ req->regval[k] = enable ? tc : 0;
+ req->regval[k] |= enable ? BIT_ULL(13) : 0;
+ req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
k++;
if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -602,7 +619,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
}
/* Disable backpressure */
- rc = nix_tm_bp_config_set(roc_nix, false);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
if (rc) {
plt_err("Failed to disable backpressure for flush, rc=%d", rc);
return rc;
@@ -731,7 +748,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
return 0;
/* Restore backpressure */
- rc = nix_tm_bp_config_set(roc_nix, true);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
if (rc) {
plt_err("Failed to restore backpressure, rc=%d", rc);
return rc;
@@ -1420,6 +1437,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
return rc;
}
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint32_t nonleaf_id = nix->nb_tx_queues;
+ struct nix_tm_node *node = NULL;
+ uint8_t leaf_lvl, lvl, lvl_end;
+ uint32_t tl2_node_id;
+ uint32_t parent, i;
+ int rc = -ENOMEM;
+
+ parent = ROC_NIX_TM_NODE_ID_INVALID;
+ lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
+ ROC_TM_LVL_SCH2);
+ leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+ ROC_TM_LVL_SCH4);
+
+ /* TL1 node */
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_ROOT;
+ node->tree = ROC_NIX_TM_PFC;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ /* TL2 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_SCH1;
+ node->tree = ROC_NIX_TM_PFC;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ tl2_node_id = nonleaf_id;
+ nonleaf_id++;
+
+ for (i = 0; i < nix->nb_tx_queues; i++) {
+ parent = tl2_node_id;
+ for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id =
+ ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_PFC;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+ }
+
+ /* SMQ is mapped to SCH4 when we have TL1 access and SCH3
+ * otherwise
+ */
+ lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
+ ROC_TM_LVL_SCH3);
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_PFC;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = i;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = leaf_lvl;
+ node->tree = ROC_NIX_TM_PFC;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+error:
+ nix_tm_node_free(node);
+ return rc;
+}
+
int
nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
{
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3257fa67c7..8aa2996956 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
/* Disable backpressure, it will be enabled back if needed on
* hierarchy enable
*/
- rc = nix_tm_bp_config_set(roc_nix, false);
- if (rc) {
- plt_err("Failed to disable backpressure for flush, rc=%d", rc);
- goto cleanup;
+ for (i = 0; i < sq_cnt; i++) {
+ sq = nix->sqs[i];
+ if (!sq)
+ continue;
+
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ if (rc) {
+ plt_err("Failed to disable backpressure, rc=%d", rc);
+ goto cleanup;
+ }
}
/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 07c6720f0c..cee33c40cc 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -105,6 +105,7 @@ INTERNAL {
roc_nix_bpf_stats_reset;
roc_nix_bpf_stats_to_idx;
roc_nix_bpf_timeunit_get;
+ roc_nix_chan_count_get;
roc_nix_cq_dump;
roc_nix_cq_fini;
roc_nix_cq_init;
@@ -195,6 +196,8 @@ INTERNAL {
roc_nix_npc_promisc_ena_dis;
roc_nix_npc_rx_ena_dis;
roc_nix_npc_mcast_config;
+ roc_nix_pfc_mode_set;
+ roc_nix_pfc_mode_get;
roc_nix_ptp_clock_read;
roc_nix_ptp_info_cb_register;
roc_nix_ptp_info_cb_unregister;
@@ -259,6 +262,7 @@ INTERNAL {
roc_nix_tm_node_stats_get;
roc_nix_tm_node_suspend_resume;
roc_nix_tm_prealloc_res;
+ roc_nix_tm_pfc_prepare_tree;
roc_nix_tm_prepare_rate_limited_tree;
roc_nix_tm_rlimit_sq;
roc_nix_tm_root_has_sp;
--
2.25.1