* [PATCH 02/12] common/cnxk: avoid CPT backpressure due to errata
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 03/12] common/cnxk: add PFC support for VFs Nithin Dabilpuram
` (11 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
Avoid enabling CPT backpressure due to an errata where
backpressure would block requests even from other
CPT LFs. Also allow CQ size >= 16K.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_errata.h | 7 +++++++
drivers/common/cnxk/roc_nix.h | 2 +-
drivers/common/cnxk/roc_nix_fc.c | 3 ++-
3 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index 31162d5..f048297 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -77,4 +77,11 @@ roc_errata_nix_has_perf_issue_on_stats_update(void)
return true;
}
+/* Errata IPBUCPT-38726, IPBUCPT-38727 */
+static inline bool
+roc_errata_cpt_hang_on_x2p_bp(void)
+{
+ return roc_model_is_cn10ka_a0();
+}
+
#endif /* _ROC_ERRATA_H_ */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index aedde1c..944e4c6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -309,7 +309,7 @@ struct roc_nix_rq {
struct roc_nix_cq {
/* Input parameters */
uint16_t qid;
- uint16_t nb_desc;
+ uint32_t nb_desc;
/* End of Input parameters */
uint16_t drop_thresh;
struct roc_nix *roc_nix;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index a0505bd..cef5d07 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -77,7 +77,8 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
goto exit;
/* Enable backpressure on CPT if inline inb is enabled */
- if (enable && roc_nix_inl_inb_is_enabled(roc_nix)) {
+ if (enable && roc_nix_inl_inb_is_enabled(roc_nix) &&
+ !roc_errata_cpt_hang_on_x2p_bp()) {
req = mbox_alloc_msg_nix_cpt_bp_enable(mbox);
if (req == NULL)
return rc;
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 03/12] common/cnxk: add PFC support for VFs
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 02/12] common/cnxk: avoid CPT backpressure due to errata Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-23 16:01 ` Ray Kinsella
2022-06-16 7:07 ` [PATCH 04/12] common/cnxk: support same TC value across multiple queues Nithin Dabilpuram
` (10 subsequent siblings)
12 siblings, 1 reply; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Ray Kinsella, Pavan Nikhilesh, Shijith Thotton
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Current PFC implementation does not support VFs.
Patch enables PFC on VFs too.
Also fix the config of aura.bp to be based on the number
of buffers (aura.limit) and the corresponding shift
value (aura.shift).
Fixes: cb4bfd6e7bdf ("event/cnxk: support Rx adapter")
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
drivers/common/cnxk/roc_nix.h | 14 +++-
drivers/common/cnxk/roc_nix_fc.c | 120 +++++++++++++++++++++++++++----
drivers/common/cnxk/roc_nix_priv.h | 2 +
drivers/common/cnxk/roc_nix_queue.c | 47 ++++++++++++
drivers/common/cnxk/roc_nix_tm.c | 67 +++++++++--------
drivers/common/cnxk/version.map | 3 +-
drivers/event/cnxk/cnxk_eventdev_adptr.c | 12 ++--
drivers/net/cnxk/cnxk_ethdev.h | 2 +
8 files changed, 217 insertions(+), 50 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 944e4c6..f0d7fc8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -157,6 +157,7 @@ struct roc_nix_fc_cfg {
#define ROC_NIX_FC_RXCHAN_CFG 0
#define ROC_NIX_FC_CQ_CFG 1
#define ROC_NIX_FC_TM_CFG 2
+#define ROC_NIX_FC_RQ_CFG 3
uint8_t type;
union {
struct {
@@ -171,6 +172,14 @@ struct roc_nix_fc_cfg {
} cq_cfg;
struct {
+ uint32_t rq;
+ uint16_t tc;
+ uint16_t cq_drop;
+ bool enable;
+ uint64_t pool;
+ } rq_cfg;
+
+ struct {
uint32_t sq;
uint16_t tc;
bool enable;
@@ -791,8 +800,8 @@ uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
-void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
- uint8_t ena, uint8_t force);
+void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
+ uint8_t ena, uint8_t force, uint8_t tc);
/* NPC */
int __roc_api roc_nix_npc_promisc_ena_dis(struct roc_nix *roc_nix, int enable);
@@ -845,6 +854,7 @@ int __roc_api roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
int __roc_api roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
bool ena);
int __roc_api roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable);
+int __roc_api roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid);
int __roc_api roc_nix_rq_fini(struct roc_nix_rq *rq);
int __roc_api roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq);
int __roc_api roc_nix_cq_fini(struct roc_nix_cq *cq);
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index cef5d07..daae285 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -148,6 +148,61 @@ nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
}
static int
+nix_fc_rq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+{
+ struct mbox *mbox = get_mbox(roc_nix);
+ struct nix_aq_enq_rsp *rsp;
+ struct npa_aq_enq_req *npa_req;
+ struct npa_aq_enq_rsp *npa_rsp;
+ int rc;
+
+ if (roc_model_is_cn9k()) {
+ struct nix_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = fc_cfg->rq_cfg.rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ } else {
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = fc_cfg->rq_cfg.rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ }
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto exit;
+
+ npa_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!npa_req)
+ return -ENOSPC;
+
+ npa_req->aura_id = rsp->rq.lpb_aura;
+ npa_req->ctype = NPA_AQ_CTYPE_AURA;
+ npa_req->op = NPA_AQ_INSTOP_READ;
+
+ rc = mbox_process_msg(mbox, (void *)&npa_rsp);
+ if (rc)
+ goto exit;
+
+ fc_cfg->cq_cfg.cq_drop = npa_rsp->aura.bp;
+ fc_cfg->cq_cfg.enable = npa_rsp->aura.bp_ena;
+ fc_cfg->type = ROC_NIX_FC_RQ_CFG;
+
+exit:
+ return rc;
+}
+
+static int
nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
@@ -198,6 +253,33 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
return mbox_process(mbox);
}
+static int
+nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+{
+ struct roc_nix_fc_cfg tmp;
+ int sso_ena = 0;
+
+ /* Check whether RQ is connected to SSO or not */
+ sso_ena = roc_nix_rq_is_sso_enable(roc_nix, fc_cfg->rq_cfg.rq);
+ if (sso_ena < 0)
+ return -EINVAL;
+
+ if (sso_ena)
+ roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
+ fc_cfg->rq_cfg.enable, true,
+ fc_cfg->rq_cfg.tc);
+
+ /* Copy RQ config to CQ config as they are occupying same area */
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.type = ROC_NIX_FC_CQ_CFG;
+ tmp.cq_cfg.rq = fc_cfg->rq_cfg.rq;
+ tmp.cq_cfg.tc = fc_cfg->rq_cfg.tc;
+ tmp.cq_cfg.cq_drop = fc_cfg->rq_cfg.cq_drop;
+ tmp.cq_cfg.enable = fc_cfg->rq_cfg.enable;
+
+ return nix_fc_cq_config_set(roc_nix, &tmp);
+}
+
int
roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
@@ -207,6 +289,8 @@ roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
return nix_fc_cq_config_get(roc_nix, fc_cfg);
+ else if (fc_cfg->type == ROC_NIX_FC_RQ_CFG)
+ return nix_fc_rq_config_get(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
@@ -218,12 +302,10 @@ roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
int
roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
- if (!roc_nix_is_pf(roc_nix) && !roc_nix_is_lbk(roc_nix) &&
- !roc_nix_is_sdp(roc_nix))
- return 0;
-
if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
return nix_fc_cq_config_set(roc_nix, fc_cfg);
+ else if (fc_cfg->type == ROC_NIX_FC_RQ_CFG)
+ return nix_fc_rq_config_set(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
return nix_fc_rxchan_bpid_set(roc_nix,
fc_cfg->rxchan_cfg.enable);
@@ -320,8 +402,8 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
}
void
-rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
- uint8_t force)
+roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
+ uint8_t force, uint8_t tc)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct npa_lf *lf = idev_npa_obj_get();
@@ -329,6 +411,7 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
struct npa_aq_enq_rsp *rsp;
struct mbox *mbox;
uint32_t limit;
+ uint64_t shift;
int rc;
if (roc_nix_is_sdp(roc_nix))
@@ -351,8 +434,10 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
return;
limit = rsp->aura.limit;
+ shift = rsp->aura.shift;
+
/* BP is already enabled. */
- if (rsp->aura.bp_ena) {
+ if (rsp->aura.bp_ena && ena) {
uint16_t bpid;
bool nix1;
@@ -363,12 +448,15 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
bpid = rsp->aura.nix0_bpid;
/* If BP ids don't match disable BP. */
- if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[0])) &&
+ if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc])) &&
!force) {
req = mbox_alloc_msg_npa_aq_enq(mbox);
if (req == NULL)
return;
+ plt_info("Disabling BP/FC on aura 0x%" PRIx64
+ " as it shared across ports or tc",
+ pool_id);
req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
req->ctype = NPA_AQ_CTYPE_AURA;
req->op = NPA_AQ_INSTOP_WRITE;
@@ -378,11 +466,15 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
mbox_process(mbox);
}
+
+ if ((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc]))
+ plt_info("Ignoring aura 0x%" PRIx64 "->%u bpid mapping",
+ pool_id, nix->bpid[tc]);
return;
}
/* BP was previously enabled but now disabled skip. */
- if (rsp->aura.bp)
+ if (rsp->aura.bp && ena)
return;
req = mbox_alloc_msg_npa_aq_enq(mbox);
@@ -395,14 +487,16 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
if (ena) {
if (nix->is_nix1) {
- req->aura.nix1_bpid = nix->bpid[0];
+ req->aura.nix1_bpid = nix->bpid[tc];
req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
} else {
- req->aura.nix0_bpid = nix->bpid[0];
+ req->aura.nix0_bpid = nix->bpid[tc];
req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
}
- req->aura.bp = NIX_RQ_AURA_THRESH(
- limit > 128 ? 256 : limit); /* 95% of size*/
+ req->aura.bp = NIX_RQ_AURA_THRESH(limit >> shift);
+ req->aura_mask.bp = ~(req->aura_mask.bp);
+ } else {
+ req->aura.bp = 0;
req->aura_mask.bp = ~(req->aura_mask.bp);
}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index cc69d71..5e865f8 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -357,6 +357,8 @@ nix_tm_tree2str(enum roc_nix_tm_tree tree)
return "Default Tree";
else if (tree == ROC_NIX_TM_RLIMIT)
return "Rate Limit Tree";
+ else if (tree == ROC_NIX_TM_PFC)
+ return "PFC Tree";
else if (tree == ROC_NIX_TM_USER)
return "User Tree";
return "???";
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 76c049c..fa4c954 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -94,6 +94,53 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
}
int
+roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct dev *dev = &nix->dev;
+ struct mbox *mbox = dev->mbox;
+ bool sso_enable;
+ int rc;
+
+ if (roc_model_is_cn9k()) {
+ struct nix_aq_enq_rsp *rsp;
+ struct nix_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ sso_enable = rsp->rq.sso_ena;
+ } else {
+ struct nix_cn10k_aq_enq_rsp *rsp;
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ sso_enable = rsp->rq.sso_ena;
+ }
+
+ return sso_enable ? true : false;
+}
+
+int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
bool cfg, bool ena)
{
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 7fd54ef..151e217 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -98,7 +98,6 @@ int
nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
{
struct nix_tm_node_list *list;
- bool is_pf_or_lbk = false;
struct nix_tm_node *node;
bool skip_bp = false;
uint32_t hw_lvl;
@@ -106,9 +105,6 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
list = nix_tm_node_list(nix, tree);
- if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
- is_pf_or_lbk = true;
-
for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
TAILQ_FOREACH(node, list, node) {
if (node->hw_lvl != hw_lvl)
@@ -118,7 +114,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
* set per channel only for PF or lbk vf.
*/
node->bp_capa = 0;
- if (is_pf_or_lbk && !skip_bp &&
+ if (!nix->sdp_link && !skip_bp &&
node->hw_lvl == nix->tm_link_cfg_lvl) {
node->bp_capa = 1;
skip_bp = false;
@@ -329,6 +325,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
struct nix_tm_node *sq_node;
struct nix_tm_node *parent;
struct nix_tm_node *node;
+ uint8_t parent_lvl;
uint8_t k = 0;
int rc = 0;
@@ -336,9 +333,12 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
if (!sq_node)
return -ENOENT;
+ parent_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH2 :
+ ROC_TM_LVL_SCH1);
+
parent = sq_node->parent;
while (parent) {
- if (parent->lvl == ROC_TM_LVL_SCH2)
+ if (parent->lvl == parent_lvl)
break;
parent = parent->parent;
@@ -1469,16 +1469,18 @@ int
roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint8_t leaf_lvl, lvl, lvl_start, lvl_end;
uint32_t nonleaf_id = nix->nb_tx_queues;
struct nix_tm_node *node = NULL;
- uint8_t leaf_lvl, lvl, lvl_end;
uint32_t tl2_node_id;
uint32_t parent, i;
int rc = -ENOMEM;
parent = ROC_NIX_TM_NODE_ID_INVALID;
- lvl_end = ROC_TM_LVL_SCH3;
- leaf_lvl = ROC_TM_LVL_QUEUE;
+ lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
+ ROC_TM_LVL_SCH2);
+ leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+ ROC_TM_LVL_SCH4);
/* TL1 node */
node = nix_tm_node_alloc();
@@ -1501,31 +1503,37 @@ roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
parent = nonleaf_id;
nonleaf_id++;
- /* TL2 node */
- rc = -ENOMEM;
- node = nix_tm_node_alloc();
- if (!node)
- goto error;
+ lvl_start = ROC_TM_LVL_SCH1;
+ if (roc_nix_is_pf(roc_nix)) {
+ /* TL2 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
- node->id = nonleaf_id;
- node->parent_id = parent;
- node->priority = 0;
- node->weight = NIX_TM_DFLT_RR_WT;
- node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
- node->lvl = ROC_TM_LVL_SCH1;
- node->tree = ROC_NIX_TM_PFC;
- node->rel_chan = NIX_TM_CHAN_INVALID;
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_SCH1;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
- rc = nix_tm_node_add(roc_nix, node);
- if (rc)
- goto error;
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
- tl2_node_id = nonleaf_id;
- nonleaf_id++;
+ lvl_start = ROC_TM_LVL_SCH2;
+ tl2_node_id = nonleaf_id;
+ nonleaf_id++;
+ } else {
+ tl2_node_id = parent;
+ }
for (i = 0; i < nix->nb_tx_queues; i++) {
parent = tl2_node_id;
- for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+ for (lvl = lvl_start; lvl <= lvl_end; lvl++) {
rc = -ENOMEM;
node = nix_tm_node_alloc();
if (!node)
@@ -1549,7 +1557,8 @@ roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
nonleaf_id++;
}
- lvl = ROC_TM_LVL_SCH4;
+ lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
+ ROC_TM_LVL_SCH3);
rc = -ENOMEM;
node = nix_tm_node_alloc();
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 1ba5b4f..27e81f2 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -122,7 +122,7 @@ INTERNAL {
roc_nix_fc_config_set;
roc_nix_fc_mode_set;
roc_nix_fc_mode_get;
- rox_nix_fc_npa_bp_cfg;
+ roc_nix_fc_npa_bp_cfg;
roc_nix_get_base_chan;
roc_nix_get_pf;
roc_nix_get_pf_func;
@@ -220,6 +220,7 @@ INTERNAL {
roc_nix_rq_ena_dis;
roc_nix_rq_fini;
roc_nix_rq_init;
+ roc_nix_rq_is_sso_enable;
roc_nix_rq_modify;
roc_nix_rss_default_setup;
roc_nix_rss_flowkey_set;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index cf5b1dd..8fcc377 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -250,9 +250,11 @@ cnxk_sso_rx_adapter_queue_add(
rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix,
false);
}
- rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
- rxq_sp->qconf.mp->pool_id, true,
- dev->force_ena_bp);
+
+ if (rxq_sp->tx_pause)
+ roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ rxq_sp->qconf.mp->pool_id, true,
+ dev->force_ena_bp, rxq_sp->tc);
cnxk_eth_dev->nb_rxq_sso++;
}
@@ -293,9 +295,9 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
rxq_sp = cnxk_eth_rxq_to_sp(
eth_dev->data->rx_queues[rx_queue_id]);
rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
- rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
rxq_sp->qconf.mp->pool_id, false,
- dev->force_ena_bp);
+ dev->force_ena_bp, 0);
cnxk_eth_dev->nb_rxq_sso--;
/* Enable drop_re if it was disabled earlier */
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index e992302..0400d73 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -443,6 +443,8 @@ struct cnxk_eth_rxq_sp {
struct cnxk_eth_dev *dev;
struct cnxk_eth_qconf qconf;
uint16_t qid;
+ uint8_t tx_pause;
+ uint8_t tc;
} __plt_cache_aligned;
struct cnxk_eth_txq_sp {
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH 03/12] common/cnxk: add PFC support for VFs
2022-06-16 7:07 ` [PATCH 03/12] common/cnxk: add PFC support for VFs Nithin Dabilpuram
@ 2022-06-23 16:01 ` Ray Kinsella
0 siblings, 0 replies; 29+ messages in thread
From: Ray Kinsella @ 2022-06-23 16:01 UTC (permalink / raw)
To: Nithin Dabilpuram
Cc: jerinj, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Pavan Nikhilesh, Shijith Thotton, dev
Nithin Dabilpuram <ndabilpuram@marvell.com> writes:
> From: Sunil Kumar Kori <skori@marvell.com>
>
> Current PFC implementation does not support VFs.
> Patch enables PFC on VFs too.
>
> Also fix the config of aura.bp to be based on the number
> of buffers (aura.limit) and the corresponding shift
> value (aura.shift).
> Fixes: cb4bfd6e7bdf ("event/cnxk: support Rx adapter")
>
> Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
> ---
> drivers/common/cnxk/roc_nix.h | 14 +++-
> drivers/common/cnxk/roc_nix_fc.c | 120 +++++++++++++++++++++++++++----
> drivers/common/cnxk/roc_nix_priv.h | 2 +
> drivers/common/cnxk/roc_nix_queue.c | 47 ++++++++++++
> drivers/common/cnxk/roc_nix_tm.c | 67 +++++++++--------
> drivers/common/cnxk/version.map | 3 +-
> drivers/event/cnxk/cnxk_eventdev_adptr.c | 12 ++--
> drivers/net/cnxk/cnxk_ethdev.h | 2 +
> 8 files changed, 217 insertions(+), 50 deletions(-)
Acked-by: Ray Kinsella <mdr@ashroe.eu>
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 04/12] common/cnxk: support same TC value across multiple queues
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 02/12] common/cnxk: avoid CPT backpressure due to errata Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 03/12] common/cnxk: add PFC support for VFs Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 05/12] common/cnxk: enhance CPT parse header dump Nithin Dabilpuram
` (9 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev, Harman Kalra
From: Harman Kalra <hkalra@marvell.com>
User may want to configure same TC value across multiple queues, but
for that all queues should have a common TL3 where this TC value will
get configured.
Changed the pfc_tc_cq_map/pfc_tc_sq_map array indexing to qid and store
TC values in the array. As multiple queues may have same TC value.
Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
drivers/common/cnxk/roc_dev.c | 18 ++++++++
drivers/common/cnxk/roc_nix.h | 4 +-
drivers/common/cnxk/roc_nix_fc.c | 2 +-
drivers/common/cnxk/roc_nix_priv.h | 3 +-
drivers/common/cnxk/roc_nix_tm.c | 87 ++++++++++++++++++++++++------------
drivers/common/cnxk/roc_nix_tm_ops.c | 3 +-
6 files changed, 84 insertions(+), 33 deletions(-)
diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 09199ac..59128a3 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -421,6 +421,24 @@ process_msgs(struct dev *dev, struct mbox *mbox)
/* Get our identity */
dev->pf_func = msg->pcifunc;
break;
+ case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
+ /* Handling the case where one VF tries to disable PFC
+ * while PFC already configured on other VFs. This is
+ * not an error but a warning which can be ignored.
+ */
+#define LMAC_AF_ERR_PERM_DENIED -1103
+ if (msg->rc) {
+ if (msg->rc == LMAC_AF_ERR_PERM_DENIED) {
+ plt_mbox_dbg(
+ "Receive Flow control disable not permitted "
+ "as its used by other PFVFs");
+ msg->rc = 0;
+ } else {
+ plt_err("Message (%s) response has err=%d",
+ mbox_id2name(msg->id), msg->rc);
+ }
+ }
+ break;
default:
if (msg->rc)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index f0d7fc8..4e5cf05 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -11,7 +11,8 @@
#define ROC_NIX_BPF_LEVEL_IDX_INVALID 0xFF
#define ROC_NIX_BPF_LEVEL_MAX 3
#define ROC_NIX_BPF_STATS_MAX 12
-#define ROC_NIX_MTR_ID_INVALID UINT32_MAX
+#define ROC_NIX_MTR_ID_INVALID UINT32_MAX
+#define ROC_NIX_PFC_CLASS_INVALID UINT8_MAX
enum roc_nix_rss_reta_sz {
ROC_NIX_RSS_RETA_SZ_64 = 64,
@@ -349,6 +350,7 @@ struct roc_nix_sq {
void *lmt_addr;
void *sqe_mem;
void *fc;
+ uint8_t tc;
};
struct roc_nix_link_info {
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index daae285..f4cfa11 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -312,7 +312,7 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
fc_cfg->tm_cfg.tc,
- fc_cfg->tm_cfg.enable);
+ fc_cfg->tm_cfg.enable, false);
return -EINVAL;
}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 5e865f8..5b0522c 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -100,6 +100,7 @@ struct nix_tm_node {
/* Last stats */
uint64_t last_pkts;
uint64_t last_bytes;
+ uint32_t tc_refcnt;
};
struct nix_tm_shaper_profile {
@@ -402,7 +403,7 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
- bool enable);
+ bool enable, bool force_flush);
void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
int nix_tm_mark_init(struct nix *nix);
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 151e217..a31abde 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -314,7 +314,7 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
int
nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
- bool enable)
+ bool enable, bool force_flush)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
enum roc_nix_tm_tree tree = nix->tm_tree;
@@ -325,10 +325,15 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
struct nix_tm_node *sq_node;
struct nix_tm_node *parent;
struct nix_tm_node *node;
+ struct roc_nix_sq *sq_s;
uint8_t parent_lvl;
uint8_t k = 0;
int rc = 0;
+ sq_s = nix->sqs[sq];
+ if (!sq_s)
+ return -ENOENT;
+
sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
if (!sq_node)
return -ENOENT;
@@ -348,11 +353,22 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
list = nix_tm_node_list(nix, tree);
- if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+ /* Enable request, parent rel chan already configured */
+ if (enable && parent->rel_chan != NIX_TM_CHAN_INVALID &&
+ parent->rel_chan != tc) {
rc = -EINVAL;
goto err;
}
+ /* No action if enable request for a non participating SQ. This case is
+ * required to handle post flush where TCs should be reconfigured after
+ * pre flush.
+ */
+ if (enable && sq_s->tc == ROC_NIX_PFC_CLASS_INVALID &&
+ tc == ROC_NIX_PFC_CLASS_INVALID)
+ return 0;
+
+ /* Find the parent TL3 */
TAILQ_FOREACH(node, list, node) {
if (node->hw_lvl != nix->tm_link_cfg_lvl)
continue;
@@ -360,38 +376,51 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
continue;
- if (node->hw_id != parent->hw_id)
- continue;
-
- if (!req) {
- req = mbox_alloc_msg_nix_txschq_cfg(mbox);
- req->lvl = nix->tm_link_cfg_lvl;
- k = 0;
+ /* Restrict sharing of TL3 across the queues */
+ if (enable && node != parent && node->rel_chan == tc) {
+ plt_err("SQ %d node TL3 id %d already has %d tc value set",
+ sq, node->hw_id, tc);
+ return -EINVAL;
}
+ }
- req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
- req->regval[k] = enable ? tc : 0;
- req->regval[k] |= enable ? BIT_ULL(13) : 0;
- req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
- k++;
-
- if (k >= MAX_REGS_PER_MBOX_MSG) {
- req->num_regs = k;
- rc = mbox_process(mbox);
- if (rc)
- goto err;
- req = NULL;
- }
+ /* In case of user tree i.e. multiple SQs may share a TL3, disabling PFC
+ * on one of such SQ should not hamper the traffic control on other SQs.
+ * Maitaining a reference count scheme to account no of SQs sharing the
+ * TL3 before disabling PFC on it.
+ */
+ if (!force_flush && !enable &&
+ parent->rel_chan != NIX_TM_CHAN_INVALID) {
+ if (sq_s->tc != ROC_NIX_PFC_CLASS_INVALID)
+ parent->tc_refcnt--;
+ if (parent->tc_refcnt > 0)
+ return 0;
}
- if (req) {
- req->num_regs = k;
- rc = mbox_process(mbox);
- if (rc)
- goto err;
+ /* Allocating TL3 resources */
+ if (!req) {
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = nix->tm_link_cfg_lvl;
+ k = 0;
}
+ /* Enable PFC on the identified TL3 */
+ req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(parent->hw_id, link);
+ req->regval[k] = enable ? tc : 0;
+ req->regval[k] |= enable ? BIT_ULL(13) : 0;
+ req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
+ k++;
+
+ req->num_regs = k;
+ rc = mbox_process(mbox);
+ if (rc)
+ goto err;
+
parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
+ /* Increase reference count for parent TL3 */
+ if (enable && sq_s->tc == ROC_NIX_PFC_CLASS_INVALID)
+ parent->tc_refcnt++;
+
return 0;
err:
plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -629,7 +658,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
}
/* Disable backpressure */
- rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false, true);
if (rc) {
plt_err("Failed to disable backpressure for flush, rc=%d", rc);
return rc;
@@ -764,7 +793,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
return 0;
/* Restore backpressure */
- rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, sq->tc, true, false);
if (rc) {
plt_err("Failed to restore backpressure, rc=%d", rc);
return rc;
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 5884ce5..4aa5500 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -292,6 +292,7 @@ roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
node->pkt_mode_set = roc_node->pkt_mode_set;
node->free_fn = roc_node->free_fn;
node->tree = ROC_NIX_TM_USER;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
return nix_tm_node_add(roc_nix, node);
}
@@ -473,7 +474,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
if (!sq)
continue;
- rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false, false);
if (rc && rc != -ENOENT) {
plt_err("Failed to disable backpressure, rc=%d", rc);
goto cleanup;
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 05/12] common/cnxk: enhance CPT parse header dump
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (2 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 04/12] common/cnxk: support same TC value across multiple queues Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 06/12] common/cnxk: fix mbox structs to avoid unaligned access Nithin Dabilpuram
` (8 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
Enhance CPT parse header dump to dump fragment info
and swap pointers before printing.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_cpt_debug.c | 33 +++++++++++++++++++++++++++++++--
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/roc_cpt_debug.c b/drivers/common/cnxk/roc_cpt_debug.c
index be6ddb5..5602e53 100644
--- a/drivers/common/cnxk/roc_cpt_debug.c
+++ b/drivers/common/cnxk/roc_cpt_debug.c
@@ -8,6 +8,10 @@
void
roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth)
{
+ struct cpt_frag_info_s *frag_info;
+ uint32_t offset;
+ uint64_t *slot;
+
plt_print("CPT_PARSE \t0x%p:", cpth);
/* W0 */
@@ -19,7 +23,7 @@ roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth)
cpth->w0.pad_len, cpth->w0.num_frags, cpth->w0.pkt_out);
/* W1 */
- plt_print("W1: wqe_ptr \t0x%016lx\t", cpth->wqe_ptr);
+ plt_print("W1: wqe_ptr \t0x%016lx\t", plt_be_to_cpu_64(cpth->wqe_ptr));
/* W2 */
plt_print("W2: frag_age \t0x%x\t\torig_pf_func \t0x%04x",
@@ -33,7 +37,32 @@ roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth)
/* W4 */
plt_print("W4: esn \t%" PRIx64 " \t OR frag1_wqe_ptr \t0x%" PRIx64,
- cpth->esn, cpth->frag1_wqe_ptr);
+ cpth->esn, plt_be_to_cpu_64(cpth->frag1_wqe_ptr));
+
+ /* offset of 0 implies 256B, otherwise it implies offset*8B */
+ offset = cpth->w2.fi_offset;
+ offset = (((offset - 1) & 0x1f) + 1) * 8;
+ frag_info = PLT_PTR_ADD(cpth, offset);
+
+ plt_print("CPT Fraginfo \t0x%p:", frag_info);
+
+ /* W0 */
+ plt_print("W0: f0.info \t0x%x", frag_info->w0.f0.info);
+ plt_print("W0: f1.info \t0x%x", frag_info->w0.f1.info);
+ plt_print("W0: f2.info \t0x%x", frag_info->w0.f2.info);
+ plt_print("W0: f3.info \t0x%x", frag_info->w0.f3.info);
+
+ /* W1 */
+ plt_print("W1: frag_size0 \t0x%x", frag_info->w1.frag_size0);
+ plt_print("W1: frag_size1 \t0x%x", frag_info->w1.frag_size1);
+ plt_print("W1: frag_size2 \t0x%x", frag_info->w1.frag_size2);
+ plt_print("W1: frag_size3 \t0x%x", frag_info->w1.frag_size3);
+
+ slot = (uint64_t *)(frag_info + 1);
+ plt_print("Frag Slot2: WQE ptr \t%p",
+ (void *)plt_be_to_cpu_64(slot[0]));
+ plt_print("Frag Slot3: WQE ptr \t%p",
+ (void *)plt_be_to_cpu_64(slot[1]));
}
static int
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 06/12] common/cnxk: fix mbox structs to avoid unaligned access
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (3 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 05/12] common/cnxk: enhance CPT parse header dump Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 07/12] net/cnxk: add SDP link status Nithin Dabilpuram
` (7 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
Fix mbox structs to avoid unaligned access as mbox memory
is from BAR space.
Fixes: 503b82de2cbf ("common/cnxk: add mbox request and response definitions")
Fixes: e746aec161cc ("common/cnxk: fix SQ flush sequence")
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_mbox.h | 18 +++++++++---------
drivers/common/cnxk/roc_nix_inl.c | 2 ++
2 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 2c30f19..965c704 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -777,7 +777,7 @@ struct nix_lf_alloc_req {
uint64_t __io way_mask;
#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
#define NIX_LF_LBK_BLK_SEL BIT_ULL(1)
- uint64_t flags;
+ uint64_t __io flags;
};
struct nix_lf_alloc_rsp {
@@ -798,7 +798,7 @@ struct nix_lf_alloc_rsp {
uint8_t __io cgx_links; /* No. of CGX links present in HW */
uint8_t __io lbk_links; /* No. of LBK links present in HW */
uint8_t __io sdp_links; /* No. of SDP links present in HW */
- uint8_t tx_link; /* Transmit channel link number */
+ uint8_t __io tx_link; /* Transmit channel link number */
};
struct nix_lf_free_req {
@@ -1275,8 +1275,8 @@ struct ssow_lf_free_req {
#define SSOW_INVAL_SELECTIVE_VER 0x1000
struct ssow_lf_inv_req {
struct mbox_msghdr hdr;
- uint16_t nb_hws; /* Number of HWS to invalidate*/
- uint16_t hws[MAX_RVU_BLKLF_CNT]; /* Array of HWS */
+ uint16_t __io nb_hws; /* Number of HWS to invalidate*/
+ uint16_t __io hws[MAX_RVU_BLKLF_CNT]; /* Array of HWS */
};
struct ssow_config_lsw {
@@ -1453,11 +1453,11 @@ struct cpt_sts_rsp {
struct cpt_rxc_time_cfg_req {
struct mbox_msghdr hdr;
int blkaddr;
- uint32_t step;
- uint16_t zombie_thres;
- uint16_t zombie_limit;
- uint16_t active_thres;
- uint16_t active_limit;
+ uint32_t __io step;
+ uint16_t __io zombie_thres;
+ uint16_t __io zombie_limit;
+ uint16_t __io active_thres;
+ uint16_t __io active_limit;
};
struct cpt_rx_inline_lf_cfg_msg {
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 39b9bec..7da8938 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -246,6 +246,8 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
struct roc_cpt_rxc_time_cfg cfg;
PLT_SET_USED(max_frags);
+ if (idev == NULL)
+ return -ENOTSUP;
roc_cpt = idev->cpt;
if (!roc_cpt) {
plt_err("Cannot support inline inbound, cryptodev not probed");
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 07/12] net/cnxk: add SDP link status
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (4 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 06/12] common/cnxk: fix mbox structs to avoid unaligned access Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 08/12] net/cnxk: remove restriction on VFs for PFC config Nithin Dabilpuram
` (6 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev, Satananda Burla
From: Satananda Burla <sburla@marvell.com>
Add SDP link status reporting
Signed-off-by: Satananda Burla <sburla@marvell.com>
---
drivers/net/cnxk/cnxk_link.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c
index b1d59e3..127c9e7 100644
--- a/drivers/net/cnxk/cnxk_link.c
+++ b/drivers/net/cnxk/cnxk_link.c
@@ -13,7 +13,7 @@ cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set)
dev->flags &= ~CNXK_LINK_CFG_IN_PROGRESS_F;
/* Update link info for LBK */
- if (!set && roc_nix_is_lbk(&dev->nix)) {
+ if (!set && (roc_nix_is_lbk(&dev->nix) || roc_nix_is_sdp(&dev->nix))) {
struct rte_eth_link link;
link.link_status = RTE_ETH_LINK_UP;
@@ -124,10 +124,10 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
RTE_SET_USED(wait_to_complete);
memset(&link, 0, sizeof(struct rte_eth_link));
- if (!eth_dev->data->dev_started || roc_nix_is_sdp(&dev->nix))
+ if (!eth_dev->data->dev_started)
return 0;
- if (roc_nix_is_lbk(&dev->nix)) {
+ if (roc_nix_is_lbk(&dev->nix) || roc_nix_is_sdp(&dev->nix)) {
link.link_status = RTE_ETH_LINK_UP;
link.link_speed = RTE_ETH_SPEED_NUM_100G;
link.link_autoneg = RTE_ETH_LINK_FIXED;
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 08/12] net/cnxk: remove restriction on VFs for PFC config
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (5 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 07/12] net/cnxk: add SDP link status Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 09/12] net/cnxk: pfc class disable resulting in invalid behaviour Nithin Dabilpuram
` (5 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Currently, PFC configuration is not allowed on VFs.
This patch enables PFC configuration on VFs.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
drivers/net/cnxk/cnxk_ethdev.c | 9 +-
drivers/net/cnxk/cnxk_ethdev.h | 13 +--
drivers/net/cnxk/cnxk_ethdev_ops.c | 219 +++++++++++++++++++++----------------
3 files changed, 137 insertions(+), 104 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 09e5736..941b270 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -323,7 +323,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
struct cnxk_fc_cfg *fc = &dev->fc_cfg;
int rc;
- if (roc_nix_is_sdp(&dev->nix))
+ if (roc_nix_is_vf_or_sdp(&dev->nix))
return 0;
/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
@@ -604,6 +604,9 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
rxq_sp->qconf.nb_desc = nb_desc;
rxq_sp->qconf.mp = mp;
+ rxq_sp->tc = 0;
+ rxq_sp->tx_pause = (dev->fc_cfg.mode == RTE_ETH_FC_FULL ||
+ dev->fc_cfg.mode == RTE_ETH_FC_TX_PAUSE);
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Pass a tagmask used to handle error packets in inline device.
@@ -1795,7 +1798,6 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
pfc_conf.rx_pause.tc = i;
- pfc_conf.tx_pause.rx_qid = i;
pfc_conf.tx_pause.tc = i;
rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
&pfc_conf);
@@ -1805,9 +1807,6 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
}
}
- fc_conf.mode = RTE_ETH_FC_FULL;
- rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
-
/* Disable and free rte_meter entries */
nix_meter_fini(dev);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 0400d73..db2d849 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -156,13 +156,10 @@ struct cnxk_fc_cfg {
};
struct cnxk_pfc_cfg {
- struct cnxk_fc_cfg fc_cfg;
uint16_t class_en;
uint16_t pause_time;
- uint8_t rx_tc;
- uint8_t rx_qid;
- uint8_t tx_tc;
- uint8_t tx_qid;
+ uint16_t rx_pause_en;
+ uint16_t tx_pause_en;
};
struct cnxk_eth_qconf {
@@ -669,8 +666,10 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
uint32_t *prev_id, uint32_t *next_id,
struct cnxk_mtr_policy_node *policy,
int *tree_level);
-int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
- struct cnxk_pfc_cfg *conf);
+int nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint8_t tx_pause, uint8_t tc);
+int nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint8_t rx_pause, uint8_t tc);
/* Inlines */
static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 15d8e8e..caace9d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -225,15 +225,17 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
struct roc_nix *nix = &dev->nix;
struct roc_nix_fc_cfg fc_cfg;
struct roc_nix_cq *cq;
+ struct roc_nix_rq *rq;
memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ rq = &dev->rqs[qid];
cq = &dev->cqs[qid];
- fc_cfg.type = ROC_NIX_FC_CQ_CFG;
- fc_cfg.cq_cfg.enable = enable;
- /* Map all CQs to last channel */
- fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
- fc_cfg.cq_cfg.rq = qid;
- fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+ fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+ fc_cfg.rq_cfg.enable = enable;
+ fc_cfg.rq_cfg.tc = 0;
+ fc_cfg.rq_cfg.rq = qid;
+ fc_cfg.rq_cfg.pool = rq->aura_handle;
+ fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
return roc_nix_fc_config_set(nix, &fc_cfg);
}
@@ -255,10 +257,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
uint8_t rx_pause, tx_pause;
int rc, i;
- if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_lbk(nix)) {
- plt_err("Flow control configuration is not allowed on VFs");
- return -ENOTSUP;
- }
+ if (roc_nix_is_sdp(nix))
+ return 0;
if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
@@ -266,14 +266,18 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
- if (fc_conf->mode == fc->mode)
- return 0;
rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
+ if (fc_conf->mode == fc->mode) {
+ fc->rx_pause = rx_pause;
+ fc->tx_pause = tx_pause;
+ return 0;
+ }
+
/* Check if TX pause frame is already enabled or not */
if (fc->tx_pause ^ tx_pause) {
if (roc_model_is_cn96_ax() && data->dev_started) {
@@ -291,6 +295,7 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
1;
+ rxq->tx_pause = !!tx_pause;
rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
if (rc)
return rc;
@@ -321,13 +326,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
fc->rx_pause = rx_pause;
fc->tx_pause = tx_pause;
fc->mode = fc_conf->mode;
-
return rc;
}
int
cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
- struct rte_eth_pfc_queue_info *pfc_info)
+ struct rte_eth_pfc_queue_info *pfc_info)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -338,25 +342,42 @@ cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
int
cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
- struct rte_eth_pfc_queue_conf *pfc_conf)
+ struct rte_eth_pfc_queue_conf *pfc_conf)
{
- struct cnxk_pfc_cfg conf;
- int rc;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ enum rte_eth_fc_mode mode;
+ uint8_t en, tc;
+ uint16_t qid;
+ int rc = 0;
- memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));
+ if (dev->fc_cfg.mode != RTE_ETH_FC_NONE) {
+ plt_err("Disable Flow Control before configuring PFC");
+ return -ENOTSUP;
+ }
- conf.fc_cfg.mode = pfc_conf->mode;
+ if (roc_nix_is_sdp(nix)) {
+ plt_err("Prio flow ctrl config is not allowed on SDP");
+ return -ENOTSUP;
+ }
- conf.pause_time = pfc_conf->tx_pause.pause_time;
- conf.rx_tc = pfc_conf->tx_pause.tc;
- conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+ mode = pfc_conf->mode;
- conf.tx_tc = pfc_conf->rx_pause.tc;
- conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+ /* Perform Tx pause configuration on RQ */
+ qid = pfc_conf->tx_pause.rx_qid;
+ if (qid < eth_dev->data->nb_rx_queues) {
+ en = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+ tc = pfc_conf->tx_pause.tc;
+ rc = nix_priority_flow_ctrl_rq_conf(eth_dev, qid, en, tc);
+ }
- rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
- if (rc)
- return rc;
+ /* Perform Rx pause configuration on SQ */
+ qid = pfc_conf->rx_pause.tx_qid;
+ if (qid < eth_dev->data->nb_tx_queues) {
+ en = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+ tc = pfc_conf->rx_pause.tc;
+ rc |= nix_priority_flow_ctrl_sq_conf(eth_dev, qid, en, tc);
+ }
return rc;
}
@@ -1026,11 +1047,9 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
}
int
-nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
- struct cnxk_pfc_cfg *conf)
+nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint8_t tx_pause, uint8_t tc)
{
- enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
- ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct rte_eth_dev_data *data = eth_dev->data;
struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
@@ -1038,18 +1057,11 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
struct roc_nix_pfc_cfg pfc_cfg;
struct roc_nix_fc_cfg fc_cfg;
struct cnxk_eth_rxq_sp *rxq;
- struct cnxk_eth_txq_sp *txq;
- uint8_t rx_pause, tx_pause;
- enum rte_eth_fc_mode mode;
+ enum roc_nix_fc_mode mode;
+ struct roc_nix_rq *rq;
struct roc_nix_cq *cq;
- struct roc_nix_sq *sq;
int rc;
- if (roc_nix_is_vf_or_sdp(nix)) {
- plt_err("Prio flow ctrl config is not allowed on VF and SDP");
- return -ENOTSUP;
- }
-
if (roc_model_is_cn96_ax() && data->dev_started) {
/* On Ax, CQ should be in disabled state
* while setting flow control configuration.
@@ -1059,39 +1071,83 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
return 0;
}
- if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
- dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+ if (data->rx_queues == NULL)
+ return -EINVAL;
+
+ if (qid >= eth_dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
+ /* Configure RQ */
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[qid]) - 1;
+ rq = &dev->rqs[qid];
+ cq = &dev->cqs[qid];
+
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+ fc_cfg.rq_cfg.tc = tc;
+ fc_cfg.rq_cfg.enable = !!tx_pause;
+ fc_cfg.rq_cfg.rq = rq->qid;
+ fc_cfg.rq_cfg.pool = rxq->qconf.mp->pool_id;
+ fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc)
+ return rc;
+
+ if (rxq->tx_pause != tx_pause) {
+ if (tx_pause)
+ pfc->tx_pause_en++;
+ else
+ pfc->tx_pause_en--;
+ }
+
+ rxq->tx_pause = !!tx_pause;
+ rxq->tc = tc;
+
+ /* Skip if PFC already enabled in mac */
+ if (pfc->tx_pause_en > 1)
+ return 0;
+
+ /* Configure MAC block */
+ pfc->class_en = pfc->tx_pause_en ? 0xFF : 0x0;
+
+ if (pfc->rx_pause_en)
+ mode = pfc->tx_pause_en ? ROC_NIX_FC_FULL : ROC_NIX_FC_RX;
+ else
+ mode = pfc->tx_pause_en ? ROC_NIX_FC_TX : ROC_NIX_FC_NONE;
+
+ memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+ pfc_cfg.mode = mode;
+ pfc_cfg.tc = pfc->class_en;
+ return roc_nix_pfc_mode_set(nix, &pfc_cfg);
+}
+
+int
+nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint8_t rx_pause, uint8_t tc)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_fc_cfg fc_cfg;
+ struct cnxk_eth_txq_sp *txq;
+ struct roc_nix_sq *sq;
+ int rc;
+
+ if (data->tx_queues == NULL)
+ return -EINVAL;
+
+ if (qid >= eth_dev->data->nb_tx_queues)
+ return -ENOTSUP;
+
+ if (dev->pfc_tc_sq_map[tc] != 0xFFFF &&
+ dev->pfc_tc_sq_map[tc] != qid) {
plt_err("Same TC can not be configured on multiple SQs");
return -ENOTSUP;
}
- mode = conf->fc_cfg.mode;
- rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
- tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
-
- if (data->rx_queues == NULL || data->tx_queues == NULL) {
- rc = 0;
- goto exit;
- }
-
- /* Configure CQs */
- memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
- rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
- cq = &dev->cqs[rxq->qid];
- fc_cfg.type = ROC_NIX_FC_CQ_CFG;
- fc_cfg.cq_cfg.tc = conf->rx_tc;
- fc_cfg.cq_cfg.enable = !!tx_pause;
- fc_cfg.cq_cfg.rq = cq->qid;
- fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
- rc = roc_nix_fc_config_set(nix, &fc_cfg);
- if (rc)
- goto exit;
-
/* Check if RX pause frame is enabled or not */
- if (pfc->fc_cfg.rx_pause ^ rx_pause) {
- if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
- goto exit;
-
+ if (!pfc->rx_pause_en) {
if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
eth_dev->data->nb_tx_queues > 1) {
/*
@@ -1113,39 +1169,18 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
}
}
- txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+ txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[qid]) - 1;
sq = &dev->sqs[txq->qid];
memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
fc_cfg.type = ROC_NIX_FC_TM_CFG;
fc_cfg.tm_cfg.sq = sq->qid;
- fc_cfg.tm_cfg.tc = conf->tx_tc;
+ fc_cfg.tm_cfg.tc = tc;
fc_cfg.tm_cfg.enable = !!rx_pause;
rc = roc_nix_fc_config_set(nix, &fc_cfg);
if (rc)
return rc;
- dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
-
- /* Configure MAC block */
- if (tx_pause)
- pfc->class_en |= BIT(conf->rx_tc);
- else
- pfc->class_en &= ~BIT(conf->rx_tc);
-
- if (pfc->class_en)
- mode = RTE_ETH_FC_FULL;
-
- memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
- pfc_cfg.mode = mode_map[mode];
- pfc_cfg.tc = pfc->class_en;
- rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
- if (rc)
- return rc;
-
- pfc->fc_cfg.rx_pause = rx_pause;
- pfc->fc_cfg.tx_pause = tx_pause;
- pfc->fc_cfg.mode = mode;
-
+ dev->pfc_tc_sq_map[tc] = sq->qid;
exit:
return rc;
}
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 09/12] net/cnxk: pfc class disable resulting in invalid behaviour
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (6 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 08/12] net/cnxk: remove restriction on VFs for PFC config Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 10/12] net/cnxk: resize CQ for Rx security for errata Nithin Dabilpuram
` (4 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev, Harman Kalra
From: Harman Kalra <hkalra@marvell.com>
Disabling a specific PFC class on an SQ results in disabling PFC
on the entire port.
Signed-off-by: Harman Kalra <hkalra@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/net/cnxk/cnxk_ethdev.c | 25 ++++++++++++-------------
drivers/net/cnxk/cnxk_ethdev.h | 1 -
drivers/net/cnxk/cnxk_ethdev_ops.c | 34 +++++++++++++++++++++++++++-------
3 files changed, 39 insertions(+), 21 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 941b270..4ea1617 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -439,6 +439,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
sq->qid = qid;
sq->nb_desc = nb_desc;
sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
+ sq->tc = ROC_NIX_PFC_CLASS_INVALID;
rc = roc_nix_sq_init(&dev->nix, sq);
if (rc) {
@@ -1281,8 +1282,6 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
goto cq_fini;
}
- /* Initialize TC to SQ mapping as invalid */
- memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
/*
* Restore queue config when reconfigure followed by
* reconfigure and no queue configure invoked from application case.
@@ -1794,17 +1793,17 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
pfc_conf.mode = RTE_ETH_FC_NONE;
- for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
- if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
- pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
- pfc_conf.rx_pause.tc = i;
- pfc_conf.tx_pause.tc = i;
- rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
- &pfc_conf);
- if (rc)
- plt_err("Failed to reset PFC. error code(%d)",
- rc);
- }
+ for (i = 0; i < RTE_MAX(eth_dev->data->nb_rx_queues,
+ eth_dev->data->nb_tx_queues);
+ i++) {
+ pfc_conf.rx_pause.tc = ROC_NIX_PFC_CLASS_INVALID;
+ pfc_conf.rx_pause.tx_qid = i;
+ pfc_conf.tx_pause.tc = ROC_NIX_PFC_CLASS_INVALID;
+ pfc_conf.tx_pause.rx_qid = i;
+ rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+ &pfc_conf);
+ if (rc)
+ plt_err("Failed to reset PFC. error code(%d)", rc);
}
/* Disable and free rte_meter entries */
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index db2d849..a4e96f0 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -396,7 +396,6 @@ struct cnxk_eth_dev {
struct cnxk_eth_qconf *rx_qconf;
/* Flow control configuration */
- uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
struct cnxk_pfc_cfg pfc_cfg;
struct cnxk_fc_cfg fc_cfg;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index caace9d..1592971 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -1129,8 +1129,10 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
struct rte_eth_dev_data *data = eth_dev->data;
struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
struct roc_nix *nix = &dev->nix;
+ struct roc_nix_pfc_cfg pfc_cfg;
struct roc_nix_fc_cfg fc_cfg;
struct cnxk_eth_txq_sp *txq;
+ enum roc_nix_fc_mode mode;
struct roc_nix_sq *sq;
int rc;
@@ -1140,12 +1142,6 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
if (qid >= eth_dev->data->nb_tx_queues)
return -ENOTSUP;
- if (dev->pfc_tc_sq_map[tc] != 0xFFFF &&
- dev->pfc_tc_sq_map[tc] != qid) {
- plt_err("Same TC can not be configured on multiple SQs");
- return -ENOTSUP;
- }
-
/* Check if RX pause frame is enabled or not */
if (!pfc->rx_pause_en) {
if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
@@ -1180,7 +1176,31 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
if (rc)
return rc;
- dev->pfc_tc_sq_map[tc] = sq->qid;
+ /* Maintaining a count for SQs which are configured for PFC. This is
+ * required to handle disabling of a particular SQ without affecting
+ * PFC on other SQs.
+ */
+ if (!fc_cfg.tm_cfg.enable && sq->tc != ROC_NIX_PFC_CLASS_INVALID) {
+ sq->tc = ROC_NIX_PFC_CLASS_INVALID;
+ pfc->rx_pause_en--;
+ } else if (fc_cfg.tm_cfg.enable &&
+ sq->tc == ROC_NIX_PFC_CLASS_INVALID) {
+ sq->tc = tc;
+ pfc->rx_pause_en++;
+ }
+
+ if (pfc->rx_pause_en > 1)
+ goto exit;
+
+ if (pfc->tx_pause_en)
+ mode = pfc->rx_pause_en ? ROC_NIX_FC_FULL : ROC_NIX_FC_TX;
+ else
+ mode = pfc->rx_pause_en ? ROC_NIX_FC_RX : ROC_NIX_FC_NONE;
+
+ memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+ pfc_cfg.mode = mode;
+ pfc_cfg.tc = pfc->class_en;
+ rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
exit:
return rc;
}
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 10/12] net/cnxk: resize CQ for Rx security for errata
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (7 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 09/12] net/cnxk: pfc class disable resulting in invalid behaviour Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 8:50 ` Jerin Jacob
2022-06-16 7:07 ` [PATCH 11/12] net/cnxk: add SDP VF device ID for probe matching Nithin Dabilpuram
` (3 subsequent siblings)
12 siblings, 1 reply; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
Resize CQ for Rx security offload in case of HW errata.
ci: skip_checkpatch skip_klocwork
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/net/cnxk/cnxk_ethdev.c | 43 +++++++++++++++++++++++++++++++++++++++++-
drivers/net/cnxk/cnxk_ethdev.h | 2 +-
2 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 4ea1617..2418290 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -5,6 +5,8 @@
#include <rte_eventdev.h>
+#define CNXK_NIX_CQ_INL_CLAMP_MAX (64UL * 1024UL)
+
static inline uint64_t
nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
{
@@ -40,6 +42,39 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
return speed_capa;
}
+static uint32_t
+nix_inl_cq_sz_clamp_up(struct roc_nix *nix, struct rte_mempool *mp,
+ uint32_t nb_desc)
+{
+ struct roc_nix_rq *inl_rq;
+ uint64_t limit;
+
+ if (!roc_errata_cpt_hang_on_x2p_bp())
+ return nb_desc;
+
+ /* CQ should be able to hold all buffers in first pass RQ's aura
+ * this RQ's aura.
+ */
+ inl_rq = roc_nix_inl_dev_rq(nix);
+ if (!inl_rq) {
+ /* This itself is going to be inline RQ's aura */
+ limit = roc_npa_aura_op_limit_get(mp->pool_id);
+ } else {
+ limit = roc_npa_aura_op_limit_get(inl_rq->aura_handle);
+ /* Also add this RQ's aura if it is different */
+ if (inl_rq->aura_handle != mp->pool_id)
+ limit += roc_npa_aura_op_limit_get(mp->pool_id);
+ }
+ nb_desc = PLT_MAX(limit + 1, nb_desc);
+ if (nb_desc > CNXK_NIX_CQ_INL_CLAMP_MAX) {
+ plt_warn("Could not setup CQ size to accommodate"
+ " all buffers in related auras (%" PRIu64 ")",
+ limit);
+ nb_desc = CNXK_NIX_CQ_INL_CLAMP_MAX;
+ }
+ return nb_desc;
+}
+
int
cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
{
@@ -504,7 +539,7 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
int
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
- uint16_t nb_desc, uint16_t fp_rx_q_sz,
+ uint32_t nb_desc, uint16_t fp_rx_q_sz,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
@@ -552,6 +587,12 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
roc_nix_inl_dev_xaq_realloc(mp->pool_id);
+ /* Increase CQ size to Aura size to avoid CQ overflow and
+ * then CPT buffer leak.
+ */
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+ nb_desc = nix_inl_cq_sz_clamp_up(nix, mp, nb_desc);
+
/* Setup ROC CQ */
cq = &dev->cqs[qid];
cq->qid = qid;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index a4e96f0..4cb7c9e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -530,7 +530,7 @@ int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_tx_q_sz,
const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
- uint16_t nb_desc, uint16_t fp_rx_q_sz,
+ uint32_t nb_desc, uint16_t fp_rx_q_sz,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH 10/12] net/cnxk: resize CQ for Rx security for errata
2022-06-16 7:07 ` [PATCH 10/12] net/cnxk: resize CQ for Rx security for errata Nithin Dabilpuram
@ 2022-06-16 8:50 ` Jerin Jacob
0 siblings, 0 replies; 29+ messages in thread
From: Jerin Jacob @ 2022-06-16 8:50 UTC (permalink / raw)
To: Nithin Dabilpuram
Cc: Jerin Jacob, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, dpdk-dev
On Thu, Jun 16, 2022 at 12:40 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> Resize CQ for Rx security offload in case of HW errata.
>
> ci: skip_checkpatch skip_klocwork
Remove this.
Please fix any ./devtools/checkpatches.sh and ./devtools/check-git-log.sh
issues in the series.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
> drivers/net/cnxk/cnxk_ethdev.c | 43 +++++++++++++++++++++++++++++++++++++++++-
> drivers/net/cnxk/cnxk_ethdev.h | 2 +-
> 2 files changed, 43 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
> index 4ea1617..2418290 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.c
> +++ b/drivers/net/cnxk/cnxk_ethdev.c
> @@ -5,6 +5,8 @@
>
> #include <rte_eventdev.h>
>
> +#define CNXK_NIX_CQ_INL_CLAMP_MAX (64UL * 1024UL)
> +
> static inline uint64_t
> nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
> {
> @@ -40,6 +42,39 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
> return speed_capa;
> }
>
> +static uint32_t
> +nix_inl_cq_sz_clamp_up(struct roc_nix *nix, struct rte_mempool *mp,
> + uint32_t nb_desc)
> +{
> + struct roc_nix_rq *inl_rq;
> + uint64_t limit;
> +
> + if (!roc_errata_cpt_hang_on_x2p_bp())
> + return nb_desc;
> +
> + /* CQ should be able to hold all buffers in first pass RQ's aura
> + * this RQ's aura.
> + */
> + inl_rq = roc_nix_inl_dev_rq(nix);
> + if (!inl_rq) {
> + /* This itself is going to be inline RQ's aura */
> + limit = roc_npa_aura_op_limit_get(mp->pool_id);
> + } else {
> + limit = roc_npa_aura_op_limit_get(inl_rq->aura_handle);
> + /* Also add this RQ's aura if it is different */
> + if (inl_rq->aura_handle != mp->pool_id)
> + limit += roc_npa_aura_op_limit_get(mp->pool_id);
> + }
> + nb_desc = PLT_MAX(limit + 1, nb_desc);
> + if (nb_desc > CNXK_NIX_CQ_INL_CLAMP_MAX) {
> + plt_warn("Could not setup CQ size to accommodate"
> + " all buffers in related auras (%" PRIu64 ")",
> + limit);
> + nb_desc = CNXK_NIX_CQ_INL_CLAMP_MAX;
> + }
> + return nb_desc;
> +}
> +
> int
> cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
> {
> @@ -504,7 +539,7 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
>
> int
> cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
> - uint16_t nb_desc, uint16_t fp_rx_q_sz,
> + uint32_t nb_desc, uint16_t fp_rx_q_sz,
> const struct rte_eth_rxconf *rx_conf,
> struct rte_mempool *mp)
> {
> @@ -552,6 +587,12 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
> dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
> roc_nix_inl_dev_xaq_realloc(mp->pool_id);
>
> + /* Increase CQ size to Aura size to avoid CQ overflow and
> + * then CPT buffer leak.
> + */
> + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
> + nb_desc = nix_inl_cq_sz_clamp_up(nix, mp, nb_desc);
> +
> /* Setup ROC CQ */
> cq = &dev->cqs[qid];
> cq->qid = qid;
> diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
> index a4e96f0..4cb7c9e 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.h
> +++ b/drivers/net/cnxk/cnxk_ethdev.h
> @@ -530,7 +530,7 @@ int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
> uint16_t nb_desc, uint16_t fp_tx_q_sz,
> const struct rte_eth_txconf *tx_conf);
> int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
> - uint16_t nb_desc, uint16_t fp_rx_q_sz,
> + uint32_t nb_desc, uint16_t fp_rx_q_sz,
> const struct rte_eth_rxconf *rx_conf,
> struct rte_mempool *mp);
> int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
> --
> 2.8.4
>
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 11/12] net/cnxk: add SDP VF device ID for probe matching
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (8 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 10/12] net/cnxk: resize CQ for Rx security for errata Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 7:07 ` [PATCH 12/12] event/cnxk: offset timestamp data only if enabled on port Nithin Dabilpuram
` (2 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev, Radha Mohan Chintakuntla
From: Radha Mohan Chintakuntla <radhac@marvell.com>
Add SDP VF device ID in the table for probe matching.
Signed-off-by: Radha Mohan Chintakuntla <radhac@marvell.com>
---
drivers/net/cnxk/cn10k_ethdev.c | 5 +++++
drivers/net/cnxk/cn9k_ethdev.c | 6 ++++++
2 files changed, 11 insertions(+)
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 550cec5..80c5c0e 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -824,6 +824,11 @@ static const struct rte_pci_id cn10k_pci_nix_map[] = {
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_AF_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_AF_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KB, PCI_DEVID_CNXK_RVU_AF_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KB, PCI_DEVID_CNXK_RVU_SDP_VF),
{
.vendor_id = 0,
},
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 2663aa6..4fb0e2d 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -777,6 +777,12 @@ static const struct rte_pci_id cn9k_pci_nix_map[] = {
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_AF_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_AF_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF9KA, PCI_DEVID_CNXK_RVU_AF_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF9KA, PCI_DEVID_CNXK_RVU_SDP_VF),
{
.vendor_id = 0,
},
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 12/12] event/cnxk: offset timestamp data only if enabled on port
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (9 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 11/12] net/cnxk: add SDP VF device ID for probe matching Nithin Dabilpuram
@ 2022-06-16 7:07 ` Nithin Dabilpuram
2022-06-16 8:45 ` [PATCH 01/12] common/cnxk: use computed value for wqe skip Jerin Jacob
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
12 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 7:07 UTC (permalink / raw)
To: jerinj, Pavan Nikhilesh, Shijith Thotton
Cc: dev, skoteshwar, skori, Nithin Dabilpuram
Offset timestamp data only when enabled on the port instead of
just checking for offload flags.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/event/cnxk/cn10k_worker.h | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 034f508..7412a1b 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -112,8 +112,7 @@ static __rte_always_inline void
cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
void *lookup_mem, void *tstamp, uintptr_t lbase)
{
- uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
- (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+ uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
struct rte_event_vector *vec;
uint64_t aura_handle, laddr;
uint16_t nb_mbufs, non_vec;
@@ -133,6 +132,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
rte_prefetch0(&vec->ptrs[i]);
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
+ mbuf_init |= 8;
+
nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
flags | NIX_RX_VWQE_F, lookup_mem,
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH 01/12] common/cnxk: use computed value for wqe skip
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (10 preceding siblings ...)
2022-06-16 7:07 ` [PATCH 12/12] event/cnxk: offset timestamp data only if enabled on port Nithin Dabilpuram
@ 2022-06-16 8:45 ` Jerin Jacob
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
12 siblings, 0 replies; 29+ messages in thread
From: Jerin Jacob @ 2022-06-16 8:45 UTC (permalink / raw)
To: Nithin Dabilpuram
Cc: Jerin Jacob, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Pavan Nikhilesh, Shijith Thotton, dpdk-dev
On Thu, Jun 16, 2022 at 12:39 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> Use computed value for WQE skip instead of a hardcoded value.
> WQE skip needs to be number of 128B lines to accommodate rte_mbuf.
Change wqe -> WQE in subject
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
> Depends-on: series=23500 ("common/cnxk: add cnf10kb support")
>
> drivers/common/cnxk/roc_nix_inl.h | 2 +-
> drivers/common/cnxk/roc_nix_inl_priv.h | 2 +-
> drivers/event/cnxk/cnxk_eventdev_adptr.c | 5 ++++-
> drivers/net/cnxk/cnxk_ethdev_sec.c | 5 ++++-
> 4 files changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
> index b1b4c5b..c7b1817 100644
> --- a/drivers/common/cnxk/roc_nix_inl.h
> +++ b/drivers/common/cnxk/roc_nix_inl.h
> @@ -131,7 +131,7 @@ struct roc_nix_inl_dev {
> uint16_t channel;
> uint16_t chan_mask;
> bool attach_cptlf;
> - bool wqe_skip;
> + uint16_t wqe_skip;
> uint8_t spb_drop_pc;
> uint8_t lpb_drop_pc;
> bool set_soft_exp_poll;
> diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
> index d61c7b2..a775efc 100644
> --- a/drivers/common/cnxk/roc_nix_inl_priv.h
> +++ b/drivers/common/cnxk/roc_nix_inl_priv.h
> @@ -84,7 +84,7 @@ struct nix_inl_dev {
> uint32_t ipsec_in_max_spi;
> uint32_t inb_spi_mask;
> bool attach_cptlf;
> - bool wqe_skip;
> + uint16_t wqe_skip;
> bool ts_ena;
> };
>
> diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> index fa96090..cf5b1dd 100644
> --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> @@ -125,6 +125,7 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
> {
> struct roc_nix *nix = &cnxk_eth_dev->nix;
> struct roc_nix_rq *rq;
> + uint16_t wqe_skip;
> int rc;
>
> rq = &cnxk_eth_dev->rqs[rq_id];
> @@ -132,7 +133,9 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
> rq->tt = ev->sched_type;
> rq->hwgrp = ev->queue_id;
> rq->flow_tag_width = 20;
> - rq->wqe_skip = 1;
> + wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
> + wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
> + rq->wqe_skip = wqe_skip;
> rq->tag_mask = (port_id & 0xF) << 20;
> rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
> << 24;
> diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
> index d01ebb4..1de3454 100644
> --- a/drivers/net/cnxk/cnxk_ethdev_sec.c
> +++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
> @@ -264,6 +264,7 @@ cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
> char name[CNXK_NIX_INL_DEV_NAME_LEN];
> struct roc_nix_inl_dev *inl_dev;
> const struct rte_memzone *mz;
> + uint16_t wqe_skip;
> int rc = -ENOMEM;
>
> RTE_SET_USED(pci_drv);
> @@ -295,7 +296,9 @@ cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
>
> inl_dev->attach_cptlf = true;
> /* WQE skip is one for DPDK */
> - inl_dev->wqe_skip = true;
> + wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
> + wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
> + inl_dev->wqe_skip = wqe_skip;
> inl_dev->set_soft_exp_poll = true;
> rc = roc_nix_inl_dev_init(inl_dev);
> if (rc) {
> --
> 2.8.4
>
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 01/12] common/cnxk: use computed value for WQE skip
2022-06-16 7:07 [PATCH 01/12] common/cnxk: use computed value for wqe skip Nithin Dabilpuram
` (11 preceding siblings ...)
2022-06-16 8:45 ` [PATCH 01/12] common/cnxk: use computed value for wqe skip Jerin Jacob
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 02/12] common/cnxk: avoid CPT backpressure due to errata Nithin Dabilpuram
` (11 more replies)
12 siblings, 12 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Pavan Nikhilesh, Shijith Thotton
Cc: dev
Use computed value for WQE skip instead of a hardcoded value.
WQE skip needs to be the number of 128B lines required to accommodate rte_mbuf.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
Depends-on: series=23500 ("common/cnxk: add cnf10kb support")
v2:
- Fixed commit message in 10/12, 1/12 patches
drivers/common/cnxk/roc_nix_inl.h | 2 +-
drivers/common/cnxk/roc_nix_inl_priv.h | 2 +-
drivers/event/cnxk/cnxk_eventdev_adptr.c | 5 ++++-
drivers/net/cnxk/cnxk_ethdev_sec.c | 5 ++++-
4 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index b1b4c5b..c7b1817 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -131,7 +131,7 @@ struct roc_nix_inl_dev {
uint16_t channel;
uint16_t chan_mask;
bool attach_cptlf;
- bool wqe_skip;
+ uint16_t wqe_skip;
uint8_t spb_drop_pc;
uint8_t lpb_drop_pc;
bool set_soft_exp_poll;
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index d61c7b2..a775efc 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -84,7 +84,7 @@ struct nix_inl_dev {
uint32_t ipsec_in_max_spi;
uint32_t inb_spi_mask;
bool attach_cptlf;
- bool wqe_skip;
+ uint16_t wqe_skip;
bool ts_ena;
};
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index fa96090..cf5b1dd 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -125,6 +125,7 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
{
struct roc_nix *nix = &cnxk_eth_dev->nix;
struct roc_nix_rq *rq;
+ uint16_t wqe_skip;
int rc;
rq = &cnxk_eth_dev->rqs[rq_id];
@@ -132,7 +133,9 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
rq->tt = ev->sched_type;
rq->hwgrp = ev->queue_id;
rq->flow_tag_width = 20;
- rq->wqe_skip = 1;
+ wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
+ wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
+ rq->wqe_skip = wqe_skip;
rq->tag_mask = (port_id & 0xF) << 20;
rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
<< 24;
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index d01ebb4..1de3454 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -264,6 +264,7 @@ cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
char name[CNXK_NIX_INL_DEV_NAME_LEN];
struct roc_nix_inl_dev *inl_dev;
const struct rte_memzone *mz;
+ uint16_t wqe_skip;
int rc = -ENOMEM;
RTE_SET_USED(pci_drv);
@@ -295,7 +296,9 @@ cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
inl_dev->attach_cptlf = true;
/* WQE skip is one for DPDK */
- inl_dev->wqe_skip = true;
+ wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
+ wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
+ inl_dev->wqe_skip = wqe_skip;
inl_dev->set_soft_exp_poll = true;
rc = roc_nix_inl_dev_init(inl_dev);
if (rc) {
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 02/12] common/cnxk: avoid CPT backpressure due to errata
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 03/12] common/cnxk: add PFC support for VFs Nithin Dabilpuram
` (10 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
Avoid enabling CPT backpressure due to an errata where
backpressure would block requests from even other
CPT LFs. Also allow CQ size >= 16K.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_errata.h | 7 +++++++
drivers/common/cnxk/roc_nix.h | 2 +-
drivers/common/cnxk/roc_nix_fc.c | 3 ++-
3 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index 31162d5..f048297 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -77,4 +77,11 @@ roc_errata_nix_has_perf_issue_on_stats_update(void)
return true;
}
+/* Errata IPBUCPT-38726, IPBUCPT-38727 */
+static inline bool
+roc_errata_cpt_hang_on_x2p_bp(void)
+{
+ return roc_model_is_cn10ka_a0();
+}
+
#endif /* _ROC_ERRATA_H_ */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index aedde1c..944e4c6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -309,7 +309,7 @@ struct roc_nix_rq {
struct roc_nix_cq {
/* Input parameters */
uint16_t qid;
- uint16_t nb_desc;
+ uint32_t nb_desc;
/* End of Input parameters */
uint16_t drop_thresh;
struct roc_nix *roc_nix;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index a0505bd..cef5d07 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -77,7 +77,8 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
goto exit;
/* Enable backpressure on CPT if inline inb is enabled */
- if (enable && roc_nix_inl_inb_is_enabled(roc_nix)) {
+ if (enable && roc_nix_inl_inb_is_enabled(roc_nix) &&
+ !roc_errata_cpt_hang_on_x2p_bp()) {
req = mbox_alloc_msg_nix_cpt_bp_enable(mbox);
if (req == NULL)
return rc;
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 03/12] common/cnxk: add PFC support for VFs
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 02/12] common/cnxk: avoid CPT backpressure due to errata Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 04/12] common/cnxk: support same TC value across multiple queues Nithin Dabilpuram
` (9 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Ray Kinsella, Pavan Nikhilesh, Shijith Thotton
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
The current PFC implementation does not support VFs.
This patch enables PFC on VFs too.
Also fix the config of aura.bp to be based on the number
of buffers (aura.limit) and the corresponding shift
value (aura.shift).
Fixes: cb4bfd6e7bdf ("event/cnxk: support Rx adapter")
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
drivers/common/cnxk/roc_nix.h | 14 +++-
drivers/common/cnxk/roc_nix_fc.c | 120 +++++++++++++++++++++++++++----
drivers/common/cnxk/roc_nix_priv.h | 2 +
drivers/common/cnxk/roc_nix_queue.c | 47 ++++++++++++
drivers/common/cnxk/roc_nix_tm.c | 67 +++++++++--------
drivers/common/cnxk/version.map | 3 +-
drivers/event/cnxk/cnxk_eventdev_adptr.c | 12 ++--
drivers/net/cnxk/cnxk_ethdev.h | 2 +
8 files changed, 217 insertions(+), 50 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 944e4c6..f0d7fc8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -157,6 +157,7 @@ struct roc_nix_fc_cfg {
#define ROC_NIX_FC_RXCHAN_CFG 0
#define ROC_NIX_FC_CQ_CFG 1
#define ROC_NIX_FC_TM_CFG 2
+#define ROC_NIX_FC_RQ_CFG 3
uint8_t type;
union {
struct {
@@ -171,6 +172,14 @@ struct roc_nix_fc_cfg {
} cq_cfg;
struct {
+ uint32_t rq;
+ uint16_t tc;
+ uint16_t cq_drop;
+ bool enable;
+ uint64_t pool;
+ } rq_cfg;
+
+ struct {
uint32_t sq;
uint16_t tc;
bool enable;
@@ -791,8 +800,8 @@ uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
-void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
- uint8_t ena, uint8_t force);
+void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
+ uint8_t ena, uint8_t force, uint8_t tc);
/* NPC */
int __roc_api roc_nix_npc_promisc_ena_dis(struct roc_nix *roc_nix, int enable);
@@ -845,6 +854,7 @@ int __roc_api roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
int __roc_api roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
bool ena);
int __roc_api roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable);
+int __roc_api roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid);
int __roc_api roc_nix_rq_fini(struct roc_nix_rq *rq);
int __roc_api roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq);
int __roc_api roc_nix_cq_fini(struct roc_nix_cq *cq);
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index cef5d07..daae285 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -148,6 +148,61 @@ nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
}
static int
+nix_fc_rq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+{
+ struct mbox *mbox = get_mbox(roc_nix);
+ struct nix_aq_enq_rsp *rsp;
+ struct npa_aq_enq_req *npa_req;
+ struct npa_aq_enq_rsp *npa_rsp;
+ int rc;
+
+ if (roc_model_is_cn9k()) {
+ struct nix_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = fc_cfg->rq_cfg.rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ } else {
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = fc_cfg->rq_cfg.rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ }
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto exit;
+
+ npa_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!npa_req)
+ return -ENOSPC;
+
+ npa_req->aura_id = rsp->rq.lpb_aura;
+ npa_req->ctype = NPA_AQ_CTYPE_AURA;
+ npa_req->op = NPA_AQ_INSTOP_READ;
+
+ rc = mbox_process_msg(mbox, (void *)&npa_rsp);
+ if (rc)
+ goto exit;
+
+ fc_cfg->cq_cfg.cq_drop = npa_rsp->aura.bp;
+ fc_cfg->cq_cfg.enable = npa_rsp->aura.bp_ena;
+ fc_cfg->type = ROC_NIX_FC_RQ_CFG;
+
+exit:
+ return rc;
+}
+
+static int
nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
@@ -198,6 +253,33 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
return mbox_process(mbox);
}
+static int
+nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+{
+ struct roc_nix_fc_cfg tmp;
+ int sso_ena = 0;
+
+ /* Check whether RQ is connected to SSO or not */
+ sso_ena = roc_nix_rq_is_sso_enable(roc_nix, fc_cfg->rq_cfg.rq);
+ if (sso_ena < 0)
+ return -EINVAL;
+
+ if (sso_ena)
+ roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
+ fc_cfg->rq_cfg.enable, true,
+ fc_cfg->rq_cfg.tc);
+
+ /* Copy RQ config to CQ config as they are occupying same area */
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.type = ROC_NIX_FC_CQ_CFG;
+ tmp.cq_cfg.rq = fc_cfg->rq_cfg.rq;
+ tmp.cq_cfg.tc = fc_cfg->rq_cfg.tc;
+ tmp.cq_cfg.cq_drop = fc_cfg->rq_cfg.cq_drop;
+ tmp.cq_cfg.enable = fc_cfg->rq_cfg.enable;
+
+ return nix_fc_cq_config_set(roc_nix, &tmp);
+}
+
int
roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
@@ -207,6 +289,8 @@ roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
return nix_fc_cq_config_get(roc_nix, fc_cfg);
+ else if (fc_cfg->type == ROC_NIX_FC_RQ_CFG)
+ return nix_fc_rq_config_get(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
@@ -218,12 +302,10 @@ roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
int
roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
- if (!roc_nix_is_pf(roc_nix) && !roc_nix_is_lbk(roc_nix) &&
- !roc_nix_is_sdp(roc_nix))
- return 0;
-
if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
return nix_fc_cq_config_set(roc_nix, fc_cfg);
+ else if (fc_cfg->type == ROC_NIX_FC_RQ_CFG)
+ return nix_fc_rq_config_set(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
return nix_fc_rxchan_bpid_set(roc_nix,
fc_cfg->rxchan_cfg.enable);
@@ -320,8 +402,8 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
}
void
-rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
- uint8_t force)
+roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
+ uint8_t force, uint8_t tc)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct npa_lf *lf = idev_npa_obj_get();
@@ -329,6 +411,7 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
struct npa_aq_enq_rsp *rsp;
struct mbox *mbox;
uint32_t limit;
+ uint64_t shift;
int rc;
if (roc_nix_is_sdp(roc_nix))
@@ -351,8 +434,10 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
return;
limit = rsp->aura.limit;
+ shift = rsp->aura.shift;
+
/* BP is already enabled. */
- if (rsp->aura.bp_ena) {
+ if (rsp->aura.bp_ena && ena) {
uint16_t bpid;
bool nix1;
@@ -363,12 +448,15 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
bpid = rsp->aura.nix0_bpid;
/* If BP ids don't match disable BP. */
- if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[0])) &&
+ if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc])) &&
!force) {
req = mbox_alloc_msg_npa_aq_enq(mbox);
if (req == NULL)
return;
+ plt_info("Disabling BP/FC on aura 0x%" PRIx64
+ " as it shared across ports or tc",
+ pool_id);
req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
req->ctype = NPA_AQ_CTYPE_AURA;
req->op = NPA_AQ_INSTOP_WRITE;
@@ -378,11 +466,15 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
mbox_process(mbox);
}
+
+ if ((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc]))
+ plt_info("Ignoring aura 0x%" PRIx64 "->%u bpid mapping",
+ pool_id, nix->bpid[tc]);
return;
}
/* BP was previously enabled but now disabled skip. */
- if (rsp->aura.bp)
+ if (rsp->aura.bp && ena)
return;
req = mbox_alloc_msg_npa_aq_enq(mbox);
@@ -395,14 +487,16 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
if (ena) {
if (nix->is_nix1) {
- req->aura.nix1_bpid = nix->bpid[0];
+ req->aura.nix1_bpid = nix->bpid[tc];
req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
} else {
- req->aura.nix0_bpid = nix->bpid[0];
+ req->aura.nix0_bpid = nix->bpid[tc];
req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
}
- req->aura.bp = NIX_RQ_AURA_THRESH(
- limit > 128 ? 256 : limit); /* 95% of size*/
+ req->aura.bp = NIX_RQ_AURA_THRESH(limit >> shift);
+ req->aura_mask.bp = ~(req->aura_mask.bp);
+ } else {
+ req->aura.bp = 0;
req->aura_mask.bp = ~(req->aura_mask.bp);
}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index cc69d71..5e865f8 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -357,6 +357,8 @@ nix_tm_tree2str(enum roc_nix_tm_tree tree)
return "Default Tree";
else if (tree == ROC_NIX_TM_RLIMIT)
return "Rate Limit Tree";
+ else if (tree == ROC_NIX_TM_PFC)
+ return "PFC Tree";
else if (tree == ROC_NIX_TM_USER)
return "User Tree";
return "???";
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 76c049c..fa4c954 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -94,6 +94,53 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
}
int
+roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct dev *dev = &nix->dev;
+ struct mbox *mbox = dev->mbox;
+ bool sso_enable;
+ int rc;
+
+ if (roc_model_is_cn9k()) {
+ struct nix_aq_enq_rsp *rsp;
+ struct nix_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ sso_enable = rsp->rq.sso_ena;
+ } else {
+ struct nix_cn10k_aq_enq_rsp *rsp;
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ sso_enable = rsp->rq.sso_ena;
+ }
+
+ return sso_enable ? true : false;
+}
+
+int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
bool cfg, bool ena)
{
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 7fd54ef..151e217 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -98,7 +98,6 @@ int
nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
{
struct nix_tm_node_list *list;
- bool is_pf_or_lbk = false;
struct nix_tm_node *node;
bool skip_bp = false;
uint32_t hw_lvl;
@@ -106,9 +105,6 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
list = nix_tm_node_list(nix, tree);
- if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
- is_pf_or_lbk = true;
-
for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
TAILQ_FOREACH(node, list, node) {
if (node->hw_lvl != hw_lvl)
@@ -118,7 +114,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
* set per channel only for PF or lbk vf.
*/
node->bp_capa = 0;
- if (is_pf_or_lbk && !skip_bp &&
+ if (!nix->sdp_link && !skip_bp &&
node->hw_lvl == nix->tm_link_cfg_lvl) {
node->bp_capa = 1;
skip_bp = false;
@@ -329,6 +325,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
struct nix_tm_node *sq_node;
struct nix_tm_node *parent;
struct nix_tm_node *node;
+ uint8_t parent_lvl;
uint8_t k = 0;
int rc = 0;
@@ -336,9 +333,12 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
if (!sq_node)
return -ENOENT;
+ parent_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH2 :
+ ROC_TM_LVL_SCH1);
+
parent = sq_node->parent;
while (parent) {
- if (parent->lvl == ROC_TM_LVL_SCH2)
+ if (parent->lvl == parent_lvl)
break;
parent = parent->parent;
@@ -1469,16 +1469,18 @@ int
roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint8_t leaf_lvl, lvl, lvl_start, lvl_end;
uint32_t nonleaf_id = nix->nb_tx_queues;
struct nix_tm_node *node = NULL;
- uint8_t leaf_lvl, lvl, lvl_end;
uint32_t tl2_node_id;
uint32_t parent, i;
int rc = -ENOMEM;
parent = ROC_NIX_TM_NODE_ID_INVALID;
- lvl_end = ROC_TM_LVL_SCH3;
- leaf_lvl = ROC_TM_LVL_QUEUE;
+ lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
+ ROC_TM_LVL_SCH2);
+ leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+ ROC_TM_LVL_SCH4);
/* TL1 node */
node = nix_tm_node_alloc();
@@ -1501,31 +1503,37 @@ roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
parent = nonleaf_id;
nonleaf_id++;
- /* TL2 node */
- rc = -ENOMEM;
- node = nix_tm_node_alloc();
- if (!node)
- goto error;
+ lvl_start = ROC_TM_LVL_SCH1;
+ if (roc_nix_is_pf(roc_nix)) {
+ /* TL2 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
- node->id = nonleaf_id;
- node->parent_id = parent;
- node->priority = 0;
- node->weight = NIX_TM_DFLT_RR_WT;
- node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
- node->lvl = ROC_TM_LVL_SCH1;
- node->tree = ROC_NIX_TM_PFC;
- node->rel_chan = NIX_TM_CHAN_INVALID;
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_SCH1;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
- rc = nix_tm_node_add(roc_nix, node);
- if (rc)
- goto error;
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
- tl2_node_id = nonleaf_id;
- nonleaf_id++;
+ lvl_start = ROC_TM_LVL_SCH2;
+ tl2_node_id = nonleaf_id;
+ nonleaf_id++;
+ } else {
+ tl2_node_id = parent;
+ }
for (i = 0; i < nix->nb_tx_queues; i++) {
parent = tl2_node_id;
- for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+ for (lvl = lvl_start; lvl <= lvl_end; lvl++) {
rc = -ENOMEM;
node = nix_tm_node_alloc();
if (!node)
@@ -1549,7 +1557,8 @@ roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
nonleaf_id++;
}
- lvl = ROC_TM_LVL_SCH4;
+ lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
+ ROC_TM_LVL_SCH3);
rc = -ENOMEM;
node = nix_tm_node_alloc();
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 1ba5b4f..27e81f2 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -122,7 +122,7 @@ INTERNAL {
roc_nix_fc_config_set;
roc_nix_fc_mode_set;
roc_nix_fc_mode_get;
- rox_nix_fc_npa_bp_cfg;
+ roc_nix_fc_npa_bp_cfg;
roc_nix_get_base_chan;
roc_nix_get_pf;
roc_nix_get_pf_func;
@@ -220,6 +220,7 @@ INTERNAL {
roc_nix_rq_ena_dis;
roc_nix_rq_fini;
roc_nix_rq_init;
+ roc_nix_rq_is_sso_enable;
roc_nix_rq_modify;
roc_nix_rss_default_setup;
roc_nix_rss_flowkey_set;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index cf5b1dd..8fcc377 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -250,9 +250,11 @@ cnxk_sso_rx_adapter_queue_add(
rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix,
false);
}
- rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
- rxq_sp->qconf.mp->pool_id, true,
- dev->force_ena_bp);
+
+ if (rxq_sp->tx_pause)
+ roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ rxq_sp->qconf.mp->pool_id, true,
+ dev->force_ena_bp, rxq_sp->tc);
cnxk_eth_dev->nb_rxq_sso++;
}
@@ -293,9 +295,9 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
rxq_sp = cnxk_eth_rxq_to_sp(
eth_dev->data->rx_queues[rx_queue_id]);
rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
- rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
rxq_sp->qconf.mp->pool_id, false,
- dev->force_ena_bp);
+ dev->force_ena_bp, 0);
cnxk_eth_dev->nb_rxq_sso--;
/* Enable drop_re if it was disabled earlier */
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index e992302..0400d73 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -443,6 +443,8 @@ struct cnxk_eth_rxq_sp {
struct cnxk_eth_dev *dev;
struct cnxk_eth_qconf qconf;
uint16_t qid;
+ uint8_t tx_pause;
+ uint8_t tc;
} __plt_cache_aligned;
struct cnxk_eth_txq_sp {
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 04/12] common/cnxk: support same TC value across multiple queues
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 02/12] common/cnxk: avoid CPT backpressure due to errata Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 03/12] common/cnxk: add PFC support for VFs Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 05/12] common/cnxk: enhance CPT parse header dump Nithin Dabilpuram
` (8 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev, Harman Kalra
From: Harman Kalra <hkalra@marvell.com>
User may want to configure same TC value across multiple queues, but
for that all queues should have a common TL3 where this TC value will
get configured.
Changed the pfc_tc_cq_map/pfc_tc_sq_map array indexing to qid and store
TC values in the array. As multiple queues may have same TC value.
Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
drivers/common/cnxk/roc_dev.c | 18 ++++++++
drivers/common/cnxk/roc_nix.h | 4 +-
drivers/common/cnxk/roc_nix_fc.c | 2 +-
drivers/common/cnxk/roc_nix_priv.h | 3 +-
drivers/common/cnxk/roc_nix_tm.c | 87 ++++++++++++++++++++++++------------
drivers/common/cnxk/roc_nix_tm_ops.c | 3 +-
6 files changed, 84 insertions(+), 33 deletions(-)
diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 09199ac..59128a3 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -421,6 +421,24 @@ process_msgs(struct dev *dev, struct mbox *mbox)
/* Get our identity */
dev->pf_func = msg->pcifunc;
break;
+ case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
+ /* Handling the case where one VF tries to disable PFC
+ * while PFC already configured on other VFs. This is
+ * not an error but a warning which can be ignored.
+ */
+#define LMAC_AF_ERR_PERM_DENIED -1103
+ if (msg->rc) {
+ if (msg->rc == LMAC_AF_ERR_PERM_DENIED) {
+ plt_mbox_dbg(
+ "Receive Flow control disable not permitted "
+ "as its used by other PFVFs");
+ msg->rc = 0;
+ } else {
+ plt_err("Message (%s) response has err=%d",
+ mbox_id2name(msg->id), msg->rc);
+ }
+ }
+ break;
default:
if (msg->rc)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index f0d7fc8..4e5cf05 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -11,7 +11,8 @@
#define ROC_NIX_BPF_LEVEL_IDX_INVALID 0xFF
#define ROC_NIX_BPF_LEVEL_MAX 3
#define ROC_NIX_BPF_STATS_MAX 12
-#define ROC_NIX_MTR_ID_INVALID UINT32_MAX
+#define ROC_NIX_MTR_ID_INVALID UINT32_MAX
+#define ROC_NIX_PFC_CLASS_INVALID UINT8_MAX
enum roc_nix_rss_reta_sz {
ROC_NIX_RSS_RETA_SZ_64 = 64,
@@ -349,6 +350,7 @@ struct roc_nix_sq {
void *lmt_addr;
void *sqe_mem;
void *fc;
+ uint8_t tc;
};
struct roc_nix_link_info {
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index daae285..f4cfa11 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -312,7 +312,7 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
fc_cfg->tm_cfg.tc,
- fc_cfg->tm_cfg.enable);
+ fc_cfg->tm_cfg.enable, false);
return -EINVAL;
}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 5e865f8..5b0522c 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -100,6 +100,7 @@ struct nix_tm_node {
/* Last stats */
uint64_t last_pkts;
uint64_t last_bytes;
+ uint32_t tc_refcnt;
};
struct nix_tm_shaper_profile {
@@ -402,7 +403,7 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
- bool enable);
+ bool enable, bool force_flush);
void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
int nix_tm_mark_init(struct nix *nix);
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 151e217..a31abde 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -314,7 +314,7 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
int
nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
- bool enable)
+ bool enable, bool force_flush)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
enum roc_nix_tm_tree tree = nix->tm_tree;
@@ -325,10 +325,15 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
struct nix_tm_node *sq_node;
struct nix_tm_node *parent;
struct nix_tm_node *node;
+ struct roc_nix_sq *sq_s;
uint8_t parent_lvl;
uint8_t k = 0;
int rc = 0;
+ sq_s = nix->sqs[sq];
+ if (!sq_s)
+ return -ENOENT;
+
sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
if (!sq_node)
return -ENOENT;
@@ -348,11 +353,22 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
list = nix_tm_node_list(nix, tree);
- if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+ /* Enable request, parent rel chan already configured */
+ if (enable && parent->rel_chan != NIX_TM_CHAN_INVALID &&
+ parent->rel_chan != tc) {
rc = -EINVAL;
goto err;
}
+ /* No action if enable request for a non participating SQ. This case is
+ * required to handle post flush where TCs should be reconfigured after
+ * pre flush.
+ */
+ if (enable && sq_s->tc == ROC_NIX_PFC_CLASS_INVALID &&
+ tc == ROC_NIX_PFC_CLASS_INVALID)
+ return 0;
+
+ /* Find the parent TL3 */
TAILQ_FOREACH(node, list, node) {
if (node->hw_lvl != nix->tm_link_cfg_lvl)
continue;
@@ -360,38 +376,51 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
continue;
- if (node->hw_id != parent->hw_id)
- continue;
-
- if (!req) {
- req = mbox_alloc_msg_nix_txschq_cfg(mbox);
- req->lvl = nix->tm_link_cfg_lvl;
- k = 0;
+ /* Restrict sharing of TL3 across the queues */
+ if (enable && node != parent && node->rel_chan == tc) {
+ plt_err("SQ %d node TL3 id %d already has %d tc value set",
+ sq, node->hw_id, tc);
+ return -EINVAL;
}
+ }
- req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
- req->regval[k] = enable ? tc : 0;
- req->regval[k] |= enable ? BIT_ULL(13) : 0;
- req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
- k++;
-
- if (k >= MAX_REGS_PER_MBOX_MSG) {
- req->num_regs = k;
- rc = mbox_process(mbox);
- if (rc)
- goto err;
- req = NULL;
- }
+ /* In case of user tree i.e. multiple SQs may share a TL3, disabling PFC
+ * on one of such SQ should not hamper the traffic control on other SQs.
+ * Maintaining a reference count scheme to account no of SQs sharing the
+ * TL3 before disabling PFC on it.
+ */
+ if (!force_flush && !enable &&
+ parent->rel_chan != NIX_TM_CHAN_INVALID) {
+ if (sq_s->tc != ROC_NIX_PFC_CLASS_INVALID)
+ parent->tc_refcnt--;
+ if (parent->tc_refcnt > 0)
+ return 0;
}
- if (req) {
- req->num_regs = k;
- rc = mbox_process(mbox);
- if (rc)
- goto err;
+ /* Allocating TL3 resources */
+ if (!req) {
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = nix->tm_link_cfg_lvl;
+ k = 0;
}
+ /* Enable PFC on the identified TL3 */
+ req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(parent->hw_id, link);
+ req->regval[k] = enable ? tc : 0;
+ req->regval[k] |= enable ? BIT_ULL(13) : 0;
+ req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
+ k++;
+
+ req->num_regs = k;
+ rc = mbox_process(mbox);
+ if (rc)
+ goto err;
+
parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
+ /* Increase reference count for parent TL3 */
+ if (enable && sq_s->tc == ROC_NIX_PFC_CLASS_INVALID)
+ parent->tc_refcnt++;
+
return 0;
err:
plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -629,7 +658,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
}
/* Disable backpressure */
- rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false, true);
if (rc) {
plt_err("Failed to disable backpressure for flush, rc=%d", rc);
return rc;
@@ -764,7 +793,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
return 0;
/* Restore backpressure */
- rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, sq->tc, true, false);
if (rc) {
plt_err("Failed to restore backpressure, rc=%d", rc);
return rc;
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 5884ce5..4aa5500 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -292,6 +292,7 @@ roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
node->pkt_mode_set = roc_node->pkt_mode_set;
node->free_fn = roc_node->free_fn;
node->tree = ROC_NIX_TM_USER;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
return nix_tm_node_add(roc_nix, node);
}
@@ -473,7 +474,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
if (!sq)
continue;
- rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false, false);
if (rc && rc != -ENOENT) {
plt_err("Failed to disable backpressure, rc=%d", rc);
goto cleanup;
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 05/12] common/cnxk: enhance CPT parse header dump
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (2 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 04/12] common/cnxk: support same TC value across multiple queues Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 06/12] common/cnxk: fix mbox structs to avoid unaligned access Nithin Dabilpuram
` (7 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
Enhance CPT parse header dump to dump fragment info
and swap pointers before printing.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_cpt_debug.c | 33 +++++++++++++++++++++++++++++++--
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/roc_cpt_debug.c b/drivers/common/cnxk/roc_cpt_debug.c
index be6ddb5..5602e53 100644
--- a/drivers/common/cnxk/roc_cpt_debug.c
+++ b/drivers/common/cnxk/roc_cpt_debug.c
@@ -8,6 +8,10 @@
void
roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth)
{
+ struct cpt_frag_info_s *frag_info;
+ uint32_t offset;
+ uint64_t *slot;
+
plt_print("CPT_PARSE \t0x%p:", cpth);
/* W0 */
@@ -19,7 +23,7 @@ roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth)
cpth->w0.pad_len, cpth->w0.num_frags, cpth->w0.pkt_out);
/* W1 */
- plt_print("W1: wqe_ptr \t0x%016lx\t", cpth->wqe_ptr);
+ plt_print("W1: wqe_ptr \t0x%016lx\t", plt_be_to_cpu_64(cpth->wqe_ptr));
/* W2 */
plt_print("W2: frag_age \t0x%x\t\torig_pf_func \t0x%04x",
@@ -33,7 +37,32 @@ roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth)
/* W4 */
plt_print("W4: esn \t%" PRIx64 " \t OR frag1_wqe_ptr \t0x%" PRIx64,
- cpth->esn, cpth->frag1_wqe_ptr);
+ cpth->esn, plt_be_to_cpu_64(cpth->frag1_wqe_ptr));
+
+ /* offset of 0 implies 256B, otherwise it implies offset*8B */
+ offset = cpth->w2.fi_offset;
+ offset = (((offset - 1) & 0x1f) + 1) * 8;
+ frag_info = PLT_PTR_ADD(cpth, offset);
+
+ plt_print("CPT Fraginfo \t0x%p:", frag_info);
+
+ /* W0 */
+ plt_print("W0: f0.info \t0x%x", frag_info->w0.f0.info);
+ plt_print("W0: f1.info \t0x%x", frag_info->w0.f1.info);
+ plt_print("W0: f2.info \t0x%x", frag_info->w0.f2.info);
+ plt_print("W0: f3.info \t0x%x", frag_info->w0.f3.info);
+
+ /* W1 */
+ plt_print("W1: frag_size0 \t0x%x", frag_info->w1.frag_size0);
+ plt_print("W1: frag_size1 \t0x%x", frag_info->w1.frag_size1);
+ plt_print("W1: frag_size2 \t0x%x", frag_info->w1.frag_size2);
+ plt_print("W1: frag_size3 \t0x%x", frag_info->w1.frag_size3);
+
+ slot = (uint64_t *)(frag_info + 1);
+ plt_print("Frag Slot2: WQE ptr \t%p",
+ (void *)plt_be_to_cpu_64(slot[0]));
+ plt_print("Frag Slot3: WQE ptr \t%p",
+ (void *)plt_be_to_cpu_64(slot[1]));
}
static int
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 06/12] common/cnxk: fix mbox structs to avoid unaligned access
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (3 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 05/12] common/cnxk: enhance CPT parse header dump Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 07/12] net/cnxk: add SDP link status Nithin Dabilpuram
` (6 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
Fix mbox structs to avoid unaligned access as mbox memory
is from BAR space.
Fixes: 503b82de2cbf ("common/cnxk: add mbox request and response definitions")
Fixes: e746aec161cc ("common/cnxk: fix SQ flush sequence")
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_mbox.h | 18 +++++++++---------
drivers/common/cnxk/roc_nix_inl.c | 2 ++
2 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 2c30f19..965c704 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -777,7 +777,7 @@ struct nix_lf_alloc_req {
uint64_t __io way_mask;
#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
#define NIX_LF_LBK_BLK_SEL BIT_ULL(1)
- uint64_t flags;
+ uint64_t __io flags;
};
struct nix_lf_alloc_rsp {
@@ -798,7 +798,7 @@ struct nix_lf_alloc_rsp {
uint8_t __io cgx_links; /* No. of CGX links present in HW */
uint8_t __io lbk_links; /* No. of LBK links present in HW */
uint8_t __io sdp_links; /* No. of SDP links present in HW */
- uint8_t tx_link; /* Transmit channel link number */
+ uint8_t __io tx_link; /* Transmit channel link number */
};
struct nix_lf_free_req {
@@ -1275,8 +1275,8 @@ struct ssow_lf_free_req {
#define SSOW_INVAL_SELECTIVE_VER 0x1000
struct ssow_lf_inv_req {
struct mbox_msghdr hdr;
- uint16_t nb_hws; /* Number of HWS to invalidate*/
- uint16_t hws[MAX_RVU_BLKLF_CNT]; /* Array of HWS */
+ uint16_t __io nb_hws; /* Number of HWS to invalidate*/
+ uint16_t __io hws[MAX_RVU_BLKLF_CNT]; /* Array of HWS */
};
struct ssow_config_lsw {
@@ -1453,11 +1453,11 @@ struct cpt_sts_rsp {
struct cpt_rxc_time_cfg_req {
struct mbox_msghdr hdr;
int blkaddr;
- uint32_t step;
- uint16_t zombie_thres;
- uint16_t zombie_limit;
- uint16_t active_thres;
- uint16_t active_limit;
+ uint32_t __io step;
+ uint16_t __io zombie_thres;
+ uint16_t __io zombie_limit;
+ uint16_t __io active_thres;
+ uint16_t __io active_limit;
};
struct cpt_rx_inline_lf_cfg_msg {
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 39b9bec..7da8938 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -246,6 +246,8 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
struct roc_cpt_rxc_time_cfg cfg;
PLT_SET_USED(max_frags);
+ if (idev == NULL)
+ return -ENOTSUP;
roc_cpt = idev->cpt;
if (!roc_cpt) {
plt_err("Cannot support inline inbound, cryptodev not probed");
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 07/12] net/cnxk: add SDP link status
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (4 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 06/12] common/cnxk: fix mbox structs to avoid unaligned access Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 08/12] net/cnxk: remove restriction on VFs for PFC config Nithin Dabilpuram
` (5 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev, Satananda Burla
From: Satananda Burla <sburla@marvell.com>
Add SDP link status reporting
Signed-off-by: Satananda Burla <sburla@marvell.com>
---
drivers/net/cnxk/cnxk_link.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c
index b1d59e3..127c9e7 100644
--- a/drivers/net/cnxk/cnxk_link.c
+++ b/drivers/net/cnxk/cnxk_link.c
@@ -13,7 +13,7 @@ cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set)
dev->flags &= ~CNXK_LINK_CFG_IN_PROGRESS_F;
/* Update link info for LBK */
- if (!set && roc_nix_is_lbk(&dev->nix)) {
+ if (!set && (roc_nix_is_lbk(&dev->nix) || roc_nix_is_sdp(&dev->nix))) {
struct rte_eth_link link;
link.link_status = RTE_ETH_LINK_UP;
@@ -124,10 +124,10 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
RTE_SET_USED(wait_to_complete);
memset(&link, 0, sizeof(struct rte_eth_link));
- if (!eth_dev->data->dev_started || roc_nix_is_sdp(&dev->nix))
+ if (!eth_dev->data->dev_started)
return 0;
- if (roc_nix_is_lbk(&dev->nix)) {
+ if (roc_nix_is_lbk(&dev->nix) || roc_nix_is_sdp(&dev->nix)) {
link.link_status = RTE_ETH_LINK_UP;
link.link_speed = RTE_ETH_SPEED_NUM_100G;
link.link_autoneg = RTE_ETH_LINK_FIXED;
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 08/12] net/cnxk: remove restriction on VFs for PFC config
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (5 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 07/12] net/cnxk: add SDP link status Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 09/12] net/cnxk: pfc class disable resulting in invalid behaviour Nithin Dabilpuram
` (4 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Currently PFC configuration is not allowed on VFs.
This patch enables PFC configuration on VFs.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
drivers/net/cnxk/cnxk_ethdev.c | 9 +-
drivers/net/cnxk/cnxk_ethdev.h | 13 +--
drivers/net/cnxk/cnxk_ethdev_ops.c | 219 +++++++++++++++++++++----------------
3 files changed, 137 insertions(+), 104 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 09e5736..941b270 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -323,7 +323,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
struct cnxk_fc_cfg *fc = &dev->fc_cfg;
int rc;
- if (roc_nix_is_sdp(&dev->nix))
+ if (roc_nix_is_vf_or_sdp(&dev->nix))
return 0;
/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
@@ -604,6 +604,9 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
rxq_sp->qconf.nb_desc = nb_desc;
rxq_sp->qconf.mp = mp;
+ rxq_sp->tc = 0;
+ rxq_sp->tx_pause = (dev->fc_cfg.mode == RTE_ETH_FC_FULL ||
+ dev->fc_cfg.mode == RTE_ETH_FC_TX_PAUSE);
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Pass a tagmask used to handle error packets in inline device.
@@ -1795,7 +1798,6 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
pfc_conf.rx_pause.tc = i;
- pfc_conf.tx_pause.rx_qid = i;
pfc_conf.tx_pause.tc = i;
rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
&pfc_conf);
@@ -1805,9 +1807,6 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
}
}
- fc_conf.mode = RTE_ETH_FC_FULL;
- rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
-
/* Disable and free rte_meter entries */
nix_meter_fini(dev);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 0400d73..db2d849 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -156,13 +156,10 @@ struct cnxk_fc_cfg {
};
struct cnxk_pfc_cfg {
- struct cnxk_fc_cfg fc_cfg;
uint16_t class_en;
uint16_t pause_time;
- uint8_t rx_tc;
- uint8_t rx_qid;
- uint8_t tx_tc;
- uint8_t tx_qid;
+ uint16_t rx_pause_en;
+ uint16_t tx_pause_en;
};
struct cnxk_eth_qconf {
@@ -669,8 +666,10 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
uint32_t *prev_id, uint32_t *next_id,
struct cnxk_mtr_policy_node *policy,
int *tree_level);
-int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
- struct cnxk_pfc_cfg *conf);
+int nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint8_t tx_pause, uint8_t tc);
+int nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint8_t rx_pause, uint8_t tc);
/* Inlines */
static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 15d8e8e..caace9d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -225,15 +225,17 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
struct roc_nix *nix = &dev->nix;
struct roc_nix_fc_cfg fc_cfg;
struct roc_nix_cq *cq;
+ struct roc_nix_rq *rq;
memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ rq = &dev->rqs[qid];
cq = &dev->cqs[qid];
- fc_cfg.type = ROC_NIX_FC_CQ_CFG;
- fc_cfg.cq_cfg.enable = enable;
- /* Map all CQs to last channel */
- fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
- fc_cfg.cq_cfg.rq = qid;
- fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+ fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+ fc_cfg.rq_cfg.enable = enable;
+ fc_cfg.rq_cfg.tc = 0;
+ fc_cfg.rq_cfg.rq = qid;
+ fc_cfg.rq_cfg.pool = rq->aura_handle;
+ fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
return roc_nix_fc_config_set(nix, &fc_cfg);
}
@@ -255,10 +257,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
uint8_t rx_pause, tx_pause;
int rc, i;
- if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_lbk(nix)) {
- plt_err("Flow control configuration is not allowed on VFs");
- return -ENOTSUP;
- }
+ if (roc_nix_is_sdp(nix))
+ return 0;
if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
@@ -266,14 +266,18 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
- if (fc_conf->mode == fc->mode)
- return 0;
rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
+ if (fc_conf->mode == fc->mode) {
+ fc->rx_pause = rx_pause;
+ fc->tx_pause = tx_pause;
+ return 0;
+ }
+
/* Check if TX pause frame is already enabled or not */
if (fc->tx_pause ^ tx_pause) {
if (roc_model_is_cn96_ax() && data->dev_started) {
@@ -291,6 +295,7 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
1;
+ rxq->tx_pause = !!tx_pause;
rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
if (rc)
return rc;
@@ -321,13 +326,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
fc->rx_pause = rx_pause;
fc->tx_pause = tx_pause;
fc->mode = fc_conf->mode;
-
return rc;
}
int
cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
- struct rte_eth_pfc_queue_info *pfc_info)
+ struct rte_eth_pfc_queue_info *pfc_info)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -338,25 +342,42 @@ cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
int
cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
- struct rte_eth_pfc_queue_conf *pfc_conf)
+ struct rte_eth_pfc_queue_conf *pfc_conf)
{
- struct cnxk_pfc_cfg conf;
- int rc;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ enum rte_eth_fc_mode mode;
+ uint8_t en, tc;
+ uint16_t qid;
+ int rc = 0;
- memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));
+ if (dev->fc_cfg.mode != RTE_ETH_FC_NONE) {
+ plt_err("Disable Flow Control before configuring PFC");
+ return -ENOTSUP;
+ }
- conf.fc_cfg.mode = pfc_conf->mode;
+ if (roc_nix_is_sdp(nix)) {
+ plt_err("Prio flow ctrl config is not allowed on SDP");
+ return -ENOTSUP;
+ }
- conf.pause_time = pfc_conf->tx_pause.pause_time;
- conf.rx_tc = pfc_conf->tx_pause.tc;
- conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+ mode = pfc_conf->mode;
- conf.tx_tc = pfc_conf->rx_pause.tc;
- conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+ /* Perform Tx pause configuration on RQ */
+ qid = pfc_conf->tx_pause.rx_qid;
+ if (qid < eth_dev->data->nb_rx_queues) {
+ en = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+ tc = pfc_conf->tx_pause.tc;
+ rc = nix_priority_flow_ctrl_rq_conf(eth_dev, qid, en, tc);
+ }
- rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
- if (rc)
- return rc;
+ /* Perform Rx pause configuration on SQ */
+ qid = pfc_conf->rx_pause.tx_qid;
+ if (qid < eth_dev->data->nb_tx_queues) {
+ en = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+ tc = pfc_conf->rx_pause.tc;
+ rc |= nix_priority_flow_ctrl_sq_conf(eth_dev, qid, en, tc);
+ }
return rc;
}
@@ -1026,11 +1047,9 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
}
int
-nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
- struct cnxk_pfc_cfg *conf)
+nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint8_t tx_pause, uint8_t tc)
{
- enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
- ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct rte_eth_dev_data *data = eth_dev->data;
struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
@@ -1038,18 +1057,11 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
struct roc_nix_pfc_cfg pfc_cfg;
struct roc_nix_fc_cfg fc_cfg;
struct cnxk_eth_rxq_sp *rxq;
- struct cnxk_eth_txq_sp *txq;
- uint8_t rx_pause, tx_pause;
- enum rte_eth_fc_mode mode;
+ enum roc_nix_fc_mode mode;
+ struct roc_nix_rq *rq;
struct roc_nix_cq *cq;
- struct roc_nix_sq *sq;
int rc;
- if (roc_nix_is_vf_or_sdp(nix)) {
- plt_err("Prio flow ctrl config is not allowed on VF and SDP");
- return -ENOTSUP;
- }
-
if (roc_model_is_cn96_ax() && data->dev_started) {
/* On Ax, CQ should be in disabled state
* while setting flow control configuration.
@@ -1059,39 +1071,83 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
return 0;
}
- if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
- dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+ if (data->rx_queues == NULL)
+ return -EINVAL;
+
+ if (qid >= eth_dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
+ /* Configure RQ */
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[qid]) - 1;
+ rq = &dev->rqs[qid];
+ cq = &dev->cqs[qid];
+
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+ fc_cfg.rq_cfg.tc = tc;
+ fc_cfg.rq_cfg.enable = !!tx_pause;
+ fc_cfg.rq_cfg.rq = rq->qid;
+ fc_cfg.rq_cfg.pool = rxq->qconf.mp->pool_id;
+ fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc)
+ return rc;
+
+ if (rxq->tx_pause != tx_pause) {
+ if (tx_pause)
+ pfc->tx_pause_en++;
+ else
+ pfc->tx_pause_en--;
+ }
+
+ rxq->tx_pause = !!tx_pause;
+ rxq->tc = tc;
+
+ /* Skip if PFC already enabled in mac */
+ if (pfc->tx_pause_en > 1)
+ return 0;
+
+ /* Configure MAC block */
+ pfc->class_en = pfc->tx_pause_en ? 0xFF : 0x0;
+
+ if (pfc->rx_pause_en)
+ mode = pfc->tx_pause_en ? ROC_NIX_FC_FULL : ROC_NIX_FC_RX;
+ else
+ mode = pfc->tx_pause_en ? ROC_NIX_FC_TX : ROC_NIX_FC_NONE;
+
+ memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+ pfc_cfg.mode = mode;
+ pfc_cfg.tc = pfc->class_en;
+ return roc_nix_pfc_mode_set(nix, &pfc_cfg);
+}
+
+int
+nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint8_t rx_pause, uint8_t tc)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_fc_cfg fc_cfg;
+ struct cnxk_eth_txq_sp *txq;
+ struct roc_nix_sq *sq;
+ int rc;
+
+ if (data->tx_queues == NULL)
+ return -EINVAL;
+
+ if (qid >= eth_dev->data->nb_tx_queues)
+ return -ENOTSUP;
+
+ if (dev->pfc_tc_sq_map[tc] != 0xFFFF &&
+ dev->pfc_tc_sq_map[tc] != qid) {
plt_err("Same TC can not be configured on multiple SQs");
return -ENOTSUP;
}
- mode = conf->fc_cfg.mode;
- rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
- tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
-
- if (data->rx_queues == NULL || data->tx_queues == NULL) {
- rc = 0;
- goto exit;
- }
-
- /* Configure CQs */
- memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
- rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
- cq = &dev->cqs[rxq->qid];
- fc_cfg.type = ROC_NIX_FC_CQ_CFG;
- fc_cfg.cq_cfg.tc = conf->rx_tc;
- fc_cfg.cq_cfg.enable = !!tx_pause;
- fc_cfg.cq_cfg.rq = cq->qid;
- fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
- rc = roc_nix_fc_config_set(nix, &fc_cfg);
- if (rc)
- goto exit;
-
/* Check if RX pause frame is enabled or not */
- if (pfc->fc_cfg.rx_pause ^ rx_pause) {
- if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
- goto exit;
-
+ if (!pfc->rx_pause_en) {
if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
eth_dev->data->nb_tx_queues > 1) {
/*
@@ -1113,39 +1169,18 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
}
}
- txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+ txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[qid]) - 1;
sq = &dev->sqs[txq->qid];
memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
fc_cfg.type = ROC_NIX_FC_TM_CFG;
fc_cfg.tm_cfg.sq = sq->qid;
- fc_cfg.tm_cfg.tc = conf->tx_tc;
+ fc_cfg.tm_cfg.tc = tc;
fc_cfg.tm_cfg.enable = !!rx_pause;
rc = roc_nix_fc_config_set(nix, &fc_cfg);
if (rc)
return rc;
- dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
-
- /* Configure MAC block */
- if (tx_pause)
- pfc->class_en |= BIT(conf->rx_tc);
- else
- pfc->class_en &= ~BIT(conf->rx_tc);
-
- if (pfc->class_en)
- mode = RTE_ETH_FC_FULL;
-
- memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
- pfc_cfg.mode = mode_map[mode];
- pfc_cfg.tc = pfc->class_en;
- rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
- if (rc)
- return rc;
-
- pfc->fc_cfg.rx_pause = rx_pause;
- pfc->fc_cfg.tx_pause = tx_pause;
- pfc->fc_cfg.mode = mode;
-
+ dev->pfc_tc_sq_map[tc] = sq->qid;
exit:
return rc;
}
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 09/12] net/cnxk: pfc class disable resulting in invalid behaviour
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (6 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 08/12] net/cnxk: remove restriction on VFs for PFC config Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 10/12] net/cnxk: resize CQ for Rx security for errata Nithin Dabilpuram
` (3 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev, Harman Kalra
From: Harman Kalra <hkalra@marvell.com>
Disabling a specific PFC class on an SQ results in disabling PFC
on the entire port.
Signed-off-by: Harman Kalra <hkalra@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/net/cnxk/cnxk_ethdev.c | 25 ++++++++++++-------------
drivers/net/cnxk/cnxk_ethdev.h | 1 -
drivers/net/cnxk/cnxk_ethdev_ops.c | 34 +++++++++++++++++++++++++++-------
3 files changed, 39 insertions(+), 21 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 941b270..4ea1617 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -439,6 +439,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
sq->qid = qid;
sq->nb_desc = nb_desc;
sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
+ sq->tc = ROC_NIX_PFC_CLASS_INVALID;
rc = roc_nix_sq_init(&dev->nix, sq);
if (rc) {
@@ -1281,8 +1282,6 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
goto cq_fini;
}
- /* Initialize TC to SQ mapping as invalid */
- memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
/*
* Restore queue config when reconfigure followed by
* reconfigure and no queue configure invoked from application case.
@@ -1794,17 +1793,17 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
pfc_conf.mode = RTE_ETH_FC_NONE;
- for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
- if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
- pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
- pfc_conf.rx_pause.tc = i;
- pfc_conf.tx_pause.tc = i;
- rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
- &pfc_conf);
- if (rc)
- plt_err("Failed to reset PFC. error code(%d)",
- rc);
- }
+ for (i = 0; i < RTE_MAX(eth_dev->data->nb_rx_queues,
+ eth_dev->data->nb_tx_queues);
+ i++) {
+ pfc_conf.rx_pause.tc = ROC_NIX_PFC_CLASS_INVALID;
+ pfc_conf.rx_pause.tx_qid = i;
+ pfc_conf.tx_pause.tc = ROC_NIX_PFC_CLASS_INVALID;
+ pfc_conf.tx_pause.rx_qid = i;
+ rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+ &pfc_conf);
+ if (rc)
+ plt_err("Failed to reset PFC. error code(%d)", rc);
}
/* Disable and free rte_meter entries */
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index db2d849..a4e96f0 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -396,7 +396,6 @@ struct cnxk_eth_dev {
struct cnxk_eth_qconf *rx_qconf;
/* Flow control configuration */
- uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
struct cnxk_pfc_cfg pfc_cfg;
struct cnxk_fc_cfg fc_cfg;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index caace9d..1592971 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -1129,8 +1129,10 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
struct rte_eth_dev_data *data = eth_dev->data;
struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
struct roc_nix *nix = &dev->nix;
+ struct roc_nix_pfc_cfg pfc_cfg;
struct roc_nix_fc_cfg fc_cfg;
struct cnxk_eth_txq_sp *txq;
+ enum roc_nix_fc_mode mode;
struct roc_nix_sq *sq;
int rc;
@@ -1140,12 +1142,6 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
if (qid >= eth_dev->data->nb_tx_queues)
return -ENOTSUP;
- if (dev->pfc_tc_sq_map[tc] != 0xFFFF &&
- dev->pfc_tc_sq_map[tc] != qid) {
- plt_err("Same TC can not be configured on multiple SQs");
- return -ENOTSUP;
- }
-
/* Check if RX pause frame is enabled or not */
if (!pfc->rx_pause_en) {
if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
@@ -1180,7 +1176,31 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
if (rc)
return rc;
- dev->pfc_tc_sq_map[tc] = sq->qid;
+ /* Maintaining a count for SQs which are configured for PFC. This is
+ * required to handle disabling of a particular SQ without affecting
+ * PFC on other SQs.
+ */
+ if (!fc_cfg.tm_cfg.enable && sq->tc != ROC_NIX_PFC_CLASS_INVALID) {
+ sq->tc = ROC_NIX_PFC_CLASS_INVALID;
+ pfc->rx_pause_en--;
+ } else if (fc_cfg.tm_cfg.enable &&
+ sq->tc == ROC_NIX_PFC_CLASS_INVALID) {
+ sq->tc = tc;
+ pfc->rx_pause_en++;
+ }
+
+ if (pfc->rx_pause_en > 1)
+ goto exit;
+
+ if (pfc->tx_pause_en)
+ mode = pfc->rx_pause_en ? ROC_NIX_FC_FULL : ROC_NIX_FC_TX;
+ else
+ mode = pfc->rx_pause_en ? ROC_NIX_FC_RX : ROC_NIX_FC_NONE;
+
+ memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+ pfc_cfg.mode = mode;
+ pfc_cfg.tc = pfc->class_en;
+ rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
exit:
return rc;
}
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 10/12] net/cnxk: resize CQ for Rx security for errata
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (7 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 09/12] net/cnxk: pfc class disable resulting in invalid behaviour Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 11/12] net/cnxk: add SDP VF device ID for probe matching Nithin Dabilpuram
` (2 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
Resize CQ for Rx security offload in case of HW errata.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/net/cnxk/cnxk_ethdev.c | 43 +++++++++++++++++++++++++++++++++++++++++-
drivers/net/cnxk/cnxk_ethdev.h | 2 +-
2 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 4ea1617..2418290 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -5,6 +5,8 @@
#include <rte_eventdev.h>
+#define CNXK_NIX_CQ_INL_CLAMP_MAX (64UL * 1024UL)
+
static inline uint64_t
nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
{
@@ -40,6 +42,39 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
return speed_capa;
}
+static uint32_t
+nix_inl_cq_sz_clamp_up(struct roc_nix *nix, struct rte_mempool *mp,
+ uint32_t nb_desc)
+{
+ struct roc_nix_rq *inl_rq;
+ uint64_t limit;
+
+ if (!roc_errata_cpt_hang_on_x2p_bp())
+ return nb_desc;
+
+ /* CQ should be able to hold all buffers in first pass RQ's aura
+ * and this RQ's aura.
+ */
+ inl_rq = roc_nix_inl_dev_rq(nix);
+ if (!inl_rq) {
+ /* This itself is going to be inline RQ's aura */
+ limit = roc_npa_aura_op_limit_get(mp->pool_id);
+ } else {
+ limit = roc_npa_aura_op_limit_get(inl_rq->aura_handle);
+ /* Also add this RQ's aura if it is different */
+ if (inl_rq->aura_handle != mp->pool_id)
+ limit += roc_npa_aura_op_limit_get(mp->pool_id);
+ }
+ nb_desc = PLT_MAX(limit + 1, nb_desc);
+ if (nb_desc > CNXK_NIX_CQ_INL_CLAMP_MAX) {
+ plt_warn("Could not setup CQ size to accommodate"
+ " all buffers in related auras (%" PRIu64 ")",
+ limit);
+ nb_desc = CNXK_NIX_CQ_INL_CLAMP_MAX;
+ }
+ return nb_desc;
+}
+
int
cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
{
@@ -504,7 +539,7 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
int
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
- uint16_t nb_desc, uint16_t fp_rx_q_sz,
+ uint32_t nb_desc, uint16_t fp_rx_q_sz,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
@@ -552,6 +587,12 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
roc_nix_inl_dev_xaq_realloc(mp->pool_id);
+ /* Increase CQ size to Aura size to avoid CQ overflow and
+ * then CPT buffer leak.
+ */
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+ nb_desc = nix_inl_cq_sz_clamp_up(nix, mp, nb_desc);
+
/* Setup ROC CQ */
cq = &dev->cqs[qid];
cq->qid = qid;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index a4e96f0..4cb7c9e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -530,7 +530,7 @@ int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_tx_q_sz,
const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
- uint16_t nb_desc, uint16_t fp_rx_q_sz,
+ uint32_t nb_desc, uint16_t fp_rx_q_sz,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 11/12] net/cnxk: add SDP VF device ID for probe matching
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (8 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 10/12] net/cnxk: resize CQ for Rx security for errata Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 9:24 ` [PATCH v2 12/12] event/cnxk: offset timestamp data only if enabled on port Nithin Dabilpuram
2022-06-20 17:26 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Jerin Jacob
11 siblings, 0 replies; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev, Radha Mohan Chintakuntla
From: Radha Mohan Chintakuntla <radhac@marvell.com>
Add SDP VF device ID in the table for probe matching.
Signed-off-by: Radha Mohan Chintakuntla <radhac@marvell.com>
---
drivers/net/cnxk/cn10k_ethdev.c | 5 +++++
drivers/net/cnxk/cn9k_ethdev.c | 6 ++++++
2 files changed, 11 insertions(+)
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 550cec5..80c5c0e 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -824,6 +824,11 @@ static const struct rte_pci_id cn10k_pci_nix_map[] = {
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_AF_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_AF_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KB, PCI_DEVID_CNXK_RVU_AF_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KB, PCI_DEVID_CNXK_RVU_SDP_VF),
{
.vendor_id = 0,
},
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 2663aa6..4fb0e2d 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -777,6 +777,12 @@ static const struct rte_pci_id cn9k_pci_nix_map[] = {
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_AF_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_AF_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF9KA, PCI_DEVID_CNXK_RVU_AF_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SDP_VF),
+ CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF9KA, PCI_DEVID_CNXK_RVU_SDP_VF),
{
.vendor_id = 0,
},
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 12/12] event/cnxk: offset timestamp data only if enabled on port
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (9 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 11/12] net/cnxk: add SDP VF device ID for probe matching Nithin Dabilpuram
@ 2022-06-16 9:24 ` Nithin Dabilpuram
2022-06-16 10:30 ` Nithin Kumar Dabilpuram
2022-06-20 17:26 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Jerin Jacob
11 siblings, 1 reply; 29+ messages in thread
From: Nithin Dabilpuram @ 2022-06-16 9:24 UTC (permalink / raw)
To: jerinj, Pavan Nikhilesh, Shijith Thotton
Cc: dev, skoteshwar, skori, Nithin Dabilpuram
Offset timestamp data only when enabled on the port instead of
just checking for offload flags.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/event/cnxk/cn10k_worker.h | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 034f508..7412a1b 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -112,8 +112,7 @@ static __rte_always_inline void
cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
void *lookup_mem, void *tstamp, uintptr_t lbase)
{
- uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
- (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+ uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
struct rte_event_vector *vec;
uint64_t aura_handle, laddr;
uint16_t nb_mbufs, non_vec;
@@ -133,6 +132,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
rte_prefetch0(&vec->ptrs[i]);
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
+ mbuf_init |= 8;
+
nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
flags | NIX_RX_VWQE_F, lookup_mem,
--
2.8.4
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH v2 12/12] event/cnxk: offset timestamp data only if enabled on port
2022-06-16 9:24 ` [PATCH v2 12/12] event/cnxk: offset timestamp data only if enabled on port Nithin Dabilpuram
@ 2022-06-16 10:30 ` Nithin Kumar Dabilpuram
0 siblings, 0 replies; 29+ messages in thread
From: Nithin Kumar Dabilpuram @ 2022-06-16 10:30 UTC (permalink / raw)
To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev, skoteshwar, skori
Please ignore this particular patch 12/12. It is already part of another
patch.
https://patchwork.dpdk.org/project/dpdk/patch/20220612175612.3101-1-pbhagavatula@marvell.com/
Thanks
Nithin
On 2022-06-16 2:54 PM, Nithin Dabilpuram wrote:
> Offset timestamp data only when enabled on the port instead of
> just checking for offload flags.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
> drivers/event/cnxk/cn10k_worker.h | 6 ++++--
> 1 file changed, 4 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
> index 034f508..7412a1b 100644
> --- a/drivers/event/cnxk/cn10k_worker.h
> +++ b/drivers/event/cnxk/cn10k_worker.h
> @@ -112,8 +112,7 @@ static __rte_always_inline void
> cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
> void *lookup_mem, void *tstamp, uintptr_t lbase)
> {
> - uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
> - (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
> + uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
> struct rte_event_vector *vec;
> uint64_t aura_handle, laddr;
> uint16_t nb_mbufs, non_vec;
> @@ -133,6 +132,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
> for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
> rte_prefetch0(&vec->ptrs[i]);
>
> + if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
> + mbuf_init |= 8;
> +
> nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
> nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
> flags | NIX_RX_VWQE_F, lookup_mem,
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH v2 01/12] common/cnxk: use computed value for WQE skip
2022-06-16 9:24 ` [PATCH v2 01/12] common/cnxk: use computed value for WQE skip Nithin Dabilpuram
` (10 preceding siblings ...)
2022-06-16 9:24 ` [PATCH v2 12/12] event/cnxk: offset timestamp data only if enabled on port Nithin Dabilpuram
@ 2022-06-20 17:26 ` Jerin Jacob
11 siblings, 0 replies; 29+ messages in thread
From: Jerin Jacob @ 2022-06-20 17:26 UTC (permalink / raw)
To: Nithin Dabilpuram
Cc: Jerin Jacob, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Pavan Nikhilesh, Shijith Thotton, dpdk-dev
On Thu, Jun 16, 2022 at 2:54 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> Use computed value for WQE skip instead of a hardcoded value.
> WQE skip needs to be number of 128B lines to accommodate rte_mbuf.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
> Depends-on: series=23500 ("common/cnxk: add cnf10kb support")
Series(except 12/12) applied to dpdk-next-net-mrvl/for-next-net. Thanks
>
> v2:
> - Fixed commit message in 10/12, 1/12 patches
>
> drivers/common/cnxk/roc_nix_inl.h | 2 +-
> drivers/common/cnxk/roc_nix_inl_priv.h | 2 +-
> drivers/event/cnxk/cnxk_eventdev_adptr.c | 5 ++++-
> drivers/net/cnxk/cnxk_ethdev_sec.c | 5 ++++-
> 4 files changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
> index b1b4c5b..c7b1817 100644
> --- a/drivers/common/cnxk/roc_nix_inl.h
> +++ b/drivers/common/cnxk/roc_nix_inl.h
> @@ -131,7 +131,7 @@ struct roc_nix_inl_dev {
> uint16_t channel;
> uint16_t chan_mask;
> bool attach_cptlf;
> - bool wqe_skip;
> + uint16_t wqe_skip;
> uint8_t spb_drop_pc;
> uint8_t lpb_drop_pc;
> bool set_soft_exp_poll;
> diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
> index d61c7b2..a775efc 100644
> --- a/drivers/common/cnxk/roc_nix_inl_priv.h
> +++ b/drivers/common/cnxk/roc_nix_inl_priv.h
> @@ -84,7 +84,7 @@ struct nix_inl_dev {
> uint32_t ipsec_in_max_spi;
> uint32_t inb_spi_mask;
> bool attach_cptlf;
> - bool wqe_skip;
> + uint16_t wqe_skip;
> bool ts_ena;
> };
>
> diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> index fa96090..cf5b1dd 100644
> --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> @@ -125,6 +125,7 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
> {
> struct roc_nix *nix = &cnxk_eth_dev->nix;
> struct roc_nix_rq *rq;
> + uint16_t wqe_skip;
> int rc;
>
> rq = &cnxk_eth_dev->rqs[rq_id];
> @@ -132,7 +133,9 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
> rq->tt = ev->sched_type;
> rq->hwgrp = ev->queue_id;
> rq->flow_tag_width = 20;
> - rq->wqe_skip = 1;
> + wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
> + wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
> + rq->wqe_skip = wqe_skip;
> rq->tag_mask = (port_id & 0xF) << 20;
> rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
> << 24;
> diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
> index d01ebb4..1de3454 100644
> --- a/drivers/net/cnxk/cnxk_ethdev_sec.c
> +++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
> @@ -264,6 +264,7 @@ cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
> char name[CNXK_NIX_INL_DEV_NAME_LEN];
> struct roc_nix_inl_dev *inl_dev;
> const struct rte_memzone *mz;
> + uint16_t wqe_skip;
> int rc = -ENOMEM;
>
> RTE_SET_USED(pci_drv);
> @@ -295,7 +296,9 @@ cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
>
> inl_dev->attach_cptlf = true;
> /* WQE skip is one for DPDK */
> - inl_dev->wqe_skip = true;
> + wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
> + wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
> + inl_dev->wqe_skip = wqe_skip;
> inl_dev->set_soft_exp_poll = true;
> rc = roc_nix_inl_dev_init(inl_dev);
> if (rc) {
> --
> 2.8.4
>
^ permalink raw reply [flat|nested] 29+ messages in thread