DPDK patches and discussions
* [PATCH v1 1/2] common/cnxk: support priority flow ctrl config API
@ 2022-01-09 11:11 skori
  2022-01-09 11:11 ` [PATCH v1 2/2] net/cnxk: support priority flow control skori
  0 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-01-09 11:11 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause the
respective traffic class on the link.

This patch adds a RoC interface to configure priority flow control on
the MAC block, i.e. CGX on cn9k and RPM on cn10k.
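
As a usage sketch (illustrative only; the roc_nix handle, TC value and
error handling below are assumptions, not part of this patch), enabling
PFC for one traffic class through the new API looks like:

	struct roc_nix_pfc_cfg pfc_cfg;
	int rc;

	memset(&pfc_cfg, 0, sizeof(pfc_cfg));
	pfc_cfg.mode = ROC_NIX_FC_FULL; /* generate and honour pause frames */
	pfc_cfg.tc = 3;			/* single TC in [0, 15] for set */
	rc = roc_nix_pfc_mode_set(roc_nix, &pfc_cfg);
	if (rc)
		plt_err("Failed to set PFC mode, rc=%d", rc);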

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/common/cnxk/roc_mbox.h       |  17 +++
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 ++++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   4 +-
 drivers/common/cnxk/roc_nix_tm.c     | 162 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 298 insertions(+), 19 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index b63fe108c9..12a5922229 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -540,6 +542,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1115,6 +1130,8 @@ struct nix_bp_cfg_req {
  * so maximum 64 channels are possible.
  */
 #define NIX_MAX_CHAN 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN NIX_MAX_CHAN
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 69a5e8e7b4..506b05b43e 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be in [0, 15].
+	 * For GET, tc is returned as a bitmap of enabled TCs.
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_prepare_pfc_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
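
Note the asymmetric semantics of roc_nix_pfc_cfg::tc above: set takes a
single class index while get returns a bitmap of enabled classes. A
minimal read-back sketch (the roc_nix handle is assumed):

	struct roc_nix_pfc_cfg pfc_cfg;

	memset(&pfc_cfg, 0, sizeof(pfc_cfg));
	if (roc_nix_pfc_mode_get(roc_nix, &pfc_cfg) == 0)
		plt_info("PFC mode=%d, enabled TC bitmap=0x%x",
			 pfc_cfg.mode, pfc_cfg.tc);
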
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ca29cd2bf9..95f466242a 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = BIT(pfc_cfg->tc);
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
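
With BPIDs now allocated per channel, a CQ can be bound to the
backpressure ID of a specific traffic class via the extended cq_cfg. A
caller-side sketch (the queue id, TC and drop threshold are
illustrative, as is the roc_nix handle):

	struct roc_nix_fc_cfg fc_cfg;
	int rc;

	memset(&fc_cfg, 0, sizeof(fc_cfg));
	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
	fc_cfg.cq_cfg.rq = 0;		/* RQ whose CQ asserts backpressure */
	fc_cfg.cq_cfg.tc = 3;		/* selects nix->bpid[3] for this CQ */
	fc_cfg.cq_cfg.cq_drop = 128;	/* CQ drop threshold */
	fc_cfg.cq_cfg.enable = true;
	rc = roc_nix_fc_config_set(roc_nix, &fc_cfg);
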
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 04575af295..f5bb09dc3b 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -139,6 +139,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +377,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index b3d8ebd3c2..774a801805 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,18 +317,31 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
@@ -339,6 +352,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +362,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -602,7 +619,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +748,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1420,6 +1437,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 	return rc;
 }
 
+int
+roc_nix_tm_prepare_pfc_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
+						 ROC_TM_LVL_SCH2);
+	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+						  ROC_TM_LVL_SCH4);
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		/* SMQ is mapped to SCH4 when we have TL1 access and SCH3
+		 * otherwise
+		 */
+		lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
+						     ROC_TM_LVL_SCH3);
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
 int
 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
 {
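
nix_tm_bp_config_set() is internal; callers reach it through
roc_nix_fc_config_set() with ROC_NIX_FC_TM_CFG, which now carries the
SQ and TC so that only the TL3/TL2 node feeding that SQ is
reprogrammed. A sketch (SQ and TC values are illustrative):

	struct roc_nix_fc_cfg fc_cfg;
	int rc;

	memset(&fc_cfg, 0, sizeof(fc_cfg));
	fc_cfg.type = ROC_NIX_FC_TM_CFG;
	fc_cfg.tm_cfg.sq = 0;	/* SQ whose link config is updated */
	fc_cfg.tm_cfg.tc = 3;	/* TC written to LINKX_CFG bits [7:0] */
	fc_cfg.tm_cfg.enable = true;
	rc = roc_nix_fc_config_set(roc_nix, &fc_cfg);
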
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3257fa67c7..8aa2996956 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 07c6720f0c..3c990691e1 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -105,6 +105,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_init;
@@ -195,6 +196,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_set;
+	roc_nix_pfc_mode_get;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -259,6 +262,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_prepare_pfc_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1



* [PATCH v1 2/2] net/cnxk: support priority flow control
  2022-01-09 11:11 [PATCH v1 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-01-09 11:11 ` skori
  2022-01-09 11:18   ` Sunil Kumar Kori
  2022-01-11  8:18   ` [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API skori
  0 siblings, 2 replies; 29+ messages in thread
From: skori @ 2022-01-09 11:11 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

This patch implements priority flow control support for CNXK platforms.
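
For context, the new dev op is reached through the ethdev PFC queue API
proposed in the series this patch depends on (see the follow-up mail);
the function name below is taken from that in-flight series and is an
assumption here, as are the queue ids and TC:

	struct rte_eth_pfc_queue_conf pfc_conf;
	int rc;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.mode = RTE_ETH_FC_FULL;
	pfc_conf.rx_pause.tc = 3;	/* on receiving pause for TC3 ... */
	pfc_conf.rx_pause.tx_qid = 0;	/* ... back-pressure Tx queue 0 */
	pfc_conf.tx_pause.tc = 3;	/* advertise TC3 in generated pause */
	pfc_conf.tx_pause.rx_qid = 0;	/* when Rx queue 0's CQ congests */
	pfc_conf.tx_pause.pause_time = 0x100;
	rc = rte_eth_dev_priority_flow_ctrl_queue_set(port_id, &pfc_conf);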

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     |  19 ++++
 drivers/net/cnxk/cnxk_ethdev.h     |  16 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 177 +++++++++++++++++++++++++++--
 3 files changed, 203 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..382d88bbf3 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1260,6 +1260,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1548,6 +1550,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_set = cnxk_nix_priority_flow_ctrl_queue_set,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1721,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1736,6 +1741,20 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	pfc_conf.rx_pause.tc = roc_nix_chan_count_get(nix) - 1;
+	pfc_conf.tx_pause.tc = roc_nix_chan_count_get(nix) - 1;
+	rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev, &pfc_conf);
+	if (rc)
+		plt_err("Failed to reset PFC. error code(%d)", rc);
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..28fb19307a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -143,6 +143,16 @@ struct cnxk_fc_cfg {
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -366,6 +376,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[16];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -467,6 +479,8 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+					  struct rte_eth_pfc_queue_conf *pfc_conf);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -606,6 +620,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..27fa2da36d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,8 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+	devinfo->pfc_queue_tc_max = roc_nix_chan_count_get(&dev->nix);
 	return 0;
 }
 
@@ -230,6 +232,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +252,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +288,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)
+				data->rx_queues[i]) - 1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +301,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)
+				data->tx_queues[i]) - 1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +327,29 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+				      struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -911,3 +950,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_PFC) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_prepare_pfc_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = conf->rx_tc;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
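
The hierarchy switch above follows a strict order; distilled to a
sketch that mirrors the calls in nix_priority_flow_ctrl_configure():

	rc = roc_nix_tm_hierarchy_disable(nix);
	if (!rc)
		rc = roc_nix_tm_prepare_pfc_tree(nix);
	if (!rc)
		rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC, true);
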
-- 
2.25.1



* RE: [PATCH v1 2/2] net/cnxk: support priority flow control
  2022-01-09 11:11 ` [PATCH v1 2/2] net/cnxk: support priority flow control skori
@ 2022-01-09 11:18   ` Sunil Kumar Kori
  2022-01-11  8:18   ` [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API skori
  1 sibling, 0 replies; 29+ messages in thread
From: Sunil Kumar Kori @ 2022-01-09 11:18 UTC (permalink / raw)
  To: Sunil Kumar Kori, Nithin Kumar Dabilpuram,
	Kiran Kumar Kokkilagadda, Satha Koteswara Rao Kottidi
  Cc: dev

The following patch set depends on http://patches.dpdk.org/project/dpdk/patch/20220109105851.734687-1-skori@marvell.com/.

Regards
Sunil Kumar Kori

>[snip]



* [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API
  2022-01-09 11:11 ` [PATCH v1 2/2] net/cnxk: support priority flow control skori
  2022-01-09 11:18   ` Sunil Kumar Kori
@ 2022-01-11  8:18   ` skori
  2022-01-11  8:18     ` [PATCH v2 2/2] net/cnxk: support priority flow control skori
  1 sibling, 1 reply; 29+ messages in thread
From: skori @ 2022-01-11  8:18 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause the
respective traffic class on the link.

This patch adds a RoC interface to configure priority flow control on
the MAC block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v2:
 - fix RoC API naming convention.

 drivers/common/cnxk/roc_mbox.h       |  17 +++
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 ++++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   4 +-
 drivers/common/cnxk/roc_nix_tm.c     | 162 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 298 insertions(+), 19 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index b63fe108c9..12a5922229 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -540,6 +542,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1115,6 +1130,8 @@ struct nix_bp_cfg_req {
  * so maximum 64 channels are possible.
  */
 #define NIX_MAX_CHAN 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN NIX_MAX_CHAN
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 69a5e8e7b4..e05b7b7dd8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be in [0, 15].
+	 * For GET, tc is returned as a bitmap of enabled TCs.
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ca29cd2bf9..95f466242a 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = BIT(pfc_cfg->tc);
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 04575af295..f5bb09dc3b 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -139,6 +139,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +377,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index b3d8ebd3c2..0775bca7b0 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,18 +317,31 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
@@ -339,6 +352,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +362,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -602,7 +619,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +748,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1420,6 +1437,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 	return rc;
 }
 
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
+						 ROC_TM_LVL_SCH2);
+	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+						  ROC_TM_LVL_SCH4);
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		/* SMQ is mapped to SCH4 when we have TL1 access and SCH3
+		 * otherwise
+		 */
+		lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
+						     ROC_TM_LVL_SCH3);
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
 int
 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
 {
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3257fa67c7..8aa2996956 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 07c6720f0c..cee33c40cc 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -105,6 +105,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_init;
@@ -195,6 +196,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_set;
+	roc_nix_pfc_mode_get;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -259,6 +262,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


* [PATCH v2 2/2] net/cnxk: support priority flow control
  2022-01-11  8:18   ` [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-01-11  8:18     ` skori
  2022-01-18 13:28       ` [PATCH v3 1/2] common/cnxk: support priority flow ctrl config API skori
  0 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-01-11  8:18 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Patch implements priority flow control support for CNXK platforms.
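
As an illustration, an application would drive this new dev op roughly
as sketched below. This is a hedged sketch: the rte_eth_pfc_queue_conf
fields mirror what cnxk_nix_priority_flow_ctrl_queue_set() consumes in
this patch, but the public wrapper name is an assumption taken from the
companion ethdev series, not something this patch defines.

  #include <rte_ethdev.h>

  /* Sketch: map traffic class 3 onto Rx/Tx queue 3 of @port_id.
   * rx_pause = honour received PFC frames by backpressuring a Tx queue;
   * tx_pause = generate PFC frames when an Rx queue congests.
   */
  static int
  setup_pfc_tc3(uint16_t port_id)
  {
          struct rte_eth_pfc_queue_conf pfc_conf = {0};

          pfc_conf.mode = RTE_ETH_FC_FULL;      /* pause both directions */
          pfc_conf.rx_pause.tc = 3;
          pfc_conf.rx_pause.tx_qid = 3;
          pfc_conf.tx_pause.tc = 3;
          pfc_conf.tx_pause.rx_qid = 3;
          pfc_conf.tx_pause.pause_time = 0x7FF; /* pause quanta to advertise */

          /* Assumed public wrapper over the priority_flow_ctrl_queue_set op */
          return rte_eth_dev_priority_flow_ctrl_queue_configure(port_id,
                                                                &pfc_conf);
  }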

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v2:
 - fix application restart issue.

 drivers/net/cnxk/cnxk_ethdev.c     |  27 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  16 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 177 +++++++++++++++++++++++++++--
 3 files changed, 211 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..e2690f0640 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1260,6 +1260,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1548,6 +1550,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_set = cnxk_nix_priority_flow_ctrl_queue_set,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1721,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1736,6 +1741,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < 16; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev,
+								   &pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..28fb19307a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -143,6 +143,16 @@ struct cnxk_fc_cfg {
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -366,6 +376,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[16];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -467,6 +479,8 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+					  struct rte_eth_pfc_queue_conf *pfc_conf);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -606,6 +620,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..b3173c343e 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,8 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+	devinfo->pfc_queue_tc_max = roc_nix_chan_count_get(&dev->nix);
 	return 0;
 }
 
@@ -230,6 +232,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +252,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +288,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)
+				data->rx_queues[i]) - 1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +301,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)
+				data->tx_queues[i]) - 1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +327,29 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+				      struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -911,3 +950,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_PFC) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = conf->rx_tc;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1


* [PATCH v3 1/2] common/cnxk: support priority flow ctrl config API
  2022-01-11  8:18     ` [PATCH v2 2/2] net/cnxk: support priority flow control skori
@ 2022-01-18 13:28       ` skori
  2022-01-18 13:28         ` [PATCH v3 2/2] net/cnxk: support priority flow control skori
  0 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-01-18 13:28 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
traffic per class on a given link.

Patch adds a RoC interface to configure priority flow control on the
MAC block, i.e. CGX on cn9k and RPM on cn10k.
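
A minimal sketch of how a PMD could drive the new RoC calls is shown
below (hedged: roc_api.h as the umbrella header and the BIT() helper
are assumptions borrowed from the surrounding RoC code, not defined by
this patch):

  #include "roc_api.h"

  /* Sketch: enable PFC in both directions for one class on the MAC
   * (CGX/RPM), then read back the enabled-class bitmap. For SET,
   * pfc_cfg.tc is a class index [0, 15]; for GET it holds a bitmap.
   */
  static int
  pfc_enable_class(struct roc_nix *roc_nix, uint16_t tc)
  {
          struct roc_nix_pfc_cfg cfg = {0};
          int rc;

          cfg.mode = ROC_NIX_FC_FULL;
          cfg.tc = tc;
          rc = roc_nix_pfc_mode_set(roc_nix, &cfg);
          if (rc)
                  return rc;

          rc = roc_nix_pfc_mode_get(roc_nix, &cfg);
          if (rc)
                  return rc;

          /* cfg.tc now carries the bitmap of enabled classes */
          return (cfg.tc & BIT(tc)) ? 0 : -1;
  }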

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code.

 drivers/common/cnxk/roc_mbox.h       |  19 +++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 ++++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   4 +-
 drivers/common/cnxk/roc_nix_tm.c     | 156 +++++++++++++++++++++++++--
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 293 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index e97d93e261..39f63c9271 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -550,6 +552,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1124,7 +1139,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN NIX_MAX_CHAN
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 69a5e8e7b4..e05b7b7dd8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ca29cd2bf9..814ccab839 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 04575af295..f5bb09dc3b 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -139,6 +139,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +377,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index b3d8ebd3c2..6a6bafaaa7 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,18 +317,31 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
@@ -339,6 +352,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +362,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -602,7 +619,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +748,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1420,6 +1437,133 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 	return rc;
 }
 
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
 int
 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
 {
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5a03b91784..f36a662911 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -106,6 +106,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_init;
@@ -196,6 +197,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_set;
+	roc_nix_pfc_mode_get;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -260,6 +263,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


* [PATCH v3 2/2] net/cnxk: support priority flow control
  2022-01-18 13:28       ` [PATCH v3 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-01-18 13:28         ` skori
  2022-01-20 16:59           ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API skori
  0 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-01-18 13:28 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Patch implements priority flow control support for CNXK platforms.
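
Before programming per-queue PFC, an application can size its traffic
class usage from dev_info. A hedged sketch, assuming the
pfc_queue_tc_max field this patch populates is added to
rte_eth_dev_info by the companion ethdev series:

  #include <rte_ethdev.h>

  /* Sketch: query how many traffic classes the port supports. On cnxk
   * this is the channel count from roc_nix_chan_count_get().
   */
  static int
  pfc_tc_limit(uint16_t port_id, uint8_t *tc_max)
  {
          struct rte_eth_dev_info dev_info;
          int ret;

          ret = rte_eth_dev_info_get(port_id, &dev_info);
          if (ret != 0)
                  return ret;

          *tc_max = dev_info.pfc_queue_tc_max;
          return 0;
  }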

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix application restart issue.
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.

 drivers/net/cnxk/cnxk_ethdev.c     |  27 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  18 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 177 +++++++++++++++++++++++++++--
 3 files changed, 213 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..4248267a12 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1260,6 +1260,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1548,6 +1550,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_set = cnxk_nix_priority_flow_ctrl_queue_set,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1721,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1736,6 +1741,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev,
+								   &pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..c4f28625f3 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -366,6 +378,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -467,6 +481,8 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+					  struct rte_eth_pfc_queue_conf *pfc_conf);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -606,6 +622,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..b515da0be1 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,8 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+	devinfo->pfc_queue_tc_max = roc_nix_chan_count_get(&dev->nix);
 	return 0;
 }
 
@@ -230,6 +232,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +252,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +288,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +301,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +327,29 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+				      struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -911,3 +950,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_PFC) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1


* [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API
  2022-01-18 13:28         ` [PATCH v3 2/2] net/cnxk: support priority flow control skori
@ 2022-01-20 16:59           ` skori
  2022-01-20 16:59             ` [PATCH v4 2/2] net/cnxk: support priority flow control skori
                               ` (2 more replies)
  0 siblings, 3 replies; 29+ messages in thread
From: skori @ 2022-01-20 16:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
traffic per class on a given link.

Patch adds a RoC interface to configure priority flow control on the
MAC block, i.e. CGX on cn9k and RPM on cn10k.
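
At the register level, the per-SQ backpressure update in this revision
reduces to one NIX_AF_TL3_TL2X_LINKX_CFG write per link: bit 13 gates
backpressure and bits 7:0 carry the relative channel (traffic class).
A sketch of the encoding, matching the regval/regval_mask composition
in nix_tm_bp_config_set() below:

  /* Bit 13 = backpressure enable, bits 7:0 = relative channel (TC);
   * the mask restricts the write to exactly those fields.
   */
  uint64_t regval = enable ? (BIT_ULL(13) | tc) : 0;
  uint64_t regval_mask = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));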

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code.

v3..v4:
 - fix PFC configuration with other types of TM tree,
   i.e. default, user and rate-limited trees.

 drivers/common/cnxk/roc_mbox.h       |  19 ++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   6 +-
 drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 310 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index e97d93e261..39f63c9271 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -550,6 +552,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1124,7 +1139,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN NIX_MAX_CHAN
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 69a5e8e7b4..e05b7b7dd8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ca29cd2bf9..814ccab839 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 04575af295..db34bcadd0 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
 /* Traffic Manager */
 #define NIX_TM_MAX_HW_TXSCHQ 512
 #define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
 #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
 	uint32_t priority;
 	uint32_t weight;
 	uint16_t lvl;
+	uint16_t rel_chan;
 	uint32_t parent_id;
 	uint32_t shaper_profile_id;
 	void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index b3d8ebd3c2..89d1478486 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
+	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+		rc = -EINVAL;
+		goto err;
+	}
+
 	TAILQ_FOREACH(node, list, node) {
 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
 			continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 			goto err;
 	}
 
+	parent->rel_chan = tc;
 	return 0;
 err:
 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1293,6 +1315,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1319,6 +1342,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1359,6 +1383,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1384,6 +1409,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1408,6 +1434,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+			node->rel_chan = NIX_TM_CHAN_INVALID;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5a03b91784..f36a662911 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -106,6 +106,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_init;
@@ -196,6 +197,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_set;
+	roc_nix_pfc_mode_get;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -260,6 +263,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1
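
A side note on the nix_tm_bp_config_set() hunk above, since the register
encoding is easy to misread: with PFC, the relative channel (the traffic
class) now rides in the low byte of NIX_AF_TL3_TL2X_LINKX_CFG alongside
the backpressure-enable flag in bit 13. A sketch restating the hunk; the
field naming beyond what the mask implies is an assumption, not spelled
out in this patch:

/* Regval written by the hunk above: bit 13 enables backpressure,
 * bits 7:0 carry the relative channel (TC). The matching mask,
 * ~(BIT_ULL(13) | GENMASK_ULL(7, 0)), ensures only these fields
 * are updated. BIT_ULL/GENMASK_ULL are the roc bit helpers.
 */
static inline uint64_t
tl2_link_bp_regval(uint16_t tc, bool enable)
{
	return enable ? (BIT_ULL(13) | (tc & GENMASK_ULL(7, 0))) : 0;
}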


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v4 2/2] net/cnxk: support priority flow control
  2022-01-20 16:59           ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-01-20 16:59             ` skori
  2022-01-25 10:02             ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API Ray Kinsella
  2022-01-25 11:23             ` [PATCH v5 " skori
  2 siblings, 0 replies; 29+ messages in thread
From: skori @ 2022-01-20 16:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Patch implements priority flow control support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix application restart issue.
 
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.
 
v3..v4:
 - fix PFC configuration with other types of TM trees,
   i.e. the default, user and rate limit trees.
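
For context, a minimal sketch of how the new dev op is intended to be
driven from an application. The rte_eth_dev_priority_flow_ctrl_queue_set()
wrapper name is assumed from the in-flight ethdev PFC series and may
differ on merge; the struct fields match this patch:

/* Pause traffic class 3 in both directions on a port; field usage
 * mirrors cnxk_nix_priority_flow_ctrl_queue_set() below.
 */
static int
setup_pfc_tc3(uint16_t port_id)
{
	struct rte_eth_pfc_queue_conf pfc_conf = {0};

	pfc_conf.mode = RTE_ETH_FC_FULL;
	pfc_conf.rx_pause.tc = 3;     /* honor pause frames for TC 3... */
	pfc_conf.rx_pause.tx_qid = 3; /* ...by backpressuring Tx queue 3 */
	pfc_conf.tx_pause.tc = 3;     /* generate pause frames for TC 3... */
	pfc_conf.tx_pause.rx_qid = 3; /* ...when Rx queue 3 congests */
	pfc_conf.tx_pause.pause_time = 0x7FF; /* pause quanta */

	return rte_eth_dev_priority_flow_ctrl_queue_set(port_id, &pfc_conf);
}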

 drivers/net/cnxk/cnxk_ethdev.c     |  27 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  18 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 177 +++++++++++++++++++++++++++--
 3 files changed, 213 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..4248267a12 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1260,6 +1260,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1548,6 +1550,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_set = cnxk_nix_priority_flow_ctrl_queue_set,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1721,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1736,6 +1741,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev,
+								   &pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..c4f28625f3 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -366,6 +378,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -467,6 +481,8 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+					  struct rte_eth_pfc_queue_conf *pfc_conf);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -606,6 +622,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..1b47fe9dc3 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,8 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+	devinfo->pfc_queue_tc_max = roc_nix_chan_count_get(&dev->nix);
 	return 0;
 }
 
@@ -230,6 +232,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +252,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +288,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +301,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +327,29 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+				      struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -911,3 +950,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1
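
Zooming out, the driver-side sequence in nix_priority_flow_ctrl_configure()
above reduces to four RoC calls. A condensed sketch with queue lookups,
cq_drop programming and error paths trimmed; all symbols are from the two
patches in this series:

static int
pfc_config_sketch(struct roc_nix *nix, uint16_t tc, uint16_t rq, uint16_t sq)
{
	struct roc_nix_pfc_cfg pfc_cfg;
	struct roc_nix_fc_cfg fc_cfg;
	int rc;

	/* 1. Map the RQ/CQ to the per-TC bpid so Rx congestion asserts
	 *    backpressure for that class only.
	 */
	memset(&fc_cfg, 0, sizeof(fc_cfg));
	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
	fc_cfg.cq_cfg.tc = tc;
	fc_cfg.cq_cfg.rq = rq;
	fc_cfg.cq_cfg.enable = true;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc)
		return rc;

	/* 2. Switch to the PFC TM tree when the current tree cannot pause
	 *    SQs independently (this patch does so for the default tree
	 *    with multiple Tx queues).
	 */
	rc = roc_nix_tm_hierarchy_disable(nix);
	rc = rc ? rc : roc_nix_tm_pfc_prepare_tree(nix);
	rc = rc ? rc : roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC, true);
	if (rc)
		return rc;

	/* 3. Tie the SQ's TL3/TL2 scheduler to the TC's relative channel. */
	memset(&fc_cfg, 0, sizeof(fc_cfg));
	fc_cfg.type = ROC_NIX_FC_TM_CFG;
	fc_cfg.tm_cfg.sq = sq;
	fc_cfg.tm_cfg.tc = tc;
	fc_cfg.tm_cfg.enable = true;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc)
		return rc;

	/* 4. Program the MAC block (CGX/RPM); for SET, tc is the class
	 *    index per the roc_nix.h comment.
	 */
	memset(&pfc_cfg, 0, sizeof(pfc_cfg));
	pfc_cfg.mode = ROC_NIX_FC_FULL;
	pfc_cfg.tc = tc;
	return roc_nix_pfc_mode_set(nix, &pfc_cfg);
}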


^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API
  2022-01-20 16:59           ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API skori
  2022-01-20 16:59             ` [PATCH v4 2/2] net/cnxk: support priority flow control skori
@ 2022-01-25 10:02             ` Ray Kinsella
  2022-01-25 10:19               ` [EXT] " Sunil Kumar Kori
  2022-01-25 11:23             ` [PATCH v5 " skori
  2 siblings, 1 reply; 29+ messages in thread
From: Ray Kinsella @ 2022-01-25 10:02 UTC (permalink / raw)
  To: skori; +Cc: Nithin Dabilpuram, Kiran Kumar K, Satha Rao, dev


skori@marvell.com writes:

> From: Sunil Kumar Kori <skori@marvell.com>
>
> CNXK platforms support priority flow control(802.1qbb) to pause
> respective traffic per class on that link.
>
> Patch adds RoC interface to configure priority flow control on MAC
> block i.e. CGX on cn9k and RPM on cn10k.
>
> Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
> ---
> v1..v2:
>  - fix RoC API naming convention.
>
> v2..v3:
>  - fix pause quanta configuration for cn10k.
>  - remove unnecessary code
>
> v3..v4:
>  - fix PFC configuration with other type of TM tree
>    i.e. default, user and rate limit tree.
>
>  drivers/common/cnxk/roc_mbox.h       |  19 ++-
>  drivers/common/cnxk/roc_nix.h        |  21 ++++
>  drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
>  drivers/common/cnxk/roc_nix_priv.h   |   6 +-
>  drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
>  drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
>  drivers/common/cnxk/version.map      |   4 +
>  7 files changed, 310 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
> index e97d93e261..39f63c9271 100644
> --- a/drivers/common/cnxk/roc_mbox.h
> +++ b/drivers/common/cnxk/roc_mbox.h
> @@ -95,6 +95,8 @@ struct mbox_msghdr {
>  	  msg_rsp)                                                             \
>  	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
>  	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
> +	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
> +	  cgx_pfc_rsp)                                                         \
>  	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
>  	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
>  	  npa_lf_alloc_rsp)                                                    \
> @@ -550,6 +552,19 @@ struct cgx_pause_frm_cfg {
>  	uint8_t __io tx_pause;
>  };
>  
> +struct cgx_pfc_cfg {
> +	struct mbox_msghdr hdr;
> +	uint8_t __io rx_pause;
> +	uint8_t __io tx_pause;
> +	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
> +};
> +
> +struct cgx_pfc_rsp {
> +	struct mbox_msghdr hdr;
> +	uint8_t __io rx_pause;
> +	uint8_t __io tx_pause;
> +};
> +
>  struct sfp_eeprom_s {
>  #define SFP_EEPROM_SIZE 256
>  	uint16_t __io sff_id;
> @@ -1124,7 +1139,9 @@ struct nix_bp_cfg_req {
>  /* PF can be mapped to either CGX or LBK interface,
>   * so maximum 64 channels are possible.
>   */
> -#define NIX_MAX_CHAN 64
> +#define NIX_MAX_CHAN	 64
> +#define NIX_CGX_MAX_CHAN 16
> +#define NIX_LBK_MAX_CHAN NIX_MAX_CHAN
>  struct nix_bp_cfg_rsp {
>  	struct mbox_msghdr hdr;
>  	/* Channel and bpid mapping */
> diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
> index 69a5e8e7b4..e05b7b7dd8 100644
> --- a/drivers/common/cnxk/roc_nix.h
> +++ b/drivers/common/cnxk/roc_nix.h
> @@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
>  
>  		struct {
>  			uint32_t rq;
> +			uint16_t tc;
>  			uint16_t cq_drop;
>  			bool enable;
>  		} cq_cfg;
>  
>  		struct {
> +			uint32_t sq;
> +			uint16_t tc;
>  			bool enable;
>  		} tm_cfg;
>  	};
>  };
>  
> +struct roc_nix_pfc_cfg {
> +	enum roc_nix_fc_mode mode;
> +	/* For SET, tc must be [0, 15].
> +	 * For GET, TC will represent bitmap
> +	 */
> +	uint16_t tc;
> +};
> +
>  struct roc_nix_eeprom_info {
>  #define ROC_NIX_EEPROM_SIZE 256
>  	uint16_t sff_id;
> @@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
>  enum roc_nix_tm_tree {
>  	ROC_NIX_TM_DEFAULT = 0,
>  	ROC_NIX_TM_RLIMIT,
> +	ROC_NIX_TM_PFC,
>  	ROC_NIX_TM_USER,
>  	ROC_NIX_TM_TREE_MAX,
>  };
> @@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
>  int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
>  int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
>  int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
> +int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
>  bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
>  int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
>  
> @@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
>  int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
>  				  enum roc_nix_fc_mode mode);
>  
> +int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
> +				   struct roc_nix_pfc_cfg *pfc_cfg);
> +
> +int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
> +				   struct roc_nix_pfc_cfg *pfc_cfg);
> +
> +uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
> +
>  enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
>  
>  void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
> diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
> index ca29cd2bf9..814ccab839 100644
> --- a/drivers/common/cnxk/roc_nix_fc.c
> +++ b/drivers/common/cnxk/roc_nix_fc.c
> @@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
>  	struct mbox *mbox = get_mbox(roc_nix);
>  	struct nix_bp_cfg_req *req;
>  	struct nix_bp_cfg_rsp *rsp;
> -	int rc = -ENOSPC;
> +	int rc = -ENOSPC, i;
>  
>  	if (roc_nix_is_sdp(roc_nix))
>  		return 0;
> @@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
>  		req = mbox_alloc_msg_nix_bp_enable(mbox);
>  		if (req == NULL)
>  			return rc;
> +
>  		req->chan_base = 0;
> -		req->chan_cnt = 1;
> -		req->bpid_per_chan = 0;
> +		if (roc_nix_is_lbk(roc_nix))
> +			req->chan_cnt = NIX_LBK_MAX_CHAN;
> +		else
> +			req->chan_cnt = NIX_CGX_MAX_CHAN;
> +
> +		req->bpid_per_chan = true;
>  
>  		rc = mbox_process_msg(mbox, (void *)&rsp);
>  		if (rc || (req->chan_cnt != rsp->chan_cnt))
>  			goto exit;
>  
> -		nix->bpid[0] = rsp->chan_bpid[0];
>  		nix->chan_cnt = rsp->chan_cnt;
> +		for (i = 0; i < rsp->chan_cnt; i++)
> +			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
>  	} else {
>  		req = mbox_alloc_msg_nix_bp_disable(mbox);
>  		if (req == NULL)
>  			return rc;
>  		req->chan_base = 0;
> -		req->chan_cnt = 1;
> +		req->chan_cnt = nix->chan_cnt;
>  
>  		rc = mbox_process(mbox);
>  		if (rc)
> @@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
>  		aq->op = NIX_AQ_INSTOP_WRITE;
>  
>  		if (fc_cfg->cq_cfg.enable) {
> -			aq->cq.bpid = nix->bpid[0];
> +			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
>  			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
>  			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
>  			aq->cq_mask.bp = ~(aq->cq_mask.bp);
> @@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
>  		aq->op = NIX_AQ_INSTOP_WRITE;
>  
>  		if (fc_cfg->cq_cfg.enable) {
> -			aq->cq.bpid = nix->bpid[0];
> +			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
>  			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
>  			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
>  			aq->cq_mask.bp = ~(aq->cq_mask.bp);
> @@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
>  		return nix_fc_rxchan_bpid_set(roc_nix,
>  					      fc_cfg->rxchan_cfg.enable);
>  	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
> -		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
> +		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
> +					    fc_cfg->tm_cfg.tc,
> +					    fc_cfg->tm_cfg.enable);
>  
>  	return -EINVAL;
>  }
> @@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
>  
>  	mbox_process(mbox);
>  }
> +
> +int
> +roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
> +{
> +	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +	struct mbox *mbox = get_mbox(roc_nix);
> +	uint8_t tx_pause, rx_pause;
> +	struct cgx_pfc_cfg *req;
> +	struct cgx_pfc_rsp *rsp;
> +	int rc = -ENOSPC;
> +
> +	if (roc_nix_is_lbk(roc_nix))
> +		return NIX_ERR_OP_NOTSUP;
> +
> +	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
> +		   (pfc_cfg->mode == ROC_NIX_FC_RX);
> +	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
> +		   (pfc_cfg->mode == ROC_NIX_FC_TX);
> +
> +	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
> +	if (req == NULL)
> +		goto exit;
> +
> +	req->pfc_en = pfc_cfg->tc;
> +	req->rx_pause = rx_pause;
> +	req->tx_pause = tx_pause;
> +
> +	rc = mbox_process_msg(mbox, (void *)&rsp);
> +	if (rc)
> +		goto exit;
> +
> +	nix->rx_pause = rsp->rx_pause;
> +	nix->tx_pause = rsp->tx_pause;
> +	if (rsp->tx_pause)
> +		nix->cev |= BIT(pfc_cfg->tc);
> +	else
> +		nix->cev &= ~BIT(pfc_cfg->tc);
> +
> +exit:
> +	return rc;
> +}
> +
> +int
> +roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
> +{
> +	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +
> +	if (roc_nix_is_lbk(roc_nix))
> +		return NIX_ERR_OP_NOTSUP;
> +
> +	pfc_cfg->tc = nix->cev;
> +
> +	if (nix->rx_pause && nix->tx_pause)
> +		pfc_cfg->mode = ROC_NIX_FC_FULL;
> +	else if (nix->rx_pause)
> +		pfc_cfg->mode = ROC_NIX_FC_RX;
> +	else if (nix->tx_pause)
> +		pfc_cfg->mode = ROC_NIX_FC_TX;
> +	else
> +		pfc_cfg->mode = ROC_NIX_FC_NONE;
> +
> +	return 0;
> +}
> +
> +uint16_t
> +roc_nix_chan_count_get(struct roc_nix *roc_nix)
> +{
> +	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +
> +	return nix->chan_cnt;
> +}
> diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
> index 04575af295..db34bcadd0 100644
> --- a/drivers/common/cnxk/roc_nix_priv.h
> +++ b/drivers/common/cnxk/roc_nix_priv.h
> @@ -33,6 +33,7 @@ struct nix_qint {
>  /* Traffic Manager */
>  #define NIX_TM_MAX_HW_TXSCHQ 512
>  #define NIX_TM_HW_ID_INVALID UINT32_MAX
> +#define NIX_TM_CHAN_INVALID UINT16_MAX
>  
>  /* TM flags */
>  #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
> @@ -56,6 +57,7 @@ struct nix_tm_node {
>  	uint32_t priority;
>  	uint32_t weight;
>  	uint16_t lvl;
> +	uint16_t rel_chan;
>  	uint32_t parent_id;
>  	uint32_t shaper_profile_id;
>  	void (*free_fn)(void *node);
> @@ -139,6 +141,7 @@ struct nix {
>  	uint16_t msixoff;
>  	uint8_t rx_pause;
>  	uint8_t tx_pause;
> +	uint16_t cev;
>  	uint64_t rx_cfg;
>  	struct dev dev;
>  	uint16_t cints;
> @@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
>  	       bool ena);
>  int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
>  int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
> -int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
> +int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
> +			 bool enable);
>  
>  /*
>   * TM priv utils.
> diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
> index b3d8ebd3c2..89d1478486 100644
> --- a/drivers/common/cnxk/roc_nix_tm.c
> +++ b/drivers/common/cnxk/roc_nix_tm.c
> @@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
>  			if (is_pf_or_lbk && !skip_bp &&
>  			    node->hw_lvl == nix->tm_link_cfg_lvl) {
>  				node->bp_capa = 1;
> -				skip_bp = true;
> +				skip_bp = false;
>  			}
>  
>  			rc = nix_tm_node_reg_conf(nix, node);
> @@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
>  }
>  
>  int
> -nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
> +nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
> +		     bool enable)
>  {
>  	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>  	enum roc_nix_tm_tree tree = nix->tm_tree;
>  	struct mbox *mbox = (&nix->dev)->mbox;
>  	struct nix_txschq_config *req = NULL;
>  	struct nix_tm_node_list *list;
> +	struct nix_tm_node *sq_node;
> +	struct nix_tm_node *parent;
>  	struct nix_tm_node *node;
>  	uint8_t k = 0;
>  	uint16_t link;
>  	int rc = 0;
>  
> +	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
> +	parent = sq_node->parent;
> +	while (parent) {
> +		if (parent->lvl == ROC_TM_LVL_SCH2)
> +			break;
> +
> +		parent = parent->parent;
> +	}
> +
>  	list = nix_tm_node_list(nix, tree);
>  	link = nix->tx_link;
>  
> +	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
> +		rc = -EINVAL;
> +		goto err;
> +	}
> +
>  	TAILQ_FOREACH(node, list, node) {
>  		if (node->hw_lvl != nix->tm_link_cfg_lvl)
>  			continue;
> @@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
>  		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
>  			continue;
>  
> +		if (node->hw_id != parent->hw_id)
> +			continue;
> +
>  		if (!req) {
>  			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
>  			req->lvl = nix->tm_link_cfg_lvl;
> @@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
>  		}
>  
>  		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
> -		req->regval[k] = enable ? BIT_ULL(13) : 0;
> -		req->regval_mask[k] = ~BIT_ULL(13);
> +		req->regval[k] = enable ? tc : 0;
> +		req->regval[k] |= enable ? BIT_ULL(13) : 0;
> +		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
>  		k++;
>  
>  		if (k >= MAX_REGS_PER_MBOX_MSG) {
> @@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
>  			goto err;
>  	}
>  
> +	parent->rel_chan = tc;
>  	return 0;
>  err:
>  	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
> @@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
>  	}
>  
>  	/* Disable backpressure */
> -	rc = nix_tm_bp_config_set(roc_nix, false);
> +	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
>  	if (rc) {
>  		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
>  		return rc;
> @@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
>  		return 0;
>  
>  	/* Restore backpressure */
> -	rc = nix_tm_bp_config_set(roc_nix, true);
> +	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
>  	if (rc) {
>  		plt_err("Failed to restore backpressure, rc=%d", rc);
>  		return rc;
> @@ -1293,6 +1315,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
>  		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>  		node->lvl = lvl;
>  		node->tree = ROC_NIX_TM_DEFAULT;
> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>  
>  		rc = nix_tm_node_add(roc_nix, node);
>  		if (rc)
> @@ -1319,6 +1342,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
>  		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>  		node->lvl = leaf_lvl;
>  		node->tree = ROC_NIX_TM_DEFAULT;
> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>  
>  		rc = nix_tm_node_add(roc_nix, node);
>  		if (rc)
> @@ -1359,6 +1383,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
>  		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>  		node->lvl = lvl;
>  		node->tree = ROC_NIX_TM_RLIMIT;
> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>  
>  		rc = nix_tm_node_add(roc_nix, node);
>  		if (rc)
> @@ -1384,6 +1409,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
>  		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>  		node->lvl = lvl;
>  		node->tree = ROC_NIX_TM_RLIMIT;
> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>  
>  		rc = nix_tm_node_add(roc_nix, node);
>  		if (rc)
> @@ -1408,6 +1434,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
>  		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>  		node->lvl = leaf_lvl;
>  		node->tree = ROC_NIX_TM_RLIMIT;
> +		node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +		rc = nix_tm_node_add(roc_nix, node);
> +		if (rc)
> +			goto error;
> +	}
> +
> +	return 0;
> +error:
> +	nix_tm_node_free(node);
> +	return rc;
> +}
> +
> +int
> +roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
> +{
> +	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +	uint32_t nonleaf_id = nix->nb_tx_queues;
> +	struct nix_tm_node *node = NULL;
> +	uint8_t leaf_lvl, lvl, lvl_end;
> +	uint32_t tl2_node_id;
> +	uint32_t parent, i;
> +	int rc = -ENOMEM;
> +
> +	parent = ROC_NIX_TM_NODE_ID_INVALID;
> +	lvl_end = ROC_TM_LVL_SCH3;
> +	leaf_lvl = ROC_TM_LVL_QUEUE;
> +
> +	/* TL1 node */
> +	node = nix_tm_node_alloc();
> +	if (!node)
> +		goto error;
> +
> +	node->id = nonleaf_id;
> +	node->parent_id = parent;
> +	node->priority = 0;
> +	node->weight = NIX_TM_DFLT_RR_WT;
> +	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +	node->lvl = ROC_TM_LVL_ROOT;
> +	node->tree = ROC_NIX_TM_PFC;
> +	node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +	rc = nix_tm_node_add(roc_nix, node);
> +	if (rc)
> +		goto error;
> +
> +	parent = nonleaf_id;
> +	nonleaf_id++;
> +
> +	/* TL2 node */
> +	rc = -ENOMEM;
> +	node = nix_tm_node_alloc();
> +	if (!node)
> +		goto error;
> +
> +	node->id = nonleaf_id;
> +	node->parent_id = parent;
> +	node->priority = 0;
> +	node->weight = NIX_TM_DFLT_RR_WT;
> +	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +	node->lvl = ROC_TM_LVL_SCH1;
> +	node->tree = ROC_NIX_TM_PFC;
> +	node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +	rc = nix_tm_node_add(roc_nix, node);
> +	if (rc)
> +		goto error;
> +
> +	tl2_node_id = nonleaf_id;
> +	nonleaf_id++;
> +
> +	for (i = 0; i < nix->nb_tx_queues; i++) {
> +		parent = tl2_node_id;
> +		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
> +			rc = -ENOMEM;
> +			node = nix_tm_node_alloc();
> +			if (!node)
> +				goto error;
> +
> +			node->id = nonleaf_id;
> +			node->parent_id = parent;
> +			node->priority = 0;
> +			node->weight = NIX_TM_DFLT_RR_WT;
> +			node->shaper_profile_id =
> +				ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +			node->lvl = lvl;
> +			node->tree = ROC_NIX_TM_PFC;
> +			node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +			rc = nix_tm_node_add(roc_nix, node);
> +			if (rc)
> +				goto error;
> +
> +			parent = nonleaf_id;
> +			nonleaf_id++;
> +		}
> +
> +		lvl = ROC_TM_LVL_SCH4;
> +
> +		rc = -ENOMEM;
> +		node = nix_tm_node_alloc();
> +		if (!node)
> +			goto error;
> +
> +		node->id = nonleaf_id;
> +		node->parent_id = parent;
> +		node->priority = 0;
> +		node->weight = NIX_TM_DFLT_RR_WT;
> +		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +		node->lvl = lvl;
> +		node->tree = ROC_NIX_TM_PFC;
> +		node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +		rc = nix_tm_node_add(roc_nix, node);
> +		if (rc)
> +			goto error;
> +
> +		parent = nonleaf_id;
> +		nonleaf_id++;
> +
> +		rc = -ENOMEM;
> +		node = nix_tm_node_alloc();
> +		if (!node)
> +			goto error;
> +
> +		node->id = i;
> +		node->parent_id = parent;
> +		node->priority = 0;
> +		node->weight = NIX_TM_DFLT_RR_WT;
> +		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +		node->lvl = leaf_lvl;
> +		node->tree = ROC_NIX_TM_PFC;
> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>  
>  		rc = nix_tm_node_add(roc_nix, node);
>  		if (rc)
> diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
> index 3d81247a12..d3d39eeb99 100644
> --- a/drivers/common/cnxk/roc_nix_tm_ops.c
> +++ b/drivers/common/cnxk/roc_nix_tm_ops.c
> @@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
>  	/* Disable backpressure, it will be enabled back if needed on
>  	 * hierarchy enable
>  	 */
> -	rc = nix_tm_bp_config_set(roc_nix, false);
> -	if (rc) {
> -		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
> -		goto cleanup;
> +	for (i = 0; i < sq_cnt; i++) {
> +		sq = nix->sqs[i];
> +		if (!sq)
> +			continue;
> +
> +		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
> +		if (rc) {
> +			plt_err("Failed to disable backpressure, rc=%d", rc);
> +			goto cleanup;
> +		}
>  	}
>  
>  	/* Flush all tx queues */
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 5a03b91784..f36a662911 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -106,6 +106,7 @@ INTERNAL {
>  	roc_nix_bpf_stats_reset;
>  	roc_nix_bpf_stats_to_idx;
>  	roc_nix_bpf_timeunit_get;
> +	roc_nix_chan_count_get;
>  	roc_nix_cq_dump;
>  	roc_nix_cq_fini;
>  	roc_nix_cq_init;
> @@ -196,6 +197,8 @@ INTERNAL {
>  	roc_nix_npc_promisc_ena_dis;
>  	roc_nix_npc_rx_ena_dis;
>  	roc_nix_npc_mcast_config;
> +	roc_nix_pfc_mode_set;
> +	roc_nix_pfc_mode_get;

Get before set ;-)

>  	roc_nix_ptp_clock_read;
>  	roc_nix_ptp_info_cb_register;
>  	roc_nix_ptp_info_cb_unregister;
> @@ -260,6 +263,7 @@ INTERNAL {
>  	roc_nix_tm_node_stats_get;
>  	roc_nix_tm_node_suspend_resume;
>  	roc_nix_tm_prealloc_res;
> +	roc_nix_tm_pfc_prepare_tree;
>  	roc_nix_tm_prepare_rate_limited_tree;
>  	roc_nix_tm_rlimit_sq;
>  	roc_nix_tm_root_has_sp;


-- 
Regards, Ray K

^ permalink raw reply	[flat|nested] 29+ messages in thread

* RE: [EXT] Re: [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API
  2022-01-25 10:02             ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API Ray Kinsella
@ 2022-01-25 10:19               ` Sunil Kumar Kori
  0 siblings, 0 replies; 29+ messages in thread
From: Sunil Kumar Kori @ 2022-01-25 10:19 UTC (permalink / raw)
  To: Ray Kinsella
  Cc: Nithin Kumar Dabilpuram, Kiran Kumar Kokkilagadda,
	Satha Koteswara Rao Kottidi, dev



Regards
Sunil Kumar Kori

>-----Original Message-----
>From: Ray Kinsella <mdr@ashroe.eu>
>Sent: Tuesday, January 25, 2022 3:33 PM
>To: Sunil Kumar Kori <skori@marvell.com>
>Cc: Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>; Kiran Kumar
>Kokkilagadda <kirankumark@marvell.com>; Satha Koteswara Rao Kottidi
><skoteshwar@marvell.com>; dev@dpdk.org
>Subject: [EXT] Re: [PATCH v4 1/2] common/cnxk: support priority flow ctrl
>config API
>
>External Email
>
>----------------------------------------------------------------------
>
>skori@marvell.com writes:
>
>> From: Sunil Kumar Kori <skori@marvell.com>
>>
>> CNXK platforms support priority flow control(802.1qbb) to pause
>> respective traffic per class on that link.
>>
>> Patch adds RoC interface to configure priority flow control on MAC
>> block i.e. CGX on cn9k and RPM on cn10k.
>>
>> Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
>> ---
>> v1..v2:
>>  - fix RoC API naming convention.
>>
>> v2..v3:
>>  - fix pause quanta configuration for cn10k.
>>  - remove unnecessary code
>>
>> v3..v4:
>>  - fix PFC configuration with other type of TM tree
>>    i.e. default, user and rate limit tree.
>>
>>  drivers/common/cnxk/roc_mbox.h       |  19 ++-
>>  drivers/common/cnxk/roc_nix.h        |  21 ++++
>>  drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
>>  drivers/common/cnxk/roc_nix_priv.h   |   6 +-
>>  drivers/common/cnxk/roc_nix_tm.c     | 171
>++++++++++++++++++++++++++-
>>  drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
>>  drivers/common/cnxk/version.map      |   4 +
>>  7 files changed, 310 insertions(+), 20 deletions(-)
>>
>> diff --git a/drivers/common/cnxk/roc_mbox.h
>> b/drivers/common/cnxk/roc_mbox.h index e97d93e261..39f63c9271
>100644
>> --- a/drivers/common/cnxk/roc_mbox.h
>> +++ b/drivers/common/cnxk/roc_mbox.h
>> @@ -95,6 +95,8 @@ struct mbox_msghdr {
>>  	  msg_rsp)                                                             \
>>  	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
>>  	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
>> +	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg,
>cgx_pfc_cfg,  \
>> +	  cgx_pfc_rsp)                                                         \
>>  	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
>>  	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
>>  	  npa_lf_alloc_rsp)                                                    \
>> @@ -550,6 +552,19 @@ struct cgx_pause_frm_cfg {
>>  	uint8_t __io tx_pause;
>>  };
>>
>> +struct cgx_pfc_cfg {
>> +	struct mbox_msghdr hdr;
>> +	uint8_t __io rx_pause;
>> +	uint8_t __io tx_pause;
>> +	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes
>> +*/ };
>> +
>> +struct cgx_pfc_rsp {
>> +	struct mbox_msghdr hdr;
>> +	uint8_t __io rx_pause;
>> +	uint8_t __io tx_pause;
>> +};
>> +
>>  struct sfp_eeprom_s {
>>  #define SFP_EEPROM_SIZE 256
>>  	uint16_t __io sff_id;
>> @@ -1124,7 +1139,9 @@ struct nix_bp_cfg_req {
>>  /* PF can be mapped to either CGX or LBK interface,
>>   * so maximum 64 channels are possible.
>>   */
>> -#define NIX_MAX_CHAN 64
>> +#define NIX_MAX_CHAN	 64
>> +#define NIX_CGX_MAX_CHAN 16
>> +#define NIX_LBK_MAX_CHAN NIX_MAX_CHAN
>>  struct nix_bp_cfg_rsp {
>>  	struct mbox_msghdr hdr;
>>  	/* Channel and bpid mapping */
>> diff --git a/drivers/common/cnxk/roc_nix.h
>> b/drivers/common/cnxk/roc_nix.h index 69a5e8e7b4..e05b7b7dd8 100644
>> --- a/drivers/common/cnxk/roc_nix.h
>> +++ b/drivers/common/cnxk/roc_nix.h
>> @@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
>>
>>  		struct {
>>  			uint32_t rq;
>> +			uint16_t tc;
>>  			uint16_t cq_drop;
>>  			bool enable;
>>  		} cq_cfg;
>>
>>  		struct {
>> +			uint32_t sq;
>> +			uint16_t tc;
>>  			bool enable;
>>  		} tm_cfg;
>>  	};
>>  };
>>
>> +struct roc_nix_pfc_cfg {
>> +	enum roc_nix_fc_mode mode;
>> +	/* For SET, tc must be [0, 15].
>> +	 * For GET, TC will represent bitmap
>> +	 */
>> +	uint16_t tc;
>> +};
>> +
>>  struct roc_nix_eeprom_info {
>>  #define ROC_NIX_EEPROM_SIZE 256
>>  	uint16_t sff_id;
>> @@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct
>> roc_nix *roc_nix);  enum roc_nix_tm_tree {
>>  	ROC_NIX_TM_DEFAULT = 0,
>>  	ROC_NIX_TM_RLIMIT,
>> +	ROC_NIX_TM_PFC,
>>  	ROC_NIX_TM_USER,
>>  	ROC_NIX_TM_TREE_MAX,
>>  };
>> @@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct
>> roc_nix_tm_node *node,  int __roc_api roc_nix_tm_lvl_cnt_get(struct
>> roc_nix *roc_nix);  int __roc_api
>> roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
>> int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix
>> *roc_nix);
>> +int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
>>  bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix
>> *nix);  int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
>>
>> @@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct
>> roc_nix *roc_nix,  int __roc_api roc_nix_fc_mode_set(struct roc_nix
>*roc_nix,
>>  				  enum roc_nix_fc_mode mode);
>>
>> +int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
>> +				   struct roc_nix_pfc_cfg *pfc_cfg);
>> +
>> +int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
>> +				   struct roc_nix_pfc_cfg *pfc_cfg);
>> +
>> +uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
>> +
>>  enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix
>> *roc_nix);
>>
>>  void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix,
>> uint64_t pool_id, diff --git a/drivers/common/cnxk/roc_nix_fc.c
>> b/drivers/common/cnxk/roc_nix_fc.c
>> index ca29cd2bf9..814ccab839 100644
>> --- a/drivers/common/cnxk/roc_nix_fc.c
>> +++ b/drivers/common/cnxk/roc_nix_fc.c
>> @@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool
>enable)
>>  	struct mbox *mbox = get_mbox(roc_nix);
>>  	struct nix_bp_cfg_req *req;
>>  	struct nix_bp_cfg_rsp *rsp;
>> -	int rc = -ENOSPC;
>> +	int rc = -ENOSPC, i;
>>
>>  	if (roc_nix_is_sdp(roc_nix))
>>  		return 0;
>> @@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix,
>bool enable)
>>  		req = mbox_alloc_msg_nix_bp_enable(mbox);
>>  		if (req == NULL)
>>  			return rc;
>> +
>>  		req->chan_base = 0;
>> -		req->chan_cnt = 1;
>> -		req->bpid_per_chan = 0;
>> +		if (roc_nix_is_lbk(roc_nix))
>> +			req->chan_cnt = NIX_LBK_MAX_CHAN;
>> +		else
>> +			req->chan_cnt = NIX_CGX_MAX_CHAN;
>> +
>> +		req->bpid_per_chan = true;
>>
>>  		rc = mbox_process_msg(mbox, (void *)&rsp);
>>  		if (rc || (req->chan_cnt != rsp->chan_cnt))
>>  			goto exit;
>>
>> -		nix->bpid[0] = rsp->chan_bpid[0];
>>  		nix->chan_cnt = rsp->chan_cnt;
>> +		for (i = 0; i < rsp->chan_cnt; i++)
>> +			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
>>  	} else {
>>  		req = mbox_alloc_msg_nix_bp_disable(mbox);
>>  		if (req == NULL)
>>  			return rc;
>>  		req->chan_base = 0;
>> -		req->chan_cnt = 1;
>> +		req->chan_cnt = nix->chan_cnt;
>>
>>  		rc = mbox_process(mbox);
>>  		if (rc)
>> @@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct
>roc_nix_fc_cfg *fc_cfg)
>>  		aq->op = NIX_AQ_INSTOP_WRITE;
>>
>>  		if (fc_cfg->cq_cfg.enable) {
>> -			aq->cq.bpid = nix->bpid[0];
>> +			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
>>  			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
>>  			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
>>  			aq->cq_mask.bp = ~(aq->cq_mask.bp); @@ -169,7
>+175,7 @@
>> nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
>>  		aq->op = NIX_AQ_INSTOP_WRITE;
>>
>>  		if (fc_cfg->cq_cfg.enable) {
>> -			aq->cq.bpid = nix->bpid[0];
>> +			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
>>  			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
>>  			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
>>  			aq->cq_mask.bp = ~(aq->cq_mask.bp); @@ -210,7
>+216,9 @@
>> roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
>>  		return nix_fc_rxchan_bpid_set(roc_nix,
>>  					      fc_cfg->rxchan_cfg.enable);
>>  	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
>> -		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
>> +		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
>> +					    fc_cfg->tm_cfg.tc,
>> +					    fc_cfg->tm_cfg.enable);
>>
>>  	return -EINVAL;
>>  }
>> @@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix,
>> uint64_t pool_id, uint8_t ena,
>>
>>  	mbox_process(mbox);
>>  }
>> +
>> +int
>> +roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg
>> +*pfc_cfg) {
>> +	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>> +	struct mbox *mbox = get_mbox(roc_nix);
>> +	uint8_t tx_pause, rx_pause;
>> +	struct cgx_pfc_cfg *req;
>> +	struct cgx_pfc_rsp *rsp;
>> +	int rc = -ENOSPC;
>> +
>> +	if (roc_nix_is_lbk(roc_nix))
>> +		return NIX_ERR_OP_NOTSUP;
>> +
>> +	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
>> +		   (pfc_cfg->mode == ROC_NIX_FC_RX);
>> +	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
>> +		   (pfc_cfg->mode == ROC_NIX_FC_TX);
>> +
>> +	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
>> +	if (req == NULL)
>> +		goto exit;
>> +
>> +	req->pfc_en = pfc_cfg->tc;
>> +	req->rx_pause = rx_pause;
>> +	req->tx_pause = tx_pause;
>> +
>> +	rc = mbox_process_msg(mbox, (void *)&rsp);
>> +	if (rc)
>> +		goto exit;
>> +
>> +	nix->rx_pause = rsp->rx_pause;
>> +	nix->tx_pause = rsp->tx_pause;
>> +	if (rsp->tx_pause)
>> +		nix->cev |= BIT(pfc_cfg->tc);
>> +	else
>> +		nix->cev &= ~BIT(pfc_cfg->tc);
>> +
>> +exit:
>> +	return rc;
>> +}
>> +
>> +int
>> +roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg
>> +*pfc_cfg) {
>> +	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>> +
>> +	if (roc_nix_is_lbk(roc_nix))
>> +		return NIX_ERR_OP_NOTSUP;
>> +
>> +	pfc_cfg->tc = nix->cev;
>> +
>> +	if (nix->rx_pause && nix->tx_pause)
>> +		pfc_cfg->mode = ROC_NIX_FC_FULL;
>> +	else if (nix->rx_pause)
>> +		pfc_cfg->mode = ROC_NIX_FC_RX;
>> +	else if (nix->tx_pause)
>> +		pfc_cfg->mode = ROC_NIX_FC_TX;
>> +	else
>> +		pfc_cfg->mode = ROC_NIX_FC_NONE;
>> +
>> +	return 0;
>> +}
>> +
>> +uint16_t
>> +roc_nix_chan_count_get(struct roc_nix *roc_nix) {
>> +	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>> +
>> +	return nix->chan_cnt;
>> +}
>> diff --git a/drivers/common/cnxk/roc_nix_priv.h
>> b/drivers/common/cnxk/roc_nix_priv.h
>> index 04575af295..db34bcadd0 100644
>> --- a/drivers/common/cnxk/roc_nix_priv.h
>> +++ b/drivers/common/cnxk/roc_nix_priv.h
>> @@ -33,6 +33,7 @@ struct nix_qint {
>>  /* Traffic Manager */
>>  #define NIX_TM_MAX_HW_TXSCHQ 512
>>  #define NIX_TM_HW_ID_INVALID UINT32_MAX
>> +#define NIX_TM_CHAN_INVALID UINT16_MAX
>>
>>  /* TM flags */
>>  #define NIX_TM_HIERARCHY_ENA BIT_ULL(0) @@ -56,6 +57,7 @@ struct
>> nix_tm_node {
>>  	uint32_t priority;
>>  	uint32_t weight;
>>  	uint16_t lvl;
>> +	uint16_t rel_chan;
>>  	uint32_t parent_id;
>>  	uint32_t shaper_profile_id;
>>  	void (*free_fn)(void *node);
>> @@ -139,6 +141,7 @@ struct nix {
>>  	uint16_t msixoff;
>>  	uint8_t rx_pause;
>>  	uint8_t tx_pause;
>> +	uint16_t cev;
>>  	uint64_t rx_cfg;
>>  	struct dev dev;
>>  	uint16_t cints;
>> @@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq,
>uint16_t qints, bool cfg,
>>  	       bool ena);
>>  int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool
>> enable);  int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool
>> *is_enabled); -int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool
>> enable);
>> +int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
>> +			 bool enable);
>>
>>  /*
>>   * TM priv utils.
>> diff --git a/drivers/common/cnxk/roc_nix_tm.c
>> b/drivers/common/cnxk/roc_nix_tm.c
>> index b3d8ebd3c2..89d1478486 100644
>> --- a/drivers/common/cnxk/roc_nix_tm.c
>> +++ b/drivers/common/cnxk/roc_nix_tm.c
>> @@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum
>roc_nix_tm_tree tree)
>>  			if (is_pf_or_lbk && !skip_bp &&
>>  			    node->hw_lvl == nix->tm_link_cfg_lvl) {
>>  				node->bp_capa = 1;
>> -				skip_bp = true;
>> +				skip_bp = false;
>>  			}
>>
>>  			rc = nix_tm_node_reg_conf(nix, node); @@ -317,21
>+317,38 @@
>> nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)  }
>>
>>  int
>> -nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
>> +nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
>> +		     bool enable)
>>  {
>>  	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>>  	enum roc_nix_tm_tree tree = nix->tm_tree;
>>  	struct mbox *mbox = (&nix->dev)->mbox;
>>  	struct nix_txschq_config *req = NULL;
>>  	struct nix_tm_node_list *list;
>> +	struct nix_tm_node *sq_node;
>> +	struct nix_tm_node *parent;
>>  	struct nix_tm_node *node;
>>  	uint8_t k = 0;
>>  	uint16_t link;
>>  	int rc = 0;
>>
>> +	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
>> +	parent = sq_node->parent;
>> +	while (parent) {
>> +		if (parent->lvl == ROC_TM_LVL_SCH2)
>> +			break;
>> +
>> +		parent = parent->parent;
>> +	}
>> +
>>  	list = nix_tm_node_list(nix, tree);
>>  	link = nix->tx_link;
>>
>> +	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan
>!= tc) {
>> +		rc = -EINVAL;
>> +		goto err;
>> +	}
>> +
>>  	TAILQ_FOREACH(node, list, node) {
>>  		if (node->hw_lvl != nix->tm_link_cfg_lvl)
>>  			continue;
>> @@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool
>enable)
>>  		if (!(node->flags & NIX_TM_NODE_HWRES) || !node-
>>bp_capa)
>>  			continue;
>>
>> +		if (node->hw_id != parent->hw_id)
>> +			continue;
>> +
>>  		if (!req) {
>>  			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
>>  			req->lvl = nix->tm_link_cfg_lvl;
>> @@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool
>enable)
>>  		}
>>
>>  		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id,
>link);
>> -		req->regval[k] = enable ? BIT_ULL(13) : 0;
>> -		req->regval_mask[k] = ~BIT_ULL(13);
>> +		req->regval[k] = enable ? tc : 0;
>> +		req->regval[k] |= enable ? BIT_ULL(13) : 0;
>> +		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
>>  		k++;
>>
>>  		if (k >= MAX_REGS_PER_MBOX_MSG) {
>> @@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool
>enable)
>>  			goto err;
>>  	}
>>
>> +	parent->rel_chan = tc;
>>  	return 0;
>>  err:
>>  	plt_err("Failed to %s bp on link %u, rc=%d(%s)", @@ -602,7 +624,7
>@@
>> nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
>>  	}
>>
>>  	/* Disable backpressure */
>> -	rc = nix_tm_bp_config_set(roc_nix, false);
>> +	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
>>  	if (rc) {
>>  		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
>>  		return rc;
>> @@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
>>  		return 0;
>>
>>  	/* Restore backpressure */
>> -	rc = nix_tm_bp_config_set(roc_nix, true);
>> +	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
>>  	if (rc) {
>>  		plt_err("Failed to restore backpressure, rc=%d", rc);
>>  		return rc;
>> @@ -1293,6 +1315,7 @@ nix_tm_prepare_default_tree(struct roc_nix
>*roc_nix)
>>  		node->shaper_profile_id =
>ROC_NIX_TM_SHAPER_PROFILE_NONE;
>>  		node->lvl = lvl;
>>  		node->tree = ROC_NIX_TM_DEFAULT;
>> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>>
>>  		rc = nix_tm_node_add(roc_nix, node);
>>  		if (rc)
>> @@ -1319,6 +1342,7 @@ nix_tm_prepare_default_tree(struct roc_nix
>*roc_nix)
>>  		node->shaper_profile_id =
>ROC_NIX_TM_SHAPER_PROFILE_NONE;
>>  		node->lvl = leaf_lvl;
>>  		node->tree = ROC_NIX_TM_DEFAULT;
>> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>>
>>  		rc = nix_tm_node_add(roc_nix, node);
>>  		if (rc)
>> @@ -1359,6 +1383,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct
>roc_nix *roc_nix)
>>  		node->shaper_profile_id =
>ROC_NIX_TM_SHAPER_PROFILE_NONE;
>>  		node->lvl = lvl;
>>  		node->tree = ROC_NIX_TM_RLIMIT;
>> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>>
>>  		rc = nix_tm_node_add(roc_nix, node);
>>  		if (rc)
>> @@ -1384,6 +1409,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct
>roc_nix *roc_nix)
>>  		node->shaper_profile_id =
>ROC_NIX_TM_SHAPER_PROFILE_NONE;
>>  		node->lvl = lvl;
>>  		node->tree = ROC_NIX_TM_RLIMIT;
>> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>>
>>  		rc = nix_tm_node_add(roc_nix, node);
>>  		if (rc)
>> @@ -1408,6 +1434,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct
>roc_nix *roc_nix)
>>  		node->shaper_profile_id =
>ROC_NIX_TM_SHAPER_PROFILE_NONE;
>>  		node->lvl = leaf_lvl;
>>  		node->tree = ROC_NIX_TM_RLIMIT;
>> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>> +
>> +		rc = nix_tm_node_add(roc_nix, node);
>> +		if (rc)
>> +			goto error;
>> +	}
>> +
>> +	return 0;
>> +error:
>> +	nix_tm_node_free(node);
>> +	return rc;
>> +}
>> +
>> +int
>> +roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix) {
>> +	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>> +	uint32_t nonleaf_id = nix->nb_tx_queues;
>> +	struct nix_tm_node *node = NULL;
>> +	uint8_t leaf_lvl, lvl, lvl_end;
>> +	uint32_t tl2_node_id;
>> +	uint32_t parent, i;
>> +	int rc = -ENOMEM;
>> +
>> +	parent = ROC_NIX_TM_NODE_ID_INVALID;
>> +	lvl_end = ROC_TM_LVL_SCH3;
>> +	leaf_lvl = ROC_TM_LVL_QUEUE;
>> +
>> +	/* TL1 node */
>> +	node = nix_tm_node_alloc();
>> +	if (!node)
>> +		goto error;
>> +
>> +	node->id = nonleaf_id;
>> +	node->parent_id = parent;
>> +	node->priority = 0;
>> +	node->weight = NIX_TM_DFLT_RR_WT;
>> +	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>> +	node->lvl = ROC_TM_LVL_ROOT;
>> +	node->tree = ROC_NIX_TM_PFC;
>> +	node->rel_chan = NIX_TM_CHAN_INVALID;
>> +
>> +	rc = nix_tm_node_add(roc_nix, node);
>> +	if (rc)
>> +		goto error;
>> +
>> +	parent = nonleaf_id;
>> +	nonleaf_id++;
>> +
>> +	/* TL2 node */
>> +	rc = -ENOMEM;
>> +	node = nix_tm_node_alloc();
>> +	if (!node)
>> +		goto error;
>> +
>> +	node->id = nonleaf_id;
>> +	node->parent_id = parent;
>> +	node->priority = 0;
>> +	node->weight = NIX_TM_DFLT_RR_WT;
>> +	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>> +	node->lvl = ROC_TM_LVL_SCH1;
>> +	node->tree = ROC_NIX_TM_PFC;
>> +	node->rel_chan = NIX_TM_CHAN_INVALID;
>> +
>> +	rc = nix_tm_node_add(roc_nix, node);
>> +	if (rc)
>> +		goto error;
>> +
>> +	tl2_node_id = nonleaf_id;
>> +	nonleaf_id++;
>> +
>> +	for (i = 0; i < nix->nb_tx_queues; i++) {
>> +		parent = tl2_node_id;
>> +		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
>> +			rc = -ENOMEM;
>> +			node = nix_tm_node_alloc();
>> +			if (!node)
>> +				goto error;
>> +
>> +			node->id = nonleaf_id;
>> +			node->parent_id = parent;
>> +			node->priority = 0;
>> +			node->weight = NIX_TM_DFLT_RR_WT;
>> +			node->shaper_profile_id =
>> +				ROC_NIX_TM_SHAPER_PROFILE_NONE;
>> +			node->lvl = lvl;
>> +			node->tree = ROC_NIX_TM_PFC;
>> +			node->rel_chan = NIX_TM_CHAN_INVALID;
>> +
>> +			rc = nix_tm_node_add(roc_nix, node);
>> +			if (rc)
>> +				goto error;
>> +
>> +			parent = nonleaf_id;
>> +			nonleaf_id++;
>> +		}
>> +
>> +		lvl = ROC_TM_LVL_SCH4;
>> +
>> +		rc = -ENOMEM;
>> +		node = nix_tm_node_alloc();
>> +		if (!node)
>> +			goto error;
>> +
>> +		node->id = nonleaf_id;
>> +		node->parent_id = parent;
>> +		node->priority = 0;
>> +		node->weight = NIX_TM_DFLT_RR_WT;
>> +		node->shaper_profile_id =
>ROC_NIX_TM_SHAPER_PROFILE_NONE;
>> +		node->lvl = lvl;
>> +		node->tree = ROC_NIX_TM_PFC;
>> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>> +
>> +		rc = nix_tm_node_add(roc_nix, node);
>> +		if (rc)
>> +			goto error;
>> +
>> +		parent = nonleaf_id;
>> +		nonleaf_id++;
>> +
>> +		rc = -ENOMEM;
>> +		node = nix_tm_node_alloc();
>> +		if (!node)
>> +			goto error;
>> +
>> +		node->id = i;
>> +		node->parent_id = parent;
>> +		node->priority = 0;
>> +		node->weight = NIX_TM_DFLT_RR_WT;
>> +		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>> +		node->lvl = leaf_lvl;
>> +		node->tree = ROC_NIX_TM_PFC;
>> +		node->rel_chan = NIX_TM_CHAN_INVALID;
>>
>>  		rc = nix_tm_node_add(roc_nix, node);
>>  		if (rc)
>> diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
>> index 3d81247a12..d3d39eeb99 100644
>> --- a/drivers/common/cnxk/roc_nix_tm_ops.c
>> +++ b/drivers/common/cnxk/roc_nix_tm_ops.c
>> @@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
>>  	/* Disable backpressure, it will be enabled back if needed on
>>  	 * hierarchy enable
>>  	 */
>> -	rc = nix_tm_bp_config_set(roc_nix, false);
>> -	if (rc) {
>> -		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
>> -		goto cleanup;
>> +	for (i = 0; i < sq_cnt; i++) {
>> +		sq = nix->sqs[i];
>> +		if (!sq)
>> +			continue;
>> +
>> +		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
>> +		if (rc) {
>> +			plt_err("Failed to disable backpressure, rc=%d", rc);
>> +			goto cleanup;
>> +		}
>>  	}
>>
>>  	/* Flush all tx queues */
>> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
>> index 5a03b91784..f36a662911 100644
>> --- a/drivers/common/cnxk/version.map
>> +++ b/drivers/common/cnxk/version.map
>> @@ -106,6 +106,7 @@ INTERNAL {
>>  	roc_nix_bpf_stats_reset;
>>  	roc_nix_bpf_stats_to_idx;
>>  	roc_nix_bpf_timeunit_get;
>> +	roc_nix_chan_count_get;
>>  	roc_nix_cq_dump;
>>  	roc_nix_cq_fini;
>>  	roc_nix_cq_init;
>> @@ -196,6 +197,8 @@ INTERNAL {
>>  	roc_nix_npc_promisc_ena_dis;
>>  	roc_nix_npc_rx_ena_dis;
>>  	roc_nix_npc_mcast_config;
>> +	roc_nix_pfc_mode_set;
>> +	roc_nix_pfc_mode_get;
>
>Get before set ;-)
>
Ack.
>>  	roc_nix_ptp_clock_read;
>>  	roc_nix_ptp_info_cb_register;
>>  	roc_nix_ptp_info_cb_unregister;
>> @@ -260,6 +263,7 @@ INTERNAL {
>>  	roc_nix_tm_node_stats_get;
>>  	roc_nix_tm_node_suspend_resume;
>>  	roc_nix_tm_prealloc_res;
>> +	roc_nix_tm_pfc_prepare_tree;
>>  	roc_nix_tm_prepare_rate_limited_tree;
>>  	roc_nix_tm_rlimit_sq;
>>  	roc_nix_tm_root_has_sp;
>
>
>--
>Regards, Ray K

^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v5 1/2] common/cnxk: support priority flow ctrl config API
  2022-01-20 16:59           ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API skori
  2022-01-20 16:59             ` [PATCH v4 2/2] net/cnxk: support priority flow control skori
  2022-01-25 10:02             ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API Ray Kinsella
@ 2022-01-25 11:23             ` skori
  2022-01-25 11:23               ` [PATCH v5 2/2] net/cnxk: support priority flow control skori
  2022-01-28 13:28               ` [PATCH v6 1/2] common/cnxk: support priority flow ctrl config API skori
  2 siblings, 2 replies; 29+ messages in thread
From: skori @ 2022-01-25 11:23 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
traffic per class on the link.

Patch adds a RoC interface to configure priority flow control on the
MAC block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code

v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree.

v4..v5:
 - rebase on top of tree
 - fix review comments
 - fix initialization error for LBK devices
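
A minimal usage sketch for the RoC API added below, assuming a probed
struct roc_nix pointer and serialized mbox access; tc semantics follow
the roc_nix_pfc_cfg comment (one class on SET, a bitmap on GET):

	struct roc_nix_pfc_cfg pfc_cfg;
	int rc;

	memset(&pfc_cfg, 0, sizeof(pfc_cfg));
	pfc_cfg.mode = ROC_NIX_FC_FULL;	/* pause both directions */
	pfc_cfg.tc = 3;			/* SET takes one TC in [0, 15] */
	rc = roc_nix_pfc_mode_set(roc_nix, &pfc_cfg);
	if (rc)
		return rc;

	/* GET reports tc as a bitmap of enabled classes */
	rc = roc_nix_pfc_mode_get(roc_nix, &pfc_cfg);
	if (!rc && (pfc_cfg.tc & BIT(3)))
		plt_info("PFC active on TC3, mode=%d", pfc_cfg.mode);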

 drivers/common/cnxk/roc_mbox.h       |  19 ++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   6 +-
 drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 310 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index e97d93e261..769b415c00 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -550,6 +552,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1124,7 +1139,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 69a5e8e7b4..e05b7b7dd8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ca29cd2bf9..814ccab839 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 04575af295..db34bcadd0 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
 /* Traffic Manager */
 #define NIX_TM_MAX_HW_TXSCHQ 512
 #define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
 #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
 	uint32_t priority;
 	uint32_t weight;
 	uint16_t lvl;
+	uint16_t rel_chan;
 	uint32_t parent_id;
 	uint32_t shaper_profile_id;
 	void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index b3d8ebd3c2..89d1478486 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
+	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+		rc = -EINVAL;
+		goto err;
+	}
+
 	TAILQ_FOREACH(node, list, node) {
 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
 			continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 			goto err;
 	}
 
+	parent->rel_chan = tc;
 	return 0;
 err:
 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1293,6 +1315,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1319,6 +1342,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1359,6 +1383,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1384,6 +1409,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1408,6 +1434,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+			node->rel_chan = NIX_TM_CHAN_INVALID;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5a03b91784..d2a7ba1976 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -106,6 +106,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_init;
@@ -196,6 +197,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_get;
+	roc_nix_pfc_mode_set;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -260,6 +263,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v5 2/2] net/cnxk: support priority flow control
  2022-01-25 11:23             ` [PATCH v5 " skori
@ 2022-01-25 11:23               ` skori
  2022-01-28 13:28               ` [PATCH v6 1/2] common/cnxk: support priority flow ctrl config API skori
  1 sibling, 0 replies; 29+ messages in thread
From: skori @ 2022-01-25 11:23 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Patch implements priority flow control support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix application restart issue.
 
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.
 
v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree. 

v4..v5:
 - rebase on top of tree.
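
A minimal caller sketch for the new dev op, assuming Rx/Tx queue 1 is
set up and PFC is wanted on TC1 in both directions; the pause_time
value is illustrative only:

	struct rte_eth_pfc_queue_conf pfc_conf;
	int rc;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.mode = RTE_ETH_FC_FULL;
	/* Rx pause path: pause frames received for TC1 gate Tx queue 1 */
	pfc_conf.rx_pause.tc = 1;
	pfc_conf.rx_pause.tx_qid = 1;
	/* Tx pause path: congestion on Rx queue 1 emits pause for TC1 */
	pfc_conf.tx_pause.tc = 1;
	pfc_conf.tx_pause.rx_qid = 1;
	pfc_conf.tx_pause.pause_time = 0x100;	/* illustrative quanta */

	rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev, &pfc_conf);
	if (rc)
		plt_err("Failed to configure PFC, rc=%d", rc);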

 drivers/net/cnxk/cnxk_ethdev.c     |  27 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  18 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 177 +++++++++++++++++++++++++++--
 3 files changed, 213 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..4248267a12 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1260,6 +1260,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1548,6 +1550,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_set = cnxk_nix_priority_flow_ctrl_queue_set,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1721,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1736,6 +1741,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev,
+								   &pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..c4f28625f3 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -366,6 +378,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -467,6 +481,8 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+					  struct rte_eth_pfc_queue_conf *pfc_conf);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -606,6 +622,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..1b47fe9dc3 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,8 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+	devinfo->pfc_queue_tc_max = roc_nix_chan_count_get(&dev->nix);
 	return 0;
 }
 
@@ -230,6 +232,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +252,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +288,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +301,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +327,29 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+				      struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -911,3 +950,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v6 1/2] common/cnxk: support priority flow ctrl config API
  2022-01-25 11:23             ` [PATCH v5 " skori
  2022-01-25 11:23               ` [PATCH v5 2/2] net/cnxk: support priority flow control skori
@ 2022-01-28 13:28               ` skori
  2022-01-28 13:29                 ` [PATCH v6 2/2] net/cnxk: support priority flow control skori
  1 sibling, 1 reply; 29+ messages in thread
From: skori @ 2022-01-28 13:28 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
traffic per class on the link.

Patch adds a RoC interface to configure priority flow control on the
MAC block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code

v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree.

v4..v5:
 - rebase on top of tree
 - fix review comments
 - fix initialization error for LBK devices 

v5..v6:
 - fix review comments
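
The expected call flow when swapping the default TM tree for the PFC
tree added here, mirroring the net/cnxk usage in patch 2/2; a sketch
only, assuming traffic is already stopped and error handling trimmed:

	rc = roc_nix_tm_hierarchy_disable(nix);
	if (rc)
		return rc;

	/* Build the ROC_NIX_TM_PFC tree: one SCH2 subtree per SQ so
	 * each TC can be bound to its own backpressure channel
	 */
	rc = roc_nix_tm_pfc_prepare_tree(nix);
	if (rc)
		return rc;

	/* Enable the new tree and resume xmit */
	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC, true);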

 drivers/common/cnxk/roc_mbox.h       |  19 ++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   6 +-
 drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 310 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index e97d93e261..769b415c00 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -550,6 +552,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1124,7 +1139,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 69a5e8e7b4..e05b7b7dd8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ca29cd2bf9..814ccab839 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index deb2a6ba11..f3889424c4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
 /* Traffic Manager */
 #define NIX_TM_MAX_HW_TXSCHQ 512
 #define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
 #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
 	uint32_t priority;
 	uint32_t weight;
 	uint16_t lvl;
+	uint16_t rel_chan;
 	uint32_t parent_id;
 	uint32_t shaper_profile_id;
 	void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
 
 /*
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index b3d8ebd3c2..8255fdb504 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
+	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+		rc = -EINVAL;
+		goto err;
+	}
+
 	TAILQ_FOREACH(node, list, node) {
 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
 			continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 			goto err;
 	}
 
+	parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
 	return 0;
 err:
 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1293,6 +1315,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1319,6 +1342,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1359,6 +1383,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1384,6 +1409,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1408,6 +1434,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+			node->rel_chan = NIX_TM_CHAN_INVALID;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index e3948d369d..50a9d1e6f0 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -107,6 +107,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_init;
@@ -197,6 +198,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_get;
+	roc_nix_pfc_mode_set;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -261,6 +264,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v6 2/2] net/cnxk: support priority flow control
  2022-01-28 13:28               ` [PATCH v6 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-01-28 13:29                 ` skori
  2022-02-07 17:21                   ` [PATCH v7 1/2] common/cnxk: support priority flow ctrl config API skori
  0 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-01-28 13:29 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Patch implements priority flow control support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix application restart issue.
 
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.
 
v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree. 

v4..v5:
 - rebase on top of tree.

v5..v6:
 - fix review comments
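
A caller sketch for the ops as renamed in v6 (*_queue_config, plus the
new *_queue_info_get for capability discovery); assumes a configured
port and that TC 2 is within the range the info_get op reports:

	struct rte_eth_pfc_queue_conf pfc_conf;
	int rc;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.mode = RTE_ETH_FC_FULL;
	pfc_conf.rx_pause.tc = 2;	/* pause frames for TC2 ... */
	pfc_conf.rx_pause.tx_qid = 2;	/* ... gate Tx queue 2 */
	pfc_conf.tx_pause.tc = 2;	/* Rx queue 2 congestion ... */
	pfc_conf.tx_pause.rx_qid = 2;	/* ... emits pause for TC2 */
	rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev, &pfc_conf);
	if (rc)
		plt_err("Failed to configure PFC on TC2, rc=%d", rc);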

 drivers/net/cnxk/cnxk_ethdev.c     |  30 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  20 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 188 +++++++++++++++++++++++++++--
 3 files changed, 229 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..6f0c206422 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1260,6 +1260,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1548,6 +1550,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_config =
+				cnxk_nix_priority_flow_ctrl_queue_config,
+	.priority_flow_ctrl_queue_info_get =
+				cnxk_nix_priority_flow_ctrl_queue_info_get,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1721,6 +1727,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1736,6 +1744,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+				&pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..e3dc916c0c 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -366,6 +378,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -467,6 +481,10 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					     struct rte_eth_pfc_queue_conf *pfc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					       struct rte_eth_pfc_queue_info *pfc_info);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -606,6 +624,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..138d83daaa 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	return 0;
 }
 
@@ -230,6 +231,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +251,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +287,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +300,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +326,41 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_info *pfc_info)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
+	pfc_info->capa = RTE_ETH_PFC_QUEUE_CAPA_RX_PAUSE |
+			 RTE_ETH_PFC_QUEUE_CAPA_TX_PAUSE;
+	return 0;
+}
+
+int
+cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -911,3 +961,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v7 1/2] common/cnxk: support priority flow ctrl config API
  2022-01-28 13:29                 ` [PATCH v6 2/2] net/cnxk: support priority flow control skori
@ 2022-02-07 17:21                   ` skori
  2022-02-07 17:21                     ` [PATCH v7 2/2] net/cnxk: support priority flow control skori
  0 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-02-07 17:21 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
traffic per class on a link.

Patch adds a RoC interface to configure priority flow control on the
MAC block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code

v3..v4:
 - fix PFC configuration with other types of TM tree,
   i.e. default, user and rate-limit trees.

v4..v5:
 - rebase on top of tree
 - fix review comments
 - fix initialization error for LBK devices 

v5..v6:
 - fix review comments

v6..v7:
 - no change

 drivers/common/cnxk/roc_mbox.h       |  19 ++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   6 +-
 drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 310 insertions(+), 20 deletions(-)
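
Reviewer note (not part of the patch): a minimal sketch of the expected
RoC call sequence, grounded in the declarations added here.
roc_nix_pfc_mode_set() takes a single TC index in [0, 15], while
roc_nix_pfc_mode_get() reports the enabled classes back as a bitmap;
both return NIX_ERR_OP_NOTSUP on LBK devices.

static int
roc_pfc_enable_tc(struct roc_nix *roc_nix, uint16_t tc)
{
	struct roc_nix_pfc_cfg cfg;
	int rc;

	memset(&cfg, 0, sizeof(cfg));
	cfg.mode = ROC_NIX_FC_FULL;
	cfg.tc = tc; /* SET: TC index, not a bitmap */
	rc = roc_nix_pfc_mode_set(roc_nix, &cfg);
	if (rc)
		return rc;

	rc = roc_nix_pfc_mode_get(roc_nix, &cfg);
	if (rc)
		return rc;
	/* GET: cfg.tc is a bitmap of currently enabled classes */
	return (cfg.tc & BIT(tc)) ? 0 : -EIO;
}

The companion roc_nix_tm_pfc_prepare_tree() builds a dedicated
SCH2..SCH4 chain per SQ so that each traffic class can back-pressure
exactly one scheduler path; nix_tm_bp_config_set() below enforces this
by rejecting a different TC on an already-claimed parent node.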

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 8967858914..b608f58357 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -551,6 +553,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1125,7 +1140,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 755212c8f9..680a34cdcd 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -739,6 +752,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index d31137188e..8e31443b8f 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -161,7 +167,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -181,7 +187,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -222,7 +228,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -403,3 +411,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index deb2a6ba11..f3889424c4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
 /* Traffic Manager */
 #define NIX_TM_MAX_HW_TXSCHQ 512
 #define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
 #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
 	uint32_t priority;
 	uint32_t weight;
 	uint16_t lvl;
+	uint16_t rel_chan;
 	uint32_t parent_id;
 	uint32_t shaper_profile_id;
 	void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
 
 /*
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index a0448bec61..670cf66db4 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
+	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+		rc = -EINVAL;
+		goto err;
+	}
+
 	TAILQ_FOREACH(node, list, node) {
 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
 			continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 			goto err;
 	}
 
+	parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
 	return 0;
 err:
 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1299,6 +1321,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1325,6 +1348,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1365,6 +1389,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1390,6 +1415,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1414,6 +1440,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+			node->rel_chan = NIX_TM_CHAN_INVALID;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index ad1b5e8476..37ec100451 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -107,6 +107,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_head_tail_get;
@@ -198,6 +199,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_get;
+	roc_nix_pfc_mode_set;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -263,6 +266,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v7 2/2] net/cnxk: support priority flow control
  2022-02-07 17:21                   ` [PATCH v7 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-02-07 17:21                     ` skori
  2022-02-14  9:02                       ` [PATCH v8 1/2] common/cnxk: support priority flow ctrl config API skori
  0 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-02-07 17:21 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Patch implements priority flow control support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix application restart issue.
 
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.
 
v3..v4:
 - fix PFC configuration with other types of TM tree,
   i.e. default, user and rate-limit trees.

v4..v5:
 - rebase on top of tree.

v5..v6:
 - fix review comments

v6..v7:
 - use correct FC mode flags

 drivers/net/cnxk/cnxk_ethdev.c     |  30 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  20 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 187 +++++++++++++++++++++++++++--
 3 files changed, 228 insertions(+), 9 deletions(-)
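
Note (not part of the patch): dev->pfc_tc_sq_map is the driver's record
of which SQ each TC back-pressures; entries are initialized to 0xFFFF in
cnxk_nix_configure() and walked at uninit to undo any live mappings. The
invariant enforced by nix_priority_flow_ctrl_configure() can be condensed
as the following hypothetical helper, shown for illustration only:

static int
pfc_tc_sq_map_claim(uint16_t *map, uint8_t tc, uint16_t sq_qid)
{
	/* A TC may back-pressure at most one SQ at a time */
	if (map[tc] != 0xFFFF && map[tc] != sq_qid)
		return -ENOTSUP;
	map[tc] = sq_qid;
	return 0;
}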

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 53dfb5eae8..6a37a7b0e8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1263,6 +1263,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1551,6 +1553,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_config =
+				cnxk_nix_priority_flow_ctrl_queue_config,
+	.priority_flow_ctrl_queue_info_get =
+				cnxk_nix_priority_flow_ctrl_queue_info_get,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1730,6 +1736,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1745,6 +1753,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+				&pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index fadc8aaf45..d0dfa7cb70 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -372,6 +384,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -473,6 +487,10 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					     struct rte_eth_pfc_queue_conf *pfc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					       struct rte_eth_pfc_queue_info *pfc_info);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -617,6 +635,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index f20f201db2..f4669ee7cf 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	return 0;
 }
 
@@ -230,6 +231,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +251,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +287,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +300,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +326,40 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_info *pfc_info)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
+	pfc_info->mode_capa = RTE_ETH_FC_FULL;
+	return 0;
+}
+
+int
+cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -972,3 +1021,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v8 1/2] common/cnxk: support priority flow ctrl config API
  2022-02-07 17:21                     ` [PATCH v7 2/2] net/cnxk: support priority flow control skori
@ 2022-02-14  9:02                       ` skori
  2022-02-14  9:02                         ` [PATCH v8 2/2] net/cnxk: support priority flow control skori
  2022-02-14 10:06                         ` [PATCH v8 " Ray Kinsella
  0 siblings, 2 replies; 29+ messages in thread
From: skori @ 2022-02-14  9:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
traffic per class on a link.

Patch adds a RoC interface to configure priority flow control on the
MAC block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code

v3..v4:
 - fix PFC configuration with other types of TM tree,
   i.e. default, user and rate-limit trees.

v4..v5:
 - rebase on top of tree
 - fix review comments
 - fix initialization error for LBK devices 

v5..v6:
 - fix review comments

v6..v7:
 - no change

v7..v8:
 - rebase on top of 22.03-rc1

 drivers/common/cnxk/roc_mbox.h       |  19 ++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   6 +-
 drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 310 insertions(+), 20 deletions(-)
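
Note (not part of the patch): the key hardware change in roc_nix_tm.c is
the NIX_AF_TL3_TL2X_LINKX_CFG programming in nix_tm_bp_config_set(). A
condensed view of the encoding, assuming the field layout implied by the
register mask used in this patch (bit 13 enables backpressure, bits [7:0]
select the relative channel, i.e. the traffic class):

static inline void
linkx_cfg_bp(uint64_t *regval, uint64_t *regval_mask, uint16_t tc,
	     bool enable)
{
	/* Write only the BP enable bit and the relative channel field */
	*regval = enable ? (BIT_ULL(13) | (tc & GENMASK_ULL(7, 0))) : 0;
	*regval_mask = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
}

Disabling clears both fields, and the node's rel_chan bookkeeping is
reset to NIX_TM_CHAN_INVALID so the parent can later be claimed by a
different TC.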

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 8967858914..b608f58357 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -551,6 +553,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1125,7 +1140,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 755212c8f9..680a34cdcd 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -739,6 +752,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index d31137188e..8e31443b8f 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -161,7 +167,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -181,7 +187,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -222,7 +228,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -403,3 +411,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index deb2a6ba11..f3889424c4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
 /* Traffic Manager */
 #define NIX_TM_MAX_HW_TXSCHQ 512
 #define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
 #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
 	uint32_t priority;
 	uint32_t weight;
 	uint16_t lvl;
+	uint16_t rel_chan;
 	uint32_t parent_id;
 	uint32_t shaper_profile_id;
 	void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
 
 /*
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index a0448bec61..670cf66db4 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
+	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+		rc = -EINVAL;
+		goto err;
+	}
+
 	TAILQ_FOREACH(node, list, node) {
 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
 			continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 			goto err;
 	}
 
+	parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
 	return 0;
 err:
 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1299,6 +1321,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1325,6 +1348,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1365,6 +1389,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1390,6 +1415,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1414,6 +1440,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+			node->rel_chan = NIX_TM_CHAN_INVALID;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index ad1b5e8476..37ec100451 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -107,6 +107,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_head_tail_get;
@@ -198,6 +199,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_get;
+	roc_nix_pfc_mode_set;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -263,6 +266,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v8 2/2] net/cnxk: support priority flow control
  2022-02-14  9:02                       ` [PATCH v8 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-02-14  9:02                         ` skori
  2022-02-14 10:10                           ` [PATCH v9 1/2] common/cnxk: support priority flow ctrl config API skori
  2022-02-14 10:06                         ` [PATCH v8 " Ray Kinsella
  1 sibling, 1 reply; 29+ messages in thread
From: skori @ 2022-02-14  9:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Patch implements priority flow control support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix application restart issue.
 
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.
 
v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree. 

v4..v5:
 - rebase on top of tree.

v5..v6:
 - fix review comments

v6..v7:
 - use correct FC mode flags

v7..v8:
 - rebase on top of 22.03-rc1
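
For reference, a minimal usage sketch of the queue based PFC path this
patch wires up, as an application would drive it through the ethdev
layer (function names follow the 22.03 ethdev PFC API; the port id,
queue ids, TC and pause time below are illustrative values only):

	struct rte_eth_pfc_queue_conf pfc_conf = {0};
	int ret;

	/* Pause TC 3: generate pause frames when Rx queue 0 backs up
	 * and honour received pause frames on Tx queue 0.
	 */
	pfc_conf.mode = RTE_ETH_FC_FULL;
	pfc_conf.tx_pause.tc = 3;	/* TC carried in generated pause frames */
	pfc_conf.tx_pause.rx_qid = 0;	/* Rx queue whose congestion triggers pause */
	pfc_conf.tx_pause.pause_time = 0x100;
	pfc_conf.rx_pause.tc = 3;	/* TC honoured from received pause frames */
	pfc_conf.rx_pause.tx_qid = 0;	/* Tx queue that gets back-pressured */

	ret = rte_eth_dev_priority_flow_ctrl_queue_configure(0, &pfc_conf);
	if (ret)
		printf("PFC config failed: %d\n", ret);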

 drivers/net/cnxk/cnxk_ethdev.c     |  30 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  20 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 187 +++++++++++++++++++++++++++--
 3 files changed, 228 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 27751a6956..37ae0939d7 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1251,6 +1251,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1539,6 +1541,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_config =
+				cnxk_nix_priority_flow_ctrl_queue_config,
+	.priority_flow_ctrl_queue_info_get =
+				cnxk_nix_priority_flow_ctrl_queue_info_get,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1718,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1733,6 +1741,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+				&pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index fadc8aaf45..d0dfa7cb70 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -372,6 +384,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -473,6 +487,10 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					     struct rte_eth_pfc_queue_conf *pfc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					       struct rte_eth_pfc_queue_info *pfc_info);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -617,6 +635,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index f20f201db2..f4669ee7cf 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	return 0;
 }
 
@@ -230,6 +231,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +251,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +287,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +300,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +326,40 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_info *pfc_info)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
+	pfc_info->mode_capa = RTE_ETH_FC_FULL;
+	return 0;
+}
+
+int
+cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -972,3 +1021,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH v8 1/2] common/cnxk: support priority flow ctrl config API
  2022-02-14  9:02                       ` [PATCH v8 1/2] common/cnxk: support priority flow ctrl config API skori
  2022-02-14  9:02                         ` [PATCH v8 2/2] net/cnxk: support priority flow control skori
@ 2022-02-14 10:06                         ` Ray Kinsella
  1 sibling, 0 replies; 29+ messages in thread
From: Ray Kinsella @ 2022-02-14 10:06 UTC (permalink / raw)
  To: skori; +Cc: Nithin Dabilpuram, Kiran Kumar K, Satha Rao, dev


skori@marvell.com writes:

> From: Sunil Kumar Kori <skori@marvell.com>
>
> CNXK platforms support priority flow control(802.1qbb) to pause
> respective traffic per class on that link.
>
> Patch adds RoC interface to configure priority flow control on MAC
> block i.e. CGX on cn9k and RPM on cn10k.
>
> Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
> ---
> v1..v2:
>  - fix RoC API naming convention.
>
> v2..v3:
>  - fix pause quanta configuration for cn10k.
>  - remove unnecessary code
>
> v3..v4:
>  - fix PFC configuration with other type of TM tree
>    i.e. default, user and rate limit tree.
>
> v4..v5:
>  - rebase on top of tree
>  - fix review comments
>  - fix initialization error for LBK devices 
>
> v5..v6:
>  - fix review comments
>
> v6..v7:
>  - no change
>
> v7..v8:
>  - rebase on top of 22.03-rc1
>
>  drivers/common/cnxk/roc_mbox.h       |  19 ++-
>  drivers/common/cnxk/roc_nix.h        |  21 ++++
>  drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
>  drivers/common/cnxk/roc_nix_priv.h   |   6 +-
>  drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
>  drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
>  drivers/common/cnxk/version.map      |   4 +
>  7 files changed, 310 insertions(+), 20 deletions(-)
Acked-by: Ray Kinsella <mdr@ashroe.eu>

-- 
Regards, Ray K

^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v9 1/2] common/cnxk: support priority flow ctrl config API
  2022-02-14  9:02                         ` [PATCH v8 2/2] net/cnxk: support priority flow control skori
@ 2022-02-14 10:10                           ` skori
  2022-02-14 10:10                             ` [PATCH v9 2/2] net/cnxk: support priority flow control skori
  0 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-02-14 10:10 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
respective traffic per class on that link.

Patch adds a RoC interface to configure priority flow control on the
MAC block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code

v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree.

v4..v5:
 - rebase on top of tree
 - fix review comments
 - fix initialization error for LBK devices 

v5..v6:
 - fix review comments

v6..v7:
 - no change

v7..v8:
 - rebase on top of 22.03-rc1

v8..v9:
 - no change
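
For reference, a minimal sketch of how a PMD consumes the RoC interface
added here (error handling trimmed; the tc value and sq pointer are
illustrative):

	struct roc_nix_fc_cfg fc_cfg;
	struct roc_nix_pfc_cfg pfc_cfg;
	int rc;

	/* Back-pressure the SQ serving traffic class 3 */
	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
	fc_cfg.type = ROC_NIX_FC_TM_CFG;
	fc_cfg.tm_cfg.sq = sq->qid;
	fc_cfg.tm_cfg.tc = 3;
	fc_cfg.tm_cfg.enable = true;
	rc = roc_nix_fc_config_set(roc_nix, &fc_cfg);

	/* Then enable PFC for that class on the MAC (CGX/RPM) */
	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
	pfc_cfg.mode = ROC_NIX_FC_FULL;
	pfc_cfg.tc = 3;	/* class index on SET; bitmap on GET */
	rc = roc_nix_pfc_mode_set(roc_nix, &pfc_cfg);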

 drivers/common/cnxk/roc_mbox.h       |  19 ++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   6 +-
 drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 310 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 8967858914..b608f58357 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -551,6 +553,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1125,7 +1140,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 755212c8f9..680a34cdcd 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -739,6 +752,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index d31137188e..8e31443b8f 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -161,7 +167,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -181,7 +187,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -222,7 +228,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -403,3 +411,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index deb2a6ba11..f3889424c4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
 /* Traffic Manager */
 #define NIX_TM_MAX_HW_TXSCHQ 512
 #define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
 #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
 	uint32_t priority;
 	uint32_t weight;
 	uint16_t lvl;
+	uint16_t rel_chan;
 	uint32_t parent_id;
 	uint32_t shaper_profile_id;
 	void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
 
 /*
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index a0448bec61..670cf66db4 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
+	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+		rc = -EINVAL;
+		goto err;
+	}
+
 	TAILQ_FOREACH(node, list, node) {
 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
 			continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 			goto err;
 	}
 
+	parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
 	return 0;
 err:
 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1299,6 +1321,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1325,6 +1348,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1365,6 +1389,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1390,6 +1415,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1414,6 +1440,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+			node->rel_chan = NIX_TM_CHAN_INVALID;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index ad1b5e8476..37ec100451 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -107,6 +107,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_head_tail_get;
@@ -198,6 +199,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_get;
+	roc_nix_pfc_mode_set;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -263,6 +266,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v9 2/2] net/cnxk: support priority flow control
  2022-02-14 10:10                           ` [PATCH v9 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-02-14 10:10                             ` skori
  2022-02-18  6:11                               ` Jerin Jacob
                                                 ` (2 more replies)
  0 siblings, 3 replies; 29+ messages in thread
From: skori @ 2022-02-14 10:10 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Patch implements priority flow control support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
 - fix application restart issue.
 
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.
 
v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree. 

v4..v5:
 - rebase on top of tree.

v5..v6:
 - fix review comments

v6..v7:
 - use correct FC mode flags

v7..v8:
 - rebase on top of 22.03-rc1

v8..v9:
 - update documentation and release notes
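
For reference, the per-queue PFC capability this patch reports can be
queried as below (the app-level function name follows the 22.03 ethdev
API; port id is illustrative):

	struct rte_eth_pfc_queue_info pfc_info;
	int ret;

	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(0, &pfc_info);
	if (ret == 0) {
		/* On cnxk, tc_max reflects the Rx channel count and
		 * mode_capa is RTE_ETH_FC_FULL.
		 */
		printf("PFC: tc_max=%u mode_capa=%u\n",
		       pfc_info.tc_max, pfc_info.mode_capa);
	}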

 doc/guides/nics/cnxk.rst               |   1 +
 doc/guides/rel_notes/release_22_03.rst |   4 +
 drivers/net/cnxk/cnxk_ethdev.c         |  30 ++++
 drivers/net/cnxk/cnxk_ethdev.h         |  20 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c     | 187 +++++++++++++++++++++++--
 5 files changed, 233 insertions(+), 9 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 27a94204cb..c9467f5d2a 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -36,6 +36,7 @@ Features of the CNXK Ethdev PMD are:
 - Support Rx interrupt
 - Inline IPsec processing support
 - Ingress meter support
+- Queue based priority flow control support
 
 Prerequisites
 -------------
diff --git a/doc/guides/rel_notes/release_22_03.rst b/doc/guides/rel_notes/release_22_03.rst
index ff3095d742..479448fafc 100644
--- a/doc/guides/rel_notes/release_22_03.rst
+++ b/doc/guides/rel_notes/release_22_03.rst
@@ -136,6 +136,10 @@ New Features
   * Added AES-CMAC support in CN9K & CN10K.
   * Added ESN and anti-replay support in lookaside protocol (IPsec) for CN10K.
 
+* **Updated Marvell cnxk ethdev PMD.**
+
+  * Added queue based priority flow control support for CN9K & CN10K.
+
 * **Added support for CPM2.0b devices to Intel QuickAssist Technology PMD.**
 
   * CPM2.0b (4942) devices are now enabled for QAT crypto PMD.
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 27751a6956..37ae0939d7 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1251,6 +1251,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1539,6 +1541,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_config =
+				cnxk_nix_priority_flow_ctrl_queue_config,
+	.priority_flow_ctrl_queue_info_get =
+				cnxk_nix_priority_flow_ctrl_queue_info_get,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1718,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1733,6 +1741,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+				&pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index fadc8aaf45..d0dfa7cb70 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -372,6 +384,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -473,6 +487,10 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					     struct rte_eth_pfc_queue_conf *pfc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					       struct rte_eth_pfc_queue_info *pfc_info);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -617,6 +635,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index f20f201db2..f4669ee7cf 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	return 0;
 }
 
@@ -230,6 +231,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +251,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +287,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +300,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +326,40 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_info *pfc_info)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
+	pfc_info->mode_capa = RTE_ETH_FC_FULL;
+	return 0;
+}
+
+int
+cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -972,3 +1021,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH v9 2/2] net/cnxk: support priority flow control
  2022-02-14 10:10                             ` [PATCH v9 2/2] net/cnxk: support priority flow control skori
@ 2022-02-18  6:11                               ` Jerin Jacob
  2022-02-22  8:06                                 ` [EXT] " Sunil Kumar Kori
  2022-02-22  8:58                               ` [PATCH v10 1/2] common/cnxk: support priority flow ctrl config API skori
  2022-02-22 10:37                               ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API skori
  2 siblings, 1 reply; 29+ messages in thread
From: Jerin Jacob @ 2022-02-18  6:11 UTC (permalink / raw)
  To: Sunil Kumar Kori; +Cc: Nithin Dabilpuram, Kiran Kumar K, Satha Rao, dpdk-dev

On Mon, Feb 14, 2022 at 3:40 PM <skori@marvell.com> wrote:
>
> From: Sunil Kumar Kori <skori@marvell.com>
>
> Patch implements priority flow control support for CNXK platforms.

Add support for priority flow control support for CNXK platforms.

>
> Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
> ---
> v1..v2:
>  - fix application restart issue.
>
> v2..v3:
>  - fix pause quanta configuration for cn10k.
>  - fix review comments.
>
> v3..v4:
>  - fix PFC configuration with other type of TM tree
>    i.e. default, user and rate limit tree.
>
> v4..v5:
>  - rebase on top of tree.
>
> v5..v6:
>  - fix review comments
>
> v6..v7:
>  - use correct FC mode flags
>
> v7..v8:
>  - rebase on top of 22.03-rc1
>
> v8..v9:
>  - update documentation and release notes
>
>  doc/guides/nics/cnxk.rst               |   1 +

>
> +* **Updated Marvell cnxk ethdev PMD.**
> +
> +  * Added queue based priority flow control support for CN9K & CN10K.
> +

Move near to all ethdev PMD release notes

# Please fix the build issue with RHEL7
http://mails.dpdk.org/archives/test-report/2022-February/260982.html

^ permalink raw reply	[flat|nested] 29+ messages in thread

* RE: [EXT] Re: [PATCH v9 2/2] net/cnxk: support priority flow control
  2022-02-18  6:11                               ` Jerin Jacob
@ 2022-02-22  8:06                                 ` Sunil Kumar Kori
  0 siblings, 0 replies; 29+ messages in thread
From: Sunil Kumar Kori @ 2022-02-22  8:06 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Nithin Kumar Dabilpuram, Kiran Kumar Kokkilagadda,
	Satha Koteswara Rao Kottidi, dpdk-dev

> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Friday, February 18, 2022 11:42 AM
> To: Sunil Kumar Kori <skori@marvell.com>
> Cc: Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>; Kiran Kumar
> Kokkilagadda <kirankumark@marvell.com>; Satha Koteswara Rao Kottidi
> <skoteshwar@marvell.com>; dpdk-dev <dev@dpdk.org>
> Subject: [EXT] Re: [PATCH v9 2/2] net/cnxk: support priority flow control
> 
> External Email
> 
> ----------------------------------------------------------------------
> On Mon, Feb 14, 2022 at 3:40 PM <skori@marvell.com> wrote:
> >
> > From: Sunil Kumar Kori <skori@marvell.com>
> >
> > Patch implements priority flow control support for CNXK platforms.
> 
> Add support for priority flow control support for CNXK platforms.
> 
Ack.
> >
> > Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
> > ---
> > v1..v2:
> >  - fix application restart issue.
> >
> > v2..v3:
> >  - fix pause quanta configuration for cn10k.
> >  - fix review comments.
> >
> > v3..v4:
> >  - fix PFC configuration with other type of TM tree
> >    i.e. default, user and rate limit tree.
> >
> > v4..v5:
> >  - rebase on top of tree.
> >
> > v5..v6:
> >  - fix review comments
> >
> > v6..v7:
> >  - use correct FC mode flags
> >
> > v7..v8:
> >  - rebase on top of 22.03-rc1
> >
> > v8..v9:
> >  - update documentation and release notes
> >
> >  doc/guides/nics/cnxk.rst               |   1 +
> 
> >
> > +* **Updated Marvell cnxk ethdev PMD.**
> > +
> > +  * Added queue based priority flow control support for CN9K & CN10K.
> > +
> 
> Move near to all ethdev PMD release notes
> 
> # Please fix the build issue with RHEL7
> http://mails.dpdk.org/archives/test-report/2022-February/260982.html
Ack.

^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH v10 1/2] common/cnxk: support priority flow ctrl config API
  2022-02-14 10:10                             ` [PATCH v9 2/2] net/cnxk: support priority flow control skori
  2022-02-18  6:11                               ` Jerin Jacob
@ 2022-02-22  8:58                               ` skori
  2022-02-22  8:58                                 ` [PATCH v10 2/2] net/cnxk: support priority flow control skori
  2022-02-22 10:37                               ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API skori
  2 siblings, 1 reply; 29+ messages in thread
From: skori @ 2022-02-22  8:58 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
traffic per class on a given link.

Adds a RoC interface to configure priority flow control on the MAC
block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
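
For illustration only: a minimal sketch of driving the new RoC calls,
assuming an initialized struct roc_nix. Note the asymmetry documented in
roc_nix_pfc_cfg below: tc is a class index on SET but a bitmap on GET.
The helper name is invented for the example.

/* Sketch, not part of the patch: enable PFC on traffic class 3 in both
 * directions, then read back the aggregate state.
 */
static int
example_pfc_enable_tc3(struct roc_nix *roc_nix)
{
	struct roc_nix_pfc_cfg cfg = {0};
	int rc;

	cfg.mode = ROC_NIX_FC_FULL; /* pause both Rx and Tx */
	cfg.tc = 3;		    /* SET: class index in [0, 15] */
	rc = roc_nix_pfc_mode_set(roc_nix, &cfg);
	if (rc)
		return rc;

	rc = roc_nix_pfc_mode_get(roc_nix, &cfg);
	if (rc)
		return rc;

	/* GET: cfg.tc is now a bitmap of enabled classes */
	return (cfg.tc & (1U << 3)) ? 0 : -1;
}
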
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code

v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree.

v4..v5:
 - rebase on top of tree
 - fix review comments
 - fix initialization error for LBK devices 

v5..v6:
 - fix review comments

v6..v7:
 - no change

v7..v8:
 - rebase on top of 22.03-rc1

v8..v10:
 - no change

 drivers/common/cnxk/roc_mbox.h       |  19 ++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   6 +-
 drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 310 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 8967858914..b608f58357 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -551,6 +553,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1125,7 +1140,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 755212c8f9..680a34cdcd 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -739,6 +752,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index d31137188e..8e31443b8f 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -161,7 +167,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -181,7 +187,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -222,7 +228,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -403,3 +411,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index deb2a6ba11..f3889424c4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
 /* Traffic Manager */
 #define NIX_TM_MAX_HW_TXSCHQ 512
 #define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
 #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
 	uint32_t priority;
 	uint32_t weight;
 	uint16_t lvl;
+	uint16_t rel_chan;
 	uint32_t parent_id;
 	uint32_t shaper_profile_id;
 	void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
 
 /*
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index a0448bec61..670cf66db4 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
+	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+		rc = -EINVAL;
+		goto err;
+	}
+
 	TAILQ_FOREACH(node, list, node) {
 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
 			continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 			goto err;
 	}
 
+	parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
 	return 0;
 err:
 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1299,6 +1321,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1325,6 +1348,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1365,6 +1389,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1390,6 +1415,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1414,6 +1440,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+			node->rel_chan = NIX_TM_CHAN_INVALID;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index ad1b5e8476..37ec100451 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -107,6 +107,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_head_tail_get;
@@ -198,6 +199,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_get;
+	roc_nix_pfc_mode_set;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -263,6 +266,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread
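
The nix_tm_bp_config_set() hunk above packs two fields into
NIX_AF_TL3_TL2X_LINKX_CFG: the traffic class (relative channel) in bits
[7:0] and the backpressure enable in bit 13, with a mask that always
clears both fields. A self-contained sketch of that encoding, with macro
names invented here:

#include <stdbool.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define LINKX_BP_ENA	  (1ULL << 13)	    /* backpressure enable bit */
#define LINKX_REL_CHAN	  GENMASK_ULL(7, 0) /* relative channel / TC field */

/* Mirror of the regval/regval_mask pair written per TL3/TL2 node: the
 * mask clears both fields, the value sets them only when enabling.
 */
static void
linkx_cfg_encode(bool enable, uint16_t tc, uint64_t *val, uint64_t *mask)
{
	*val = enable ? (LINKX_BP_ENA | tc) : 0;
	*mask = ~(LINKX_BP_ENA | LINKX_REL_CHAN);
}
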

* [PATCH v10 2/2] net/cnxk: support priority flow control
  2022-02-22  8:58                               ` [PATCH v10 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-02-22  8:58                                 ` skori
  0 siblings, 0 replies; 29+ messages in thread
From: skori @ 2022-02-22  8:58 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Adds priority flow control support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
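
From the application side this maps onto the PFC queue API added in DPDK
22.03; a hedged sketch (queue and TC values are arbitrary), following
the field mapping in the cnxk_ethdev_ops.c hunk below:

#include <rte_ethdev.h>

/* Sketch: pair Rx/Tx queue `qid` with traffic class `tc` on `port`.
 * rx_pause describes the reaction to received pause frames (which SQ to
 * backpressure); tx_pause describes pause-frame generation (which RQ's
 * congestion triggers it).
 */
static int
example_pfc_queue_setup(uint16_t port, uint16_t qid, uint8_t tc)
{
	struct rte_eth_pfc_queue_conf conf = {0};

	conf.mode = RTE_ETH_FC_FULL;
	conf.rx_pause.tx_qid = qid;
	conf.rx_pause.tc = tc;
	conf.tx_pause.rx_qid = qid;
	conf.tx_pause.tc = tc;
	conf.tx_pause.pause_time = 0x100;

	return rte_eth_dev_priority_flow_ctrl_queue_configure(port, &conf);
}
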
---
v1..v2:
 - fix application restart issue.
 
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.
 
v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree. 

v4..v5:
 - rebase on top of tree.

v5..v6:
 - fix review comments

v6..v7:
 - use correct FC mode flags

v7..v8:
 - rebase on top of 22.03-rc1

v8..v9:
 - update documentation and release notes

v9..v10:
 - fix build error on RHEL

 doc/guides/nics/cnxk.rst               |   1 +
 doc/guides/rel_notes/release_22_03.rst |   4 +
 drivers/net/cnxk/cnxk_ethdev.c         |  32 +++++
 drivers/net/cnxk/cnxk_ethdev.h         |  20 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c     | 189 +++++++++++++++++++++++--
 5 files changed, 237 insertions(+), 9 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 27a94204cb..c9467f5d2a 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -36,6 +36,7 @@ Features of the CNXK Ethdev PMD are:
 - Support Rx interrupt
 - Inline IPsec processing support
 - Ingress meter support
+- Queue based priority flow control support
 
 Prerequisites
 -------------
diff --git a/doc/guides/rel_notes/release_22_03.rst b/doc/guides/rel_notes/release_22_03.rst
index cbc98e798f..a0f2e2e414 100644
--- a/doc/guides/rel_notes/release_22_03.rst
+++ b/doc/guides/rel_notes/release_22_03.rst
@@ -121,6 +121,10 @@ New Features
 
   * Added LED OEM support.
 
+* **Updated Marvell cnxk ethdev PMD.**
+
+  * Added queue based priority flow control support for CN9K & CN10K.
+
 * **Added an API for private user data in asymmetric crypto session.**
 
   An API was added to get/set an asymmetric crypto session's user data.
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 27751a6956..bd8ced867e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1251,6 +1251,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1539,6 +1541,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_config =
+				cnxk_nix_priority_flow_ctrl_queue_config,
+	.priority_flow_ctrl_queue_info_get =
+				cnxk_nix_priority_flow_ctrl_queue_info_get,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1718,7 +1724,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf;
 	struct roc_nix *nix = &dev->nix;
+	struct rte_eth_fc_conf fc_conf;
 	int rc, i;
 
 	plt_free(eth_dev->security_ctx);
@@ -1733,6 +1741,30 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
+	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+				&pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index fadc8aaf45..d0dfa7cb70 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -372,6 +384,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -473,6 +487,10 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					     struct rte_eth_pfc_queue_conf *pfc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					       struct rte_eth_pfc_queue_info *pfc_info);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -617,6 +635,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index f20f201db2..12ba0c641b 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	return 0;
 }
 
@@ -230,6 +231,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +251,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +287,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +300,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +326,42 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_info *pfc_info)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
+	pfc_info->mode_capa = RTE_ETH_FC_FULL;
+	return 0;
+}
+
+int
+cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf;
+	int rc;
+
+	memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -972,3 +1023,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread
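
One invariant in nix_priority_flow_ctrl_configure() above is easy to
miss: a traffic class may drive backpressure on at most one SQ, tracked
in pfc_tc_sq_map[] where 0xFFFF (from the memset of 0xFF bytes) marks a
free slot. A standalone sketch of that guard, with the helper name
invented here:

#include <errno.h>
#include <stdint.h>

#define PFC_CHAN_COUNT 16     /* matches CNXK_NIX_PFC_CHAN_COUNT */
#define TC_SQ_UNMAPPED 0xFFFF /* memset(map, 0xFF, ...) pattern */

static int
pfc_tc_sq_map_update(uint16_t map[PFC_CHAN_COUNT], uint8_t tc, uint16_t sq)
{
	/* Same TC on two different SQs is rejected by the driver */
	if (map[tc] != TC_SQ_UNMAPPED && map[tc] != sq)
		return -ENOTSUP;

	map[tc] = sq;
	return 0;
}
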

* [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API
  2022-02-14 10:10                             ` [PATCH v9 2/2] net/cnxk: support priority flow control skori
  2022-02-18  6:11                               ` Jerin Jacob
  2022-02-22  8:58                               ` [PATCH v10 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-02-22 10:37                               ` skori
  2022-02-22 10:37                                 ` [PATCH v11 2/2] net/cnxk: support priority flow control skori
  2022-02-23 11:36                                 ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API Jerin Jacob
  2 siblings, 2 replies; 29+ messages in thread
From: skori @ 2022-02-22 10:37 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
traffic per class on a given link.

Adds a RoC interface to configure priority flow control on the MAC
block, i.e. CGX on cn9k and RPM on cn10k.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
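
One behavioural change buried in the roc_nix_fc.c hunk below:
backpressure IDs are now requested per channel (16 for CGX, 1 for LBK)
instead of only for channel 0, and the 9-bit BPID is masked out of each
response word. A small sketch of that mapping, with the helper name
invented here:

#include <stdint.h>

#define NIX_BPID_MASK 0x1FF /* BPID lives in the low 9 bits */

/* Copy one BPID per channel out of the mailbox response, as the updated
 * nix_fc_rxchan_bpid_set() does after nix_bp_enable.
 */
static void
bpid_map_from_rsp(uint16_t *bpid, const uint16_t *chan_bpid,
		  uint16_t chan_cnt)
{
	uint16_t i;

	for (i = 0; i < chan_cnt; i++)
		bpid[i] = chan_bpid[i] & NIX_BPID_MASK;
}
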
---
v1..v2:
 - fix RoC API naming convention.

v2..v3:
 - fix pause quanta configuration for cn10k.
 - remove unnecessary code

v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree.

v4..v5:
 - rebase on top of tree
 - fix review comments
 - fix initialization error for LBK devices 

v5..v6:
 - fix review comments

v6..v7:
 - no change

v7..v8:
 - rebase on top of 22.03-rc1

v8..v10:
 - no change

v10..v11:
 - Rebase to dpdk-next-net-mrvl branch

 drivers/common/cnxk/roc_mbox.h       |  19 ++-
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   6 +-
 drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 310 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 8967858914..b608f58357 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -551,6 +553,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1125,7 +1140,9 @@ struct nix_bp_cfg_req {
 /* PF can be mapped to either CGX or LBK interface,
  * so maximum 64 channels are possible.
  */
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 755212c8f9..680a34cdcd 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -739,6 +752,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index d31137188e..8e31443b8f 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -161,7 +167,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -181,7 +187,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -222,7 +228,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -403,3 +411,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = pfc_cfg->tc;
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index deb2a6ba11..f3889424c4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -33,6 +33,7 @@ struct nix_qint {
 /* Traffic Manager */
 #define NIX_TM_MAX_HW_TXSCHQ 512
 #define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
 #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
@@ -56,6 +57,7 @@ struct nix_tm_node {
 	uint32_t priority;
 	uint32_t weight;
 	uint16_t lvl;
+	uint16_t rel_chan;
 	uint32_t parent_id;
 	uint32_t shaper_profile_id;
 	void (*free_fn)(void *node);
@@ -139,6 +141,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
 
 /*
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 517502b1af..ecf3edfa44 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
+	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+		rc = -EINVAL;
+		goto err;
+	}
+
 	TAILQ_FOREACH(node, list, node) {
 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
 			continue;
@@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 			goto err;
 	}
 
+	parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
 	return 0;
 err:
 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
@@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1301,6 +1323,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1327,6 +1350,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_DEFAULT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1367,6 +1391,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1392,6 +1417,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
@@ -1416,6 +1442,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 		node->lvl = leaf_lvl;
 		node->tree = ROC_NIX_TM_RLIMIT;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = ROC_TM_LVL_SCH3;
+	leaf_lvl = ROC_TM_LVL_QUEUE;
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+	node->rel_chan = NIX_TM_CHAN_INVALID;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+			node->rel_chan = NIX_TM_CHAN_INVALID;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		lvl = ROC_TM_LVL_SCH4;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+		node->rel_chan = NIX_TM_CHAN_INVALID;
 
 		rc = nix_tm_node_add(roc_nix, node);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3d81247a12..d3d39eeb99 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 75a260f11e..c14af6afed 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -107,6 +107,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_head_tail_get;
@@ -198,6 +199,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_get;
+	roc_nix_pfc_mode_set;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -263,6 +266,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread
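
To make the shape of roc_nix_tm_pfc_prepare_tree() above concrete: it
builds one shared TL1 (ROOT) / TL2 (SCH1) pair, then a private
SCH2->SCH3->SCH4->leaf chain per Tx queue, so each SQ can be paused on
its own channel. A sketch of the resulting node budget, under that
reading of the loop:

#include <stdint.h>

/* Node count of the ROC_NIX_TM_PFC tree: per Tx queue the loop allocates
 * SCH2 and SCH3 (lvl_end == ROC_TM_LVL_SCH3), then one SCH4 node and one
 * leaf, on top of one shared ROOT and one SCH1 node.
 */
static uint32_t
pfc_tree_node_count(uint32_t nb_tx_queues)
{
	const uint32_t shared = 2;     /* TL1 + TL2 */
	const uint32_t per_sq = 3 + 1; /* SCH2, SCH3, SCH4 + leaf */

	return shared + per_sq * nb_tx_queues;
}
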

* [PATCH v11 2/2] net/cnxk: support priority flow control
  2022-02-22 10:37                               ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API skori
@ 2022-02-22 10:37                                 ` skori
  2022-02-23 11:36                                 ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API Jerin Jacob
  1 sibling, 0 replies; 29+ messages in thread
From: skori @ 2022-02-22 10:37 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

Adds priority flow control support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
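
The MAC-level programming (carried unchanged from the v10
cnxk_ethdev_ops.c hunk earlier in the thread) keeps a class_en bitmap as
the union of all per-queue requests: while any class remains enabled,
the MAC is kept in FULL mode and receives the whole bitmap. A standalone
sketch of that bookkeeping, with the helper name invented here:

#include <stdbool.h>
#include <stdint.h>

/* Update the per-port PFC class bitmap for one traffic class; the
 * resulting bitmap is what gets programmed into the MAC via
 * roc_nix_pfc_mode_set().
 */
static uint16_t
pfc_class_en_update(uint16_t class_en, uint8_t rx_tc, bool tx_pause)
{
	if (tx_pause)
		class_en |= (uint16_t)(1U << rx_tc);
	else
		class_en &= (uint16_t)~(1U << rx_tc);

	return class_en;
}
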
---
v1..v2:
 - fix application restart issue.
 
v2..v3:
 - fix pause quanta configuration for cn10k.
 - fix review comments.
 
v3..v4:
 - fix PFC configuration with other type of TM tree
   i.e. default, user and rate limit tree. 

v4..v5:
 - rebase on top of tree.

v5..v6:
 - fix review comments

v6..v7:
 - use correct FC mode flags

v7..v8:
 - rebase on top of 22.03-rc1

v8..v9:
 - update documentation and release notes

v9..v10:
 - fix build error on RHEL

v10..v11:
 - rebase to dpdk-next-net-mrvl branch

 doc/guides/nics/cnxk.rst               |   1 +
 doc/guides/rel_notes/release_22_03.rst |   4 +
 drivers/net/cnxk/cnxk_ethdev.c         |  32 +++++
 drivers/net/cnxk/cnxk_ethdev.h         |  20 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c     | 188 +++++++++++++++++++++++--
 5 files changed, 236 insertions(+), 9 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 27a94204cb..c9467f5d2a 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -36,6 +36,7 @@ Features of the CNXK Ethdev PMD are:
 - Support Rx interrupt
 - Inline IPsec processing support
 - Ingress meter support
+- Queue based priority flow control support
 
 Prerequisites
 -------------
diff --git a/doc/guides/rel_notes/release_22_03.rst b/doc/guides/rel_notes/release_22_03.rst
index 41923f50e6..112dde0e79 100644
--- a/doc/guides/rel_notes/release_22_03.rst
+++ b/doc/guides/rel_notes/release_22_03.rst
@@ -130,6 +130,10 @@ New Features
 
   * Added LED OEM support.
 
+* **Updated Marvell cnxk ethdev PMD.**
+
+  * Added queue-based priority flow control support for CN9K and CN10K.
+
 * **Added an API for private user data in asymmetric crypto session.**
 
   An API was added to get/set an asymmetric crypto session's user data.
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 3468aab329..0558bc3eed 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1251,6 +1251,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore the queue config when a reconfigure is followed by
 	 * another reconfigure and no queue configure is invoked in between.
@@ -1547,6 +1549,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_config =
+				cnxk_nix_priority_flow_ctrl_queue_config,
+	.priority_flow_ctrl_queue_info_get =
+				cnxk_nix_priority_flow_ctrl_queue_info_get,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1726,7 +1732,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf;
 	struct roc_nix *nix = &dev->nix;
+	struct rte_eth_fc_conf fc_conf;
 	int rc, i;
 
 	/* Disable switch hdr pkind */
@@ -1744,6 +1752,30 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
+	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+				&pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
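
A side note on the 0xFF fill used above to invalidate the TC-to-SQ map:
pfc_tc_sq_map holds uint16_t entries, so the byte-wise memset() yields the
0xFFFF sentinel that the uninit path compares against. A standalone
illustration (not driver code):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint16_t map[16];

		/* Byte-wise 0xFF fill marks each 16-bit entry as 0xFFFF. */
		memset(map, 0xFF, sizeof(map));
		assert(map[0] == 0xFFFF && map[15] == 0xFFFF);
		return 0;
	}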
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index ad568c9fcd..d71e7465e1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
 /* SPI will be in 20 bits of tag */
 #define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
 
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -372,6 +384,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -473,6 +487,10 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					     struct rte_eth_pfc_queue_conf *pfc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					       struct rte_eth_pfc_queue_info *pfc_info);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -617,6 +635,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
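
The rx_*/tx_* naming in struct cnxk_pfc_cfg above is easy to misread; the
conversion in cnxk_nix_priority_flow_ctrl_queue_config() below maps the
ethdev view onto it as follows (a reading aid, not new code):

	/*
	 * pfc_conf->tx_pause.{tc, rx_qid, pause_time} -> conf.rx_tc/rx_qid:
	 *   congestion on this local Rx queue makes the MAC transmit pause
	 *   frames for that TC.
	 * pfc_conf->rx_pause.{tc, tx_qid} -> conf.tx_tc/tx_qid:
	 *   pause frames received for that TC back-pressure this local Tx
	 *   queue (SQ).
	 */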
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 1ae90092d6..b0a16f3c56 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -230,6 +230,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +250,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +286,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+			      1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +299,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+			      1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +325,42 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_info *pfc_info)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
+	pfc_info->mode_capa = RTE_ETH_FC_FULL;
+	return 0;
+}
+
+int
+cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+					 struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf;
+	int rc;
+
+	memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -972,3 +1022,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = pfc->class_en;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
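
The MAC block step above keeps a per-TC enable bitmap (pfc->class_en) and
holds the port in full flow-control mode while any class stays enabled. A
standalone sketch of that bookkeeping (values illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1U << (n))

	int main(void)
	{
		uint16_t class_en = 0;

		class_en |= BIT(3);	/* enable PFC for TC3 */
		class_en |= BIT(5);	/* enable PFC for TC5 */
		class_en &= ~BIT(3);	/* later, disable TC3 again */

		/* Any set bit keeps the MAC in RTE_ETH_FC_FULL mode. */
		printf("TC bitmap 0x%04x, keep FULL mode: %s\n",
		       class_en, class_en ? "yes" : "no");
		return 0;
	}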
-- 
2.25.1


^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API
  2022-02-22 10:37                               ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API skori
  2022-02-22 10:37                                 ` [PATCH v11 2/2] net/cnxk: support priority flow control skori
@ 2022-02-23 11:36                                 ` Jerin Jacob
  1 sibling, 0 replies; 29+ messages in thread
From: Jerin Jacob @ 2022-02-23 11:36 UTC (permalink / raw)
  To: Sunil Kumar Kori
  Cc: Nithin Dabilpuram, Kiran Kumar K, Satha Rao, Ray Kinsella, dpdk-dev

On Tue, Feb 22, 2022 at 4:08 PM <skori@marvell.com> wrote:
>
> From: Sunil Kumar Kori <skori@marvell.com>
>
> CNXK platforms support priority flow control (802.1Qbb) to pause
> respective traffic per class on that link.
>
> Adds RoC interface to configure priority flow control on MAC
> block i.e. CGX on cn9k and RPM on cn10k.
>
> Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
> ---
> v1..v2:
>  - fix RoC API naming convention.
>
> v2..v3:
>  - fix pause quanta configuration for cn10k.
>  - remove unnecessary code
>
> v3..v4:
>  - fix PFC configuration with other type of TM tree
>    i.e. default, user and rate limit tree.
>
> v4..v5:
>  - rebase on top of tree
>  - fix review comments
>  - fix initialization error for LBK devices
>
> v5..v6:
>  - fix review comments
>
> v6..v7:
>  - no change
>
> v7..v8:
>  - rebase on top of 22.03-rc1
>
> v8..v10:
>  - no change
>
> v10..v11:
>  - Rebase to dpdk-next-net-mrvl branch

Series Acked-by: Jerin Jacob <jerinj@marvell.com>
Series applied to dpdk-next-net-mrvl/for-next-net. Thanks.



>
>  drivers/common/cnxk/roc_mbox.h       |  19 ++-
>  drivers/common/cnxk/roc_nix.h        |  21 ++++
>  drivers/common/cnxk/roc_nix_fc.c     |  95 +++++++++++++--
>  drivers/common/cnxk/roc_nix_priv.h   |   6 +-
>  drivers/common/cnxk/roc_nix_tm.c     | 171 ++++++++++++++++++++++++++-
>  drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
>  drivers/common/cnxk/version.map      |   4 +
>  7 files changed, 310 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
> index 8967858914..b608f58357 100644
> --- a/drivers/common/cnxk/roc_mbox.h
> +++ b/drivers/common/cnxk/roc_mbox.h
> @@ -95,6 +95,8 @@ struct mbox_msghdr {
>           msg_rsp)                                                             \
>         M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
>         M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
> +       M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
> +         cgx_pfc_rsp)                                                         \
>         /* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
>         M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
>           npa_lf_alloc_rsp)                                                    \
> @@ -551,6 +553,19 @@ struct cgx_pause_frm_cfg {
>         uint8_t __io tx_pause;
>  };
>
> +struct cgx_pfc_cfg {
> +       struct mbox_msghdr hdr;
> +       uint8_t __io rx_pause;
> +       uint8_t __io tx_pause;
> +       uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
> +};
> +
> +struct cgx_pfc_rsp {
> +       struct mbox_msghdr hdr;
> +       uint8_t __io rx_pause;
> +       uint8_t __io tx_pause;
> +};
> +
>  struct sfp_eeprom_s {
>  #define SFP_EEPROM_SIZE 256
>         uint16_t __io sff_id;
> @@ -1125,7 +1140,9 @@ struct nix_bp_cfg_req {
>  /* PF can be mapped to either CGX or LBK interface,
>   * so maximum 64 channels are possible.
>   */
> -#define NIX_MAX_CHAN 64
> +#define NIX_MAX_CHAN    64
> +#define NIX_CGX_MAX_CHAN 16
> +#define NIX_LBK_MAX_CHAN 1
>  struct nix_bp_cfg_rsp {
>         struct mbox_msghdr hdr;
>         /* Channel and bpid mapping */
> diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
> index 755212c8f9..680a34cdcd 100644
> --- a/drivers/common/cnxk/roc_nix.h
> +++ b/drivers/common/cnxk/roc_nix.h
> @@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
>
>                 struct {
>                         uint32_t rq;
> +                       uint16_t tc;
>                         uint16_t cq_drop;
>                         bool enable;
>                 } cq_cfg;
>
>                 struct {
> +                       uint32_t sq;
> +                       uint16_t tc;
>                         bool enable;
>                 } tm_cfg;
>         };
>  };
>
> +struct roc_nix_pfc_cfg {
> +       enum roc_nix_fc_mode mode;
> +       /* For SET, tc must be [0, 15].
> +        * For GET, TC will represent bitmap
> +        */
> +       uint16_t tc;
> +};
> +
>  struct roc_nix_eeprom_info {
>  #define ROC_NIX_EEPROM_SIZE 256
>         uint16_t sff_id;
> @@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
>  enum roc_nix_tm_tree {
>         ROC_NIX_TM_DEFAULT = 0,
>         ROC_NIX_TM_RLIMIT,
> +       ROC_NIX_TM_PFC,
>         ROC_NIX_TM_USER,
>         ROC_NIX_TM_TREE_MAX,
>  };
> @@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
>  int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
>  int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
>  int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
> +int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
>  bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
>  int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
>
> @@ -739,6 +752,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
>  int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
>                                   enum roc_nix_fc_mode mode);
>
> +int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
> +                                  struct roc_nix_pfc_cfg *pfc_cfg);
> +
> +int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
> +                                  struct roc_nix_pfc_cfg *pfc_cfg);
> +
> +uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
> +
>  enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
>
>  void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
> diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
> index d31137188e..8e31443b8f 100644
> --- a/drivers/common/cnxk/roc_nix_fc.c
> +++ b/drivers/common/cnxk/roc_nix_fc.c
> @@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
>         struct mbox *mbox = get_mbox(roc_nix);
>         struct nix_bp_cfg_req *req;
>         struct nix_bp_cfg_rsp *rsp;
> -       int rc = -ENOSPC;
> +       int rc = -ENOSPC, i;
>
>         if (roc_nix_is_sdp(roc_nix))
>                 return 0;
> @@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
>                 req = mbox_alloc_msg_nix_bp_enable(mbox);
>                 if (req == NULL)
>                         return rc;
> +
>                 req->chan_base = 0;
> -               req->chan_cnt = 1;
> -               req->bpid_per_chan = 0;
> +               if (roc_nix_is_lbk(roc_nix))
> +                       req->chan_cnt = NIX_LBK_MAX_CHAN;
> +               else
> +                       req->chan_cnt = NIX_CGX_MAX_CHAN;
> +
> +               req->bpid_per_chan = true;
>
>                 rc = mbox_process_msg(mbox, (void *)&rsp);
>                 if (rc || (req->chan_cnt != rsp->chan_cnt))
>                         goto exit;
>
> -               nix->bpid[0] = rsp->chan_bpid[0];
>                 nix->chan_cnt = rsp->chan_cnt;
> +               for (i = 0; i < rsp->chan_cnt; i++)
> +                       nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
>         } else {
>                 req = mbox_alloc_msg_nix_bp_disable(mbox);
>                 if (req == NULL)
>                         return rc;
>                 req->chan_base = 0;
> -               req->chan_cnt = 1;
> +               req->chan_cnt = nix->chan_cnt;
>
>                 rc = mbox_process(mbox);
>                 if (rc)
> @@ -161,7 +167,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
>                 aq->op = NIX_AQ_INSTOP_WRITE;
>
>                 if (fc_cfg->cq_cfg.enable) {
> -                       aq->cq.bpid = nix->bpid[0];
> +                       aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
>                         aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
>                         aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
>                         aq->cq_mask.bp = ~(aq->cq_mask.bp);
> @@ -181,7 +187,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
>                 aq->op = NIX_AQ_INSTOP_WRITE;
>
>                 if (fc_cfg->cq_cfg.enable) {
> -                       aq->cq.bpid = nix->bpid[0];
> +                       aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
>                         aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
>                         aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
>                         aq->cq_mask.bp = ~(aq->cq_mask.bp);
> @@ -222,7 +228,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
>                 return nix_fc_rxchan_bpid_set(roc_nix,
>                                               fc_cfg->rxchan_cfg.enable);
>         else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
> -               return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
> +               return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
> +                                           fc_cfg->tm_cfg.tc,
> +                                           fc_cfg->tm_cfg.enable);
>
>         return -EINVAL;
>  }
> @@ -403,3 +411,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
>
>         mbox_process(mbox);
>  }
> +
> +int
> +roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
> +{
> +       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +       struct mbox *mbox = get_mbox(roc_nix);
> +       uint8_t tx_pause, rx_pause;
> +       struct cgx_pfc_cfg *req;
> +       struct cgx_pfc_rsp *rsp;
> +       int rc = -ENOSPC;
> +
> +       if (roc_nix_is_lbk(roc_nix))
> +               return NIX_ERR_OP_NOTSUP;
> +
> +       rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
> +                  (pfc_cfg->mode == ROC_NIX_FC_RX);
> +       tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
> +                  (pfc_cfg->mode == ROC_NIX_FC_TX);
> +
> +       req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
> +       if (req == NULL)
> +               goto exit;
> +
> +       req->pfc_en = pfc_cfg->tc;
> +       req->rx_pause = rx_pause;
> +       req->tx_pause = tx_pause;
> +
> +       rc = mbox_process_msg(mbox, (void *)&rsp);
> +       if (rc)
> +               goto exit;
> +
> +       nix->rx_pause = rsp->rx_pause;
> +       nix->tx_pause = rsp->tx_pause;
> +       if (rsp->tx_pause)
> +               nix->cev |= BIT(pfc_cfg->tc);
> +       else
> +               nix->cev &= ~BIT(pfc_cfg->tc);
> +
> +exit:
> +       return rc;
> +}
> +
> +int
> +roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
> +{
> +       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +
> +       if (roc_nix_is_lbk(roc_nix))
> +               return NIX_ERR_OP_NOTSUP;
> +
> +       pfc_cfg->tc = nix->cev;
> +
> +       if (nix->rx_pause && nix->tx_pause)
> +               pfc_cfg->mode = ROC_NIX_FC_FULL;
> +       else if (nix->rx_pause)
> +               pfc_cfg->mode = ROC_NIX_FC_RX;
> +       else if (nix->tx_pause)
> +               pfc_cfg->mode = ROC_NIX_FC_TX;
> +       else
> +               pfc_cfg->mode = ROC_NIX_FC_NONE;
> +
> +       return 0;
> +}
> +
> +uint16_t
> +roc_nix_chan_count_get(struct roc_nix *roc_nix)
> +{
> +       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +
> +       return nix->chan_cnt;
> +}
> diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
> index deb2a6ba11..f3889424c4 100644
> --- a/drivers/common/cnxk/roc_nix_priv.h
> +++ b/drivers/common/cnxk/roc_nix_priv.h
> @@ -33,6 +33,7 @@ struct nix_qint {
>  /* Traffic Manager */
>  #define NIX_TM_MAX_HW_TXSCHQ 512
>  #define NIX_TM_HW_ID_INVALID UINT32_MAX
> +#define NIX_TM_CHAN_INVALID UINT16_MAX
>
>  /* TM flags */
>  #define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
> @@ -56,6 +57,7 @@ struct nix_tm_node {
>         uint32_t priority;
>         uint32_t weight;
>         uint16_t lvl;
> +       uint16_t rel_chan;
>         uint32_t parent_id;
>         uint32_t shaper_profile_id;
>         void (*free_fn)(void *node);
> @@ -139,6 +141,7 @@ struct nix {
>         uint16_t msixoff;
>         uint8_t rx_pause;
>         uint8_t tx_pause;
> +       uint16_t cev;
>         uint64_t rx_cfg;
>         struct dev dev;
>         uint16_t cints;
> @@ -376,7 +379,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
>                bool ena);
>  int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
>  int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
> -int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
> +int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
> +                        bool enable);
>  void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
>
>  /*
> diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
> index 517502b1af..ecf3edfa44 100644
> --- a/drivers/common/cnxk/roc_nix_tm.c
> +++ b/drivers/common/cnxk/roc_nix_tm.c
> @@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
>                         if (is_pf_or_lbk && !skip_bp &&
>                             node->hw_lvl == nix->tm_link_cfg_lvl) {
>                                 node->bp_capa = 1;
> -                               skip_bp = true;
> +                               skip_bp = false;
>                         }
>
>                         rc = nix_tm_node_reg_conf(nix, node);
> @@ -317,21 +317,38 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
>  }
>
>  int
> -nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
> +nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
> +                    bool enable)
>  {
>         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>         enum roc_nix_tm_tree tree = nix->tm_tree;
>         struct mbox *mbox = (&nix->dev)->mbox;
>         struct nix_txschq_config *req = NULL;
>         struct nix_tm_node_list *list;
> +       struct nix_tm_node *sq_node;
> +       struct nix_tm_node *parent;
>         struct nix_tm_node *node;
>         uint8_t k = 0;
>         uint16_t link;
>         int rc = 0;
>
> +       sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
> +       parent = sq_node->parent;
> +       while (parent) {
> +               if (parent->lvl == ROC_TM_LVL_SCH2)
> +                       break;
> +
> +               parent = parent->parent;
> +       }
> +
>         list = nix_tm_node_list(nix, tree);
>         link = nix->tx_link;
>
> +       if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
> +               rc = -EINVAL;
> +               goto err;
> +       }
> +
>         TAILQ_FOREACH(node, list, node) {
>                 if (node->hw_lvl != nix->tm_link_cfg_lvl)
>                         continue;
> @@ -339,6 +356,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
>                 if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
>                         continue;
>
> +               if (node->hw_id != parent->hw_id)
> +                       continue;
> +
>                 if (!req) {
>                         req = mbox_alloc_msg_nix_txschq_cfg(mbox);
>                         req->lvl = nix->tm_link_cfg_lvl;
> @@ -346,8 +366,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
>                 }
>
>                 req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
> -               req->regval[k] = enable ? BIT_ULL(13) : 0;
> -               req->regval_mask[k] = ~BIT_ULL(13);
> +               req->regval[k] = enable ? tc : 0;
> +               req->regval[k] |= enable ? BIT_ULL(13) : 0;
> +               req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
>                 k++;
>
>                 if (k >= MAX_REGS_PER_MBOX_MSG) {
> @@ -366,6 +387,7 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
>                         goto err;
>         }
>
> +       parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
>         return 0;
>  err:
>         plt_err("Failed to %s bp on link %u, rc=%d(%s)",
> @@ -602,7 +624,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
>         }
>
>         /* Disable backpressure */
> -       rc = nix_tm_bp_config_set(roc_nix, false);
> +       rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
>         if (rc) {
>                 plt_err("Failed to disable backpressure for flush, rc=%d", rc);
>                 return rc;
> @@ -731,7 +753,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
>                 return 0;
>
>         /* Restore backpressure */
> -       rc = nix_tm_bp_config_set(roc_nix, true);
> +       rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
>         if (rc) {
>                 plt_err("Failed to restore backpressure, rc=%d", rc);
>                 return rc;
> @@ -1301,6 +1323,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
>                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>                 node->lvl = lvl;
>                 node->tree = ROC_NIX_TM_DEFAULT;
> +               node->rel_chan = NIX_TM_CHAN_INVALID;
>
>                 rc = nix_tm_node_add(roc_nix, node);
>                 if (rc)
> @@ -1327,6 +1350,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
>                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>                 node->lvl = leaf_lvl;
>                 node->tree = ROC_NIX_TM_DEFAULT;
> +               node->rel_chan = NIX_TM_CHAN_INVALID;
>
>                 rc = nix_tm_node_add(roc_nix, node);
>                 if (rc)
> @@ -1367,6 +1391,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
>                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>                 node->lvl = lvl;
>                 node->tree = ROC_NIX_TM_RLIMIT;
> +               node->rel_chan = NIX_TM_CHAN_INVALID;
>
>                 rc = nix_tm_node_add(roc_nix, node);
>                 if (rc)
> @@ -1392,6 +1417,7 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
>                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>                 node->lvl = lvl;
>                 node->tree = ROC_NIX_TM_RLIMIT;
> +               node->rel_chan = NIX_TM_CHAN_INVALID;
>
>                 rc = nix_tm_node_add(roc_nix, node);
>                 if (rc)
> @@ -1416,6 +1442,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
>                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
>                 node->lvl = leaf_lvl;
>                 node->tree = ROC_NIX_TM_RLIMIT;
> +               node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +               rc = nix_tm_node_add(roc_nix, node);
> +               if (rc)
> +                       goto error;
> +       }
> +
> +       return 0;
> +error:
> +       nix_tm_node_free(node);
> +       return rc;
> +}
> +
> +int
> +roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
> +{
> +       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +       uint32_t nonleaf_id = nix->nb_tx_queues;
> +       struct nix_tm_node *node = NULL;
> +       uint8_t leaf_lvl, lvl, lvl_end;
> +       uint32_t tl2_node_id;
> +       uint32_t parent, i;
> +       int rc = -ENOMEM;
> +
> +       parent = ROC_NIX_TM_NODE_ID_INVALID;
> +       lvl_end = ROC_TM_LVL_SCH3;
> +       leaf_lvl = ROC_TM_LVL_QUEUE;
> +
> +       /* TL1 node */
> +       node = nix_tm_node_alloc();
> +       if (!node)
> +               goto error;
> +
> +       node->id = nonleaf_id;
> +       node->parent_id = parent;
> +       node->priority = 0;
> +       node->weight = NIX_TM_DFLT_RR_WT;
> +       node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +       node->lvl = ROC_TM_LVL_ROOT;
> +       node->tree = ROC_NIX_TM_PFC;
> +       node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +       rc = nix_tm_node_add(roc_nix, node);
> +       if (rc)
> +               goto error;
> +
> +       parent = nonleaf_id;
> +       nonleaf_id++;
> +
> +       /* TL2 node */
> +       rc = -ENOMEM;
> +       node = nix_tm_node_alloc();
> +       if (!node)
> +               goto error;
> +
> +       node->id = nonleaf_id;
> +       node->parent_id = parent;
> +       node->priority = 0;
> +       node->weight = NIX_TM_DFLT_RR_WT;
> +       node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +       node->lvl = ROC_TM_LVL_SCH1;
> +       node->tree = ROC_NIX_TM_PFC;
> +       node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +       rc = nix_tm_node_add(roc_nix, node);
> +       if (rc)
> +               goto error;
> +
> +       tl2_node_id = nonleaf_id;
> +       nonleaf_id++;
> +
> +       for (i = 0; i < nix->nb_tx_queues; i++) {
> +               parent = tl2_node_id;
> +               for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
> +                       rc = -ENOMEM;
> +                       node = nix_tm_node_alloc();
> +                       if (!node)
> +                               goto error;
> +
> +                       node->id = nonleaf_id;
> +                       node->parent_id = parent;
> +                       node->priority = 0;
> +                       node->weight = NIX_TM_DFLT_RR_WT;
> +                       node->shaper_profile_id =
> +                               ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +                       node->lvl = lvl;
> +                       node->tree = ROC_NIX_TM_PFC;
> +                       node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +                       rc = nix_tm_node_add(roc_nix, node);
> +                       if (rc)
> +                               goto error;
> +
> +                       parent = nonleaf_id;
> +                       nonleaf_id++;
> +               }
> +
> +               lvl = ROC_TM_LVL_SCH4;
> +
> +               rc = -ENOMEM;
> +               node = nix_tm_node_alloc();
> +               if (!node)
> +                       goto error;
> +
> +               node->id = nonleaf_id;
> +               node->parent_id = parent;
> +               node->priority = 0;
> +               node->weight = NIX_TM_DFLT_RR_WT;
> +               node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +               node->lvl = lvl;
> +               node->tree = ROC_NIX_TM_PFC;
> +               node->rel_chan = NIX_TM_CHAN_INVALID;
> +
> +               rc = nix_tm_node_add(roc_nix, node);
> +               if (rc)
> +                       goto error;
> +
> +               parent = nonleaf_id;
> +               nonleaf_id++;
> +
> +               rc = -ENOMEM;
> +               node = nix_tm_node_alloc();
> +               if (!node)
> +                       goto error;
> +
> +               node->id = i;
> +               node->parent_id = parent;
> +               node->priority = 0;
> +               node->weight = NIX_TM_DFLT_RR_WT;
> +               node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
> +               node->lvl = leaf_lvl;
> +               node->tree = ROC_NIX_TM_PFC;
> +               node->rel_chan = NIX_TM_CHAN_INVALID;
>
>                 rc = nix_tm_node_add(roc_nix, node);
>                 if (rc)
> diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
> index 3d81247a12..d3d39eeb99 100644
> --- a/drivers/common/cnxk/roc_nix_tm_ops.c
> +++ b/drivers/common/cnxk/roc_nix_tm_ops.c
> @@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
>         /* Disable backpressure, it will be enabled back if needed on
>          * hierarchy enable
>          */
> -       rc = nix_tm_bp_config_set(roc_nix, false);
> -       if (rc) {
> -               plt_err("Failed to disable backpressure for flush, rc=%d", rc);
> -               goto cleanup;
> +       for (i = 0; i < sq_cnt; i++) {
> +               sq = nix->sqs[i];
> +               if (!sq)
> +                       continue;
> +
> +               rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
> +               if (rc) {
> +                       plt_err("Failed to disable backpressure, rc=%d", rc);
> +                       goto cleanup;
> +               }
>         }
>
>         /* Flush all tx queues */
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 75a260f11e..c14af6afed 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -107,6 +107,7 @@ INTERNAL {
>         roc_nix_bpf_stats_reset;
>         roc_nix_bpf_stats_to_idx;
>         roc_nix_bpf_timeunit_get;
> +       roc_nix_chan_count_get;
>         roc_nix_cq_dump;
>         roc_nix_cq_fini;
>         roc_nix_cq_head_tail_get;
> @@ -198,6 +199,8 @@ INTERNAL {
>         roc_nix_npc_promisc_ena_dis;
>         roc_nix_npc_rx_ena_dis;
>         roc_nix_npc_mcast_config;
> +       roc_nix_pfc_mode_get;
> +       roc_nix_pfc_mode_set;
>         roc_nix_ptp_clock_read;
>         roc_nix_ptp_info_cb_register;
>         roc_nix_ptp_info_cb_unregister;
> @@ -263,6 +266,7 @@ INTERNAL {
>         roc_nix_tm_node_stats_get;
>         roc_nix_tm_node_suspend_resume;
>         roc_nix_tm_prealloc_res;
> +       roc_nix_tm_pfc_prepare_tree;
>         roc_nix_tm_prepare_rate_limited_tree;
>         roc_nix_tm_rlimit_sq;
>         roc_nix_tm_root_has_sp;
> --
> 2.25.1
>
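
As a reading aid for the quoted TL3/TL2 link config write: bit 13 of
NIX_AF_TL3_TL2X_LINKX_CFG enables backpressure and bits 7:0 carry the
relative channel (traffic class), while regval_mask clears exactly those
fields so the read-modify-write leaves the rest of the register untouched.
A standalone illustration of the encoding (not driver code):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(n)        (1ULL << (n))
	#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	int main(void)
	{
		uint64_t tc = 5, enable = 1;
		/* Enable BP (bit 13) and select relative channel 5. */
		uint64_t regval = enable ? (BIT_ULL(13) | tc) : 0;
		uint64_t mask = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));

		printf("regval=0x%" PRIx64 " mask=0x%" PRIx64 "\n", regval, mask);
		return 0;
	}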

^ permalink raw reply	[flat|nested] 29+ messages in thread

end of thread, other threads:[~2022-02-23 11:36 UTC | newest]

Thread overview: 29+ messages
-- links below jump to the message on this page --
2022-01-09 11:11 [PATCH v1 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-09 11:11 ` [PATCH v1 2/2] net/cnxk: support priority flow control skori
2022-01-09 11:18   ` Sunil Kumar Kori
2022-01-11  8:18   ` [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-11  8:18     ` [PATCH v2 2/2] net/cnxk: support priority flow control skori
2022-01-18 13:28       ` [PATCH v3 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-18 13:28         ` [PATCH v3 2/2] net/cnxk: support priority flow control skori
2022-01-20 16:59           ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-20 16:59             ` [PATCH v4 2/2] net/cnxk: support priority flow control skori
2022-01-25 10:02             ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API Ray Kinsella
2022-01-25 10:19               ` [EXT] " Sunil Kumar Kori
2022-01-25 11:23             ` [PATCH v5 " skori
2022-01-25 11:23               ` [PATCH v5 2/2] net/cnxk: support priority flow control skori
2022-01-28 13:28               ` [PATCH v6 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-28 13:29                 ` [PATCH v6 2/2] net/cnxk: support priority flow control skori
2022-02-07 17:21                   ` [PATCH v7 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-07 17:21                     ` [PATCH v7 2/2] net/cnxk: support priority flow control skori
2022-02-14  9:02                       ` [PATCH v8 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-14  9:02                         ` [PATCH v8 2/2] net/cnxk: support priority flow control skori
2022-02-14 10:10                           ` [PATCH v9 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-14 10:10                             ` [PATCH v9 2/2] net/cnxk: support priority flow control skori
2022-02-18  6:11                               ` Jerin Jacob
2022-02-22  8:06                                 ` [EXT] " Sunil Kumar Kori
2022-02-22  8:58                               ` [PATCH v10 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-22  8:58                                 ` [PATCH v10 2/2] net/cnxk: support priority flow control skori
2022-02-22 10:37                               ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-22 10:37                                 ` [PATCH v11 2/2] net/cnxk: support priority flow control skori
2022-02-23 11:36                                 ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API Jerin Jacob
2022-02-14 10:06                         ` [PATCH v8 " Ray Kinsella
