DPDK patches and discussions
* [PATCH 01/21] common/cnxk: allocate dynamic BPIDs
@ 2023-04-11  9:11 Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 02/21] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
                   ` (21 more replies)
  0 siblings, 22 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Add a new mailbox to allocate/free dynamic BPIDs based on NIX type.
Also add new mailbox APIs to get/set the RX channel config with the
new BPIDs.
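
For illustration only (not part of this patch), a RoC consumer could
combine the new APIs roughly as below to allocate one dynamic BPID of an
application type and enable it on an RX channel; the roc_nix handle and
the channel number 0 are placeholders:

	uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
	int rc;

	/* Returns the number of BPIDs allocated, 0 if none are free,
	 * negative value on error.
	 */
	rc = roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, bpids);
	if (rc > 0) {
		/* Enable the new BPID on RX channel 0 (non-CPT channel) */
		if (roc_nix_chan_bpid_set(roc_nix, 0, bpids[0], 1, false))
			roc_nix_bpids_free(roc_nix, 1, bpids);
	}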

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---

Depends-on: series-27659 ("add hwpools and support exchanging mbufs between pools")

 drivers/common/cnxk/roc_cpt.c      |  10 +-
 drivers/common/cnxk/roc_cpt.h      |   3 +-
 drivers/common/cnxk/roc_features.h |   7 ++
 drivers/common/cnxk/roc_mbox.h     |  31 ++++-
 drivers/common/cnxk/roc_nix.h      |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c   | 182 +++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.c  |  24 ++--
 drivers/common/cnxk/roc_nix_priv.h |   1 +
 drivers/common/cnxk/version.map    |   5 +
 9 files changed, 266 insertions(+), 18 deletions(-)

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index dff2fbf2a4..d235ff51ca 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -311,8 +311,7 @@ roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 }
 
 int
-roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
-			     uint16_t param2, uint16_t opcode)
+roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_inline_ipsec_inb_cfg *cfg)
 {
 	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 	struct cpt_rx_inline_lf_cfg_msg *req;
@@ -328,9 +327,10 @@ roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 	}
 
 	req->sso_pf_func = idev_sso_pffunc_get();
-	req->param1 = param1;
-	req->param2 = param2;
-	req->opcode = opcode;
+	req->param1 = cfg->param1;
+	req->param2 = cfg->param2;
+	req->opcode = cfg->opcode;
+	req->bpid = cfg->bpid;
 
 	rc = mbox_process(mbox);
 exit:
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index d3a5683dc8..92a18711dc 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -178,8 +178,7 @@ int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 					struct roc_cpt_inline_ipsec_inb_cfg *cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
-					   uint16_t param1, uint16_t param2,
-					   uint16_t opcode);
+					   struct roc_cpt_inline_ipsec_inb_cfg *cfg);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
 int __roc_api roc_cpt_lfs_print(struct roc_cpt *roc_cpt);
 void __roc_api roc_cpt_iq_disable(struct roc_cpt_lf *lf);
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 252f306a86..c2893faa65 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -40,4 +40,11 @@ roc_feature_nix_has_reass(void)
 	return roc_model_is_cn10ka();
 }
 
+static inline bool
+roc_feature_nix_has_rxchan_multi_bpid(void)
+{
+	if (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0())
+		return true;
+	return false;
+}
 #endif
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index af3c10b0b0..3d5746b9b8 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -275,7 +275,12 @@ struct mbox_msghdr {
 	M(NIX_SPI_TO_SA_ADD, 0x8026, nix_spi_to_sa_add, nix_spi_to_sa_add_req, \
 	  nix_spi_to_sa_add_rsp)                                               \
 	M(NIX_SPI_TO_SA_DELETE, 0x8027, nix_spi_to_sa_delete,                  \
-	  nix_spi_to_sa_delete_req, msg_rsp)
+	  nix_spi_to_sa_delete_req, msg_rsp)                                   \
+	M(NIX_ALLOC_BPIDS, 0x8028, nix_alloc_bpids, nix_alloc_bpid_req,        \
+	  nix_bpids)                                                           \
+	M(NIX_FREE_BPIDS, 0x8029, nix_free_bpids, nix_bpids, msg_rsp)          \
+	M(NIX_RX_CHAN_CFG, 0x802a, nix_rx_chan_cfg, nix_rx_chan_cfg,           \
+	  nix_rx_chan_cfg)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1186,6 +1191,30 @@ struct nix_bp_cfg_rsp {
 	uint8_t __io chan_cnt;
 };
 
+struct nix_alloc_bpid_req {
+	struct mbox_msghdr hdr;
+	uint8_t __io bpid_cnt;
+	uint8_t __io type;
+	uint64_t __io rsvd;
+};
+
+struct nix_bpids {
+#define ROC_NIX_MAX_BPID_CNT	8
+	struct mbox_msghdr hdr;
+	uint8_t __io bpid_cnt;
+	uint16_t __io bpids[ROC_NIX_MAX_BPID_CNT];
+	uint64_t __io rsvd;
+};
+
+struct nix_rx_chan_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io type; /* Interface type(CGX/CPT/LBK) */
+	uint8_t __io read;
+	uint16_t __io chan; /* RX channel to be configured */
+	uint64_t __io val; /* NIX_AF_RX_CHAN_CFG value */
+	uint64_t __io rsvd;
+};
+
 /* Global NIX inline IPSec configuration */
 struct nix_inline_ipsec_cfg {
 	struct mbox_msghdr hdr;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 0ec98ad630..2737bb9517 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -16,6 +16,17 @@
 #define ROC_NIX_SQB_LOWER_THRESH      70U
 #define ROC_NIX_SQB_SLACK	      12U
 
+/* Reserved interface types for BPID allocation */
+#define ROC_NIX_INTF_TYPE_CGX  0
+#define ROC_NIX_INTF_TYPE_LBK  1
+#define ROC_NIX_INTF_TYPE_SDP  2
+#define ROC_NIX_INTF_TYPE_CPT  3
+#define ROC_NIX_INTF_TYPE_RSVD 4
+
+/* Application based types for BPID allocation, start from end (255 unused rsvd) */
+#define ROC_NIX_INTF_TYPE_CPT_NIX 254
+#define ROC_NIX_INTF_TYPE_SSO     253
+
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
 	ROC_NIX_RSS_RETA_SZ_128 = 128,
@@ -837,6 +848,16 @@ enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
 				     uint8_t ena, uint8_t force, uint8_t tc);
+int __roc_api roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type,
+				  uint8_t bp_cnt, uint16_t *bpids);
+int __roc_api roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt,
+				 uint16_t *bpids);
+int __roc_api roc_nix_rx_chan_cfg_get(struct roc_nix *roc_nix, uint16_t chan,
+				      bool is_cpt, uint64_t *cfg);
+int __roc_api roc_nix_rx_chan_cfg_set(struct roc_nix *roc_nix, uint16_t chan,
+				      bool is_cpt, uint64_t val);
+int __roc_api roc_nix_chan_bpid_set(struct roc_nix *roc_nix, uint16_t chan,
+				    uint64_t bpid, int ena, bool cpt_chan);
 
 /* NPC */
 int __roc_api roc_nix_npc_promisc_ena_dis(struct roc_nix *roc_nix, int enable);
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index cec83b31f3..3b726673a6 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -104,6 +104,17 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		nix->cpt_lbpid = 0;
 	}
 
+	/* CPT to NIX BP on all channels */
+	if (!roc_feature_nix_has_rxchan_multi_bpid() || !nix->cpt_nixbpid)
+		goto exit;
+
+	mbox_put(mbox);
+	for (i = 0; i < nix->rx_chan_cnt; i++) {
+		rc = roc_nix_chan_bpid_set(roc_nix, i, nix->cpt_nixbpid, enable, false);
+		if (rc)
+			break;
+	}
+	return rc;
 exit:
 	mbox_put(mbox);
 	return rc;
@@ -599,3 +610,174 @@ roc_nix_chan_count_get(struct roc_nix *roc_nix)
 
 	return nix->chan_cnt;
 }
+
+/* Allocate BPID for requested type
+ * Returns number of BPIDs allocated
+ *	0 if no BPIDs available
+ *	-ve value on error
+ */
+int
+roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type, uint8_t bp_cnt, uint16_t *bpids)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_alloc_bpid_req *req;
+	struct nix_bpids *rsp;
+	int rc = -EINVAL;
+
+	/* Use this api for unreserved interface types */
+	if ((type < ROC_NIX_INTF_TYPE_RSVD) || (bp_cnt > ROC_NIX_MAX_BPID_CNT) || !bpids)
+		goto exit;
+
+	rc = -ENOSPC;
+	req = mbox_alloc_msg_nix_alloc_bpids(mbox);
+	if (req == NULL)
+		goto exit;
+	req->type = type;
+	req->bpid_cnt = bp_cnt;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	for (rc = 0; rc < rsp->bpid_cnt; rc++)
+		bpids[rc] = rsp->bpids[rc];
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt, uint16_t *bpids)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_bpids *req;
+	int rc = -EINVAL;
+
+	/* Use this api for unreserved interface types */
+	if ((bp_cnt > ROC_NIX_MAX_BPID_CNT) || !bpids)
+		goto exit;
+
+	rc = -ENOSPC;
+	req = mbox_alloc_msg_nix_free_bpids(mbox);
+	if (req == NULL)
+		goto exit;
+	for (rc = 0; rc < bp_cnt; rc++)
+		req->bpids[rc] = bpids[rc];
+	req->bpid_cnt = rc;
+
+	rc = mbox_process(mbox);
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_rx_chan_cfg_get(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t *cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_rx_chan_cfg *req;
+	struct nix_rx_chan_cfg *rsp;
+	int rc = -EINVAL;
+
+	req = mbox_alloc_msg_nix_rx_chan_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+	if (is_cpt)
+		req->type = ROC_NIX_INTF_TYPE_CPT;
+	req->chan = chan;
+	req->read = 1;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+	*cfg = rsp->val;
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_rx_chan_cfg_set(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t val)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_rx_chan_cfg *req;
+	int rc = -EINVAL;
+
+	req = mbox_alloc_msg_nix_rx_chan_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+	if (is_cpt)
+		req->type = ROC_NIX_INTF_TYPE_CPT;
+	req->chan = chan;
+	req->val = val;
+	req->read = 0;
+
+	rc = mbox_process(mbox);
+	if (rc)
+		goto exit;
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+#define NIX_BPID1_ENA 15
+#define NIX_BPID2_ENA 14
+#define NIX_BPID3_ENA 13
+
+#define NIX_BPID1_OFF 20
+#define NIX_BPID2_OFF 32
+#define NIX_BPID3_OFF 44
+
+int
+roc_nix_chan_bpid_set(struct roc_nix *roc_nix, uint16_t chan, uint64_t bpid, int ena, bool cpt_chan)
+{
+	uint64_t cfg;
+	int rc;
+
+	if (!roc_feature_nix_has_rxchan_multi_bpid())
+		return -ENOTSUP;
+
+	rc = roc_nix_rx_chan_cfg_get(roc_nix, chan, cpt_chan, &cfg);
+	if (rc)
+		return rc;
+
+	if (ena) {
+		if ((((cfg >> NIX_BPID1_OFF) & GENMASK_ULL(8, 0)) == bpid) ||
+		    (((cfg >> NIX_BPID2_OFF) & GENMASK_ULL(8, 0)) == bpid) ||
+		    (((cfg >> NIX_BPID3_OFF) & GENMASK_ULL(8, 0)) == bpid))
+			return 0;
+
+		if (!(cfg & BIT_ULL(NIX_BPID1_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID1_OFF + 8, NIX_BPID1_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID1_OFF) | BIT_ULL(NIX_BPID1_ENA));
+		} else if (!(cfg & BIT_ULL(NIX_BPID2_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID2_OFF + 8, NIX_BPID2_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID2_OFF) | BIT_ULL(NIX_BPID2_ENA));
+		} else if (!(cfg & BIT_ULL(NIX_BPID3_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID3_OFF + 8, NIX_BPID3_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID3_OFF) | BIT_ULL(NIX_BPID3_ENA));
+		} else {
+			plt_nix_dbg("Exceed maximum BPIDs");
+			return -ENOSPC;
+		}
+	} else {
+		if (((cfg >> NIX_BPID1_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID1_OFF + 8, NIX_BPID1_OFF) |
+				 BIT_ULL(NIX_BPID1_ENA));
+		} else if (((cfg >> NIX_BPID2_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID2_OFF + 8, NIX_BPID2_OFF) |
+				 BIT_ULL(NIX_BPID2_ENA));
+		} else if (((cfg >> NIX_BPID3_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID3_OFF + 8, NIX_BPID3_OFF) |
+				 BIT_ULL(NIX_BPID3_ENA));
+		} else {
+			plt_nix_dbg("BPID not found");
+			return -EINVAL;
+		}
+	}
+	return roc_nix_rx_chan_cfg_set(roc_nix, chan, cpt_chan, cfg);
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 076d83e8d5..9485bba099 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -603,11 +603,10 @@ int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_cpt_inline_ipsec_inb_cfg cfg;
 	struct idev_cfg *idev = idev_get_cfg();
+	uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
 	struct roc_cpt *roc_cpt;
-	uint16_t opcode;
-	uint16_t param1;
-	uint16_t param2;
 	int rc;
 
 	if (idev == NULL)
@@ -624,9 +623,9 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	}
 
 	if (roc_model_is_cn9k()) {
-		param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
-		param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
-		opcode =
+		cfg.param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
+		cfg.param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
+		cfg.opcode =
 			((ROC_IE_ON_INB_MAX_CTX_LEN << 8) |
 			 (ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6)));
 	} else {
@@ -634,13 +633,18 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 
 		u.u16 = 0;
 		u.s.esp_trailer_disable = 1;
-		param1 = u.u16;
-		param2 = 0;
-		opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
+		cfg.param1 = u.u16;
+		cfg.param2 = 0;
+		cfg.opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
+		rc = roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_CPT_NIX, 1, bpids);
+		if (rc > 0) {
+			nix->cpt_nixbpid = bpids[0];
+			cfg.bpid = nix->cpt_nixbpid;
+		}
 	}
 
 	/* Do onetime Inbound Inline config in CPTPF */
-	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, param2, opcode);
+	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, &cfg);
 	if (rc && rc != -EEXIST) {
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 2fe9093324..99e27cdc56 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -208,6 +208,7 @@ struct nix {
 	uint16_t outb_se_ring_cnt;
 	uint16_t outb_se_ring_base;
 	uint16_t cpt_lbpid;
+	uint16_t cpt_nixbpid;
 	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5281c71550..e7c6f6bce5 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -147,6 +147,9 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_bpids_alloc;
+	roc_nix_bpids_free;
+	roc_nix_chan_bpid_set;
 	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
@@ -277,6 +280,8 @@ INTERNAL {
 	roc_nix_rss_key_set;
 	roc_nix_rss_reta_get;
 	roc_nix_rss_reta_set;
+	roc_nix_rx_chan_cfg_get;
+	roc_nix_rx_chan_cfg_set;
 	roc_nix_rx_drop_re_set;
 	roc_nix_rx_queue_intr_disable;
 	roc_nix_rx_queue_intr_enable;
-- 
2.25.1



* [PATCH 02/21] common/cnxk: add pool BPID to RQ while using common pool
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-05-18  5:52   ` Jerin Jacob
  2023-04-11  9:11 ` [PATCH 03/21] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
                   ` (20 subsequent siblings)
  21 siblings, 1 reply; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Shijith Thotton

From: Shijith Thotton <sthotton@marvell.com>

When RQs of two different traffic classes use the same mempool, their
BPIDs can differ, but the BPID of only one RQ can be configured per pool.
In such cases, a new BPID is configured on both RQs and the pool, or pool
back-pressure is disabled.

CN103xx and CN106xx B0 support configuring multiple BPIDs per RQ.
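
As a hedged sketch of the shared-pool handling (not code from this patch),
the essence is: if the aura's current BPID belongs to another RQ/TC,
allocate a fresh BPID, enable it on both RX channels and rewrite the aura
with it. Here roc_nix_pre/chan_pre refer to the port/channel owning the
existing BPID, and chan_cur, pool_id, bp_intf and thresh stand in for
values the driver computes:

	uint16_t bpid_new;

	if (roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, &bpid_new) > 0) {
		/* Enable the new BPID on both channels sharing the pool */
		roc_nix_chan_bpid_set(roc_nix_pre, chan_pre, bpid_new, 1, false);
		roc_nix_chan_bpid_set(roc_nix, chan_cur, bpid_new, 1, false);
		/* Point the aura's backpressure at the shared BPID */
		roc_npa_aura_bp_configure(pool_id, bpid_new, bp_intf, thresh, true);
	}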

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
 drivers/common/cnxk/roc_idev.c      |  12 +++
 drivers/common/cnxk/roc_idev.h      |   1 +
 drivers/common/cnxk/roc_idev_priv.h |   1 +
 drivers/common/cnxk/roc_nix.c       |   5 +
 drivers/common/cnxk/roc_nix.h       |   3 +
 drivers/common/cnxk/roc_nix_fc.c    | 156 ++++++++++++++++------------
 drivers/common/cnxk/roc_npa.c       |  48 +++++++++
 drivers/common/cnxk/roc_npa.h       |   2 +
 drivers/common/cnxk/version.map     |   2 +
 9 files changed, 166 insertions(+), 64 deletions(-)

diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 62a4fd8880..f420f0158d 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -39,6 +39,7 @@ idev_set_defaults(struct idev_cfg *idev)
 	idev->bphy = NULL;
 	idev->cpt = NULL;
 	idev->nix_inl_dev = NULL;
+	TAILQ_INIT(&idev->roc_nix_list);
 	plt_spinlock_init(&idev->nix_inl_dev_lock);
 	plt_spinlock_init(&idev->npa_dev_lock);
 	__atomic_store_n(&idev->npa_refcnt, 0, __ATOMIC_RELEASE);
@@ -201,6 +202,17 @@ roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix)
 	return (uint64_t *)&inl_dev->sa_soft_exp_ring[nix->outb_se_ring_base];
 }
 
+struct roc_nix_list *
+roc_idev_nix_list_get(void)
+{
+	struct idev_cfg *idev;
+
+	idev = idev_get_cfg();
+	if (idev != NULL)
+		return &idev->roc_nix_list;
+	return NULL;
+}
+
 void
 roc_idev_cpt_set(struct roc_cpt *cpt)
 {
diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 926aac0634..640ca97708 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -17,5 +17,6 @@ void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
 uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
+struct roc_nix_list *__roc_api roc_idev_nix_list_get(void);
 
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index b97d2936a2..d83522799f 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -32,6 +32,7 @@ struct idev_cfg {
 	struct roc_sso *sso;
 	struct nix_inl_dev *nix_inl_dev;
 	struct idev_nix_inl_cfg inl_cfg;
+	struct roc_nix_list roc_nix_list;
 	plt_spinlock_t nix_inl_dev_lock;
 	plt_spinlock_t npa_dev_lock;
 };
diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 97ef1c7133..39943e4ba7 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -417,6 +417,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	nix = roc_nix_to_nix_priv(roc_nix);
 	pci_dev = roc_nix->pci_dev;
 	dev = &nix->dev;
+	TAILQ_INSERT_TAIL(roc_idev_nix_list_get(), roc_nix, next);
 
 	if (nix->dev.drv_inited)
 		return 0;
@@ -425,6 +426,10 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 		goto skip_dev_init;
 
 	memset(nix, 0, sizeof(*nix));
+
+	/* Since 0 is a valid BPID, use -1 to represent invalid value. */
+	memset(nix->bpid, -1, sizeof(nix->bpid));
+
 	/* Initialize device  */
 	rc = dev_init(dev, pci_dev);
 	if (rc) {
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 2737bb9517..188b8800d3 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -425,6 +425,8 @@ typedef void (*q_err_get_t)(struct roc_nix *roc_nix, void *data);
 typedef void (*link_info_get_t)(struct roc_nix *roc_nix,
 				struct roc_nix_link_info *link);
 
+TAILQ_HEAD(roc_nix_list, roc_nix);
+
 struct roc_nix {
 	/* Input parameters */
 	struct plt_pci_device *pci_dev;
@@ -456,6 +458,7 @@ struct roc_nix {
 	uint32_t buf_sz;
 	uint64_t meta_aura_handle;
 	uintptr_t meta_mempool;
+	TAILQ_ENTRY(roc_nix) next;
 
 #define ROC_NIX_MEM_SZ (6 * 1056)
 	uint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 3b726673a6..8b7659fb9a 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -428,17 +428,64 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 	return rc;
 }
 
+static int
+nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid, uint16_t *bpid_new)
+{
+	struct roc_nix *roc_nix_tmp, *roc_nix_pre = NULL;
+	uint8_t chan_pre;
+
+	if (!roc_feature_nix_has_rxchan_multi_bpid())
+		return -ENOTSUP;
+
+	/* Find associated NIX RX channel if Aura BPID is of that of a NIX. */
+	TAILQ_FOREACH (roc_nix_tmp, roc_idev_nix_list_get(), next) {
+		struct nix *nix = roc_nix_to_nix_priv(roc_nix_tmp);
+		int i;
+
+		for (i = 0; i < NIX_MAX_CHAN; i++) {
+			if (nix->bpid[i] == bpid)
+				break;
+		}
+
+		if (i < NIX_MAX_CHAN) {
+			roc_nix_pre = roc_nix_tmp;
+			chan_pre = i;
+			break;
+		}
+	}
+
+	/* Alloc and configure a new BPID if Aura BPID is that of a NIX. */
+	if (roc_nix_pre) {
+		if (roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, bpid_new) <= 0)
+			return -ENOSPC;
+
+		if (roc_nix_chan_bpid_set(roc_nix_pre, chan_pre, *bpid_new, 1, false) < 0)
+			return -ENOSPC;
+
+		if (roc_nix_chan_bpid_set(roc_nix, chan, *bpid_new, 1, false) < 0)
+			return -ENOSPC;
+
+		return 0;
+	} else {
+		return roc_nix_chan_bpid_set(roc_nix, chan, bpid, 1, false);
+	}
+
+	return 0;
+}
+
+#define NIX_BPID_INVALID 0xFFFF
+
 void
 roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 		      uint8_t force, uint8_t tc)
 {
+	uint32_t aura_id = roc_npa_aura_handle_to_aura(pool_id);
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct npa_lf *lf = idev_npa_obj_get();
 	struct npa_aq_enq_req *req;
 	struct npa_aq_enq_rsp *rsp;
+	uint8_t bp_thresh, bp_intf;
 	struct mbox *mbox;
-	uint32_t limit;
-	uint64_t shift;
 	int rc;
 
 	if (roc_nix_is_sdp(roc_nix))
@@ -446,93 +493,74 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	if (!lf)
 		return;
-	mbox = mbox_get(lf->mbox);
 
-	req = mbox_alloc_msg_npa_aq_enq(mbox);
-	if (req == NULL)
-		goto exit;
+	mbox = lf->mbox;
+	req = mbox_alloc_msg_npa_aq_enq(mbox_get(mbox));
+	if (req == NULL) {
+		mbox_put(mbox);
+		return;
+	}
 
-	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
+	req->aura_id = aura_id;
 	req->ctype = NPA_AQ_CTYPE_AURA;
 	req->op = NPA_AQ_INSTOP_READ;
 
 	rc = mbox_process_msg(mbox, (void *)&rsp);
-	if (rc)
-		goto exit;
+	mbox_put(mbox);
+	if (rc) {
+		plt_nix_dbg("Failed to read context of aura 0x%" PRIx64, pool_id);
+		return;
+	}
 
-	limit = rsp->aura.limit;
-	shift = rsp->aura.shift;
+	bp_intf = 1 << nix->is_nix1;
+	bp_thresh = NIX_RQ_AURA_THRESH(rsp->aura.limit >> rsp->aura.shift);
 
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
-		uint16_t bpid;
-		bool nix1;
+		uint16_t bpid =
+			(rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
 
-		nix1 = !!(rsp->aura.bp_ena & 0x2);
-		if (nix1)
-			bpid = rsp->aura.nix1_bpid;
-		else
-			bpid = rsp->aura.nix0_bpid;
+		/* Disable BP if BPIDs don't match and couldn't add new BPID. */
+		if (bpid != nix->bpid[tc]) {
+			uint16_t bpid_new = NIX_BPID_INVALID;
 
-		/* If BP ids don't match disable BP. */
-		if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc])) &&
-		    !force) {
-			req = mbox_alloc_msg_npa_aq_enq(mbox);
-			if (req == NULL)
-				goto exit;
+			if ((nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new) < 0) &&
+			    !force) {
+				plt_info("Disabling BP/FC on aura 0x%" PRIx64
+					 " as it shared across ports or tc",
+					 pool_id);
 
-			plt_info("Disabling BP/FC on aura 0x%" PRIx64
-				 " as it shared across ports or tc",
-				 pool_id);
-			req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
-			req->ctype = NPA_AQ_CTYPE_AURA;
-			req->op = NPA_AQ_INSTOP_WRITE;
+				if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
+					plt_nix_dbg(
+						"Disabling backpressue failed on aura 0x%" PRIx64,
+						pool_id);
+			}
 
-			req->aura.bp_ena = 0;
-			req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
-
-			mbox_process(mbox);
+			/* Configure Aura with new BPID if it is allocated. */
+			if (bpid_new != NIX_BPID_INVALID) {
+				if (roc_npa_aura_bp_configure(pool_id, bpid_new, bp_intf, bp_thresh,
+							      true))
+					plt_nix_dbg(
+						"Enabling backpressue failed on aura 0x%" PRIx64,
+						pool_id);
+			}
 		}
 
-		if ((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc]))
-			plt_info("Ignoring aura 0x%" PRIx64 "->%u bpid mapping",
-				 pool_id, nix->bpid[tc]);
-		goto exit;
+		return;
 	}
 
 	/* BP was previously enabled but now disabled skip. */
 	if (rsp->aura.bp && ena)
-		goto exit;
-
-	req = mbox_alloc_msg_npa_aq_enq(mbox);
-	if (req == NULL)
-		goto exit;
-
-	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
-	req->ctype = NPA_AQ_CTYPE_AURA;
-	req->op = NPA_AQ_INSTOP_WRITE;
+		return;
 
 	if (ena) {
-		if (nix->is_nix1) {
-			req->aura.nix1_bpid = nix->bpid[tc];
-			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
-		} else {
-			req->aura.nix0_bpid = nix->bpid[tc];
-			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
-		}
-		req->aura.bp = NIX_RQ_AURA_THRESH(limit >> shift);
-		req->aura_mask.bp = ~(req->aura_mask.bp);
+		if (roc_npa_aura_bp_configure(pool_id, nix->bpid[tc], bp_intf, bp_thresh, true))
+			plt_nix_dbg("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	} else {
-		req->aura.bp = 0;
-		req->aura_mask.bp = ~(req->aura_mask.bp);
+		if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
+			plt_nix_dbg("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	}
 
-	req->aura.bp_ena = (!!ena << nix->is_nix1);
-	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
-
-	mbox_process(mbox);
-exit:
-	mbox_put(mbox);
 	return;
 }
 
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 42846ac4ec..d6a97e49c9 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -882,6 +882,54 @@ roc_npa_zero_aura_handle(void)
 	return 0;
 }
 
+int
+roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf, uint8_t bp_thresh,
+			  bool enable)
+{
+	uint32_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf = idev_npa_obj_get();
+	struct npa_aq_enq_req *req;
+	struct mbox *mbox;
+	int rc = 0;
+
+	if (lf == NULL)
+		return NPA_ERR_PARAM;
+
+	mbox = mbox_get(lf->mbox);
+	req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (req == NULL) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	req->aura_id = aura_id;
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_WRITE;
+
+	if (enable) {
+		if (bp_intf & 0x1) {
+			req->aura.nix0_bpid = bpid;
+			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
+		} else {
+			req->aura.nix1_bpid = bpid;
+			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
+		}
+		req->aura.bp = bp_thresh;
+		req->aura_mask.bp = ~(req->aura_mask.bp);
+	} else {
+		req->aura.bp = 0;
+		req->aura_mask.bp = ~(req->aura_mask.bp);
+	}
+
+	req->aura.bp_ena = bp_intf;
+	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+	mbox_process(mbox);
+fail:
+	mbox_put(mbox);
+	return rc;
+}
+
 static inline int
 npa_attach(struct mbox *m_box)
 {
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 21608a40d9..546b7c93d9 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -746,6 +746,8 @@ uint64_t __roc_api roc_npa_zero_aura_handle(void);
 int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
 uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
 uint64_t __roc_api roc_npa_buf_type_limit_get(uint64_t type_mask);
+int __roc_api roc_npa_aura_bp_configure(uint64_t aura_id, uint16_t bpid, uint8_t bp_intf,
+					uint8_t bp_thresh, bool enable);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index e7c6f6bce5..d740d9df81 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -99,6 +99,7 @@ INTERNAL {
 	roc_idev_npa_nix_get;
 	roc_idev_num_lmtlines_get;
 	roc_idev_nix_inl_meta_aura_get;
+	roc_idev_nix_list_get;
 	roc_ml_reg_read64;
 	roc_ml_reg_write64;
 	roc_ml_reg_read32;
@@ -361,6 +362,7 @@ INTERNAL {
 	roc_npa_aura_limit_modify;
 	roc_npa_aura_op_range_get;
 	roc_npa_aura_op_range_set;
+	roc_npa_aura_bp_configure;
 	roc_npa_ctx_dump;
 	roc_npa_dev_fini;
 	roc_npa_dev_init;
-- 
2.25.1



* [PATCH 03/21] common/cnxk: skip flow ctrl set on non-existent meta aura
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 02/21] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 04/21] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
                   ` (19 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Thomas Monjalon, Nithin Kumar Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Skip setting flow control on the local meta aura if it is not yet
created. Also, in flow control mode set, first do a get to confirm
whether the device is already in the requested state, to avoid a
redundant set and unnecessary mbox failures.
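
For illustration (not taken from this patch), the get-before-set idea at
the ethdev layer reduces to comparing the cached pause state before
issuing the mbox request; fc, nix, mode_map and fc_conf are the existing
driver objects:

	/* fc caches the currently applied state; skip the mbox if unchanged */
	if (fc->rx_pause == rx_pause && fc->tx_pause == tx_pause)
		return 0;

	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);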

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 .mailmap                           | 1 +
 drivers/common/cnxk/roc_dev.c      | 1 +
 drivers/common/cnxk/roc_nix_fc.c   | 4 ++--
 drivers/common/cnxk/roc_nix_inl.c  | 3 +++
 drivers/net/cnxk/cnxk_ethdev.c     | 5 +++--
 drivers/net/cnxk/cnxk_ethdev_ops.c | 4 ++++
 6 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/.mailmap b/.mailmap
index 0859104404..be2eddf1b3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -988,6 +988,7 @@ Nipun Gupta <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
+Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
 Noa Ezra <noae@mellanox.com>
diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 2388237186..5e4e564ebe 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -421,6 +421,7 @@ process_msgs(struct dev *dev, struct mbox *mbox)
 			dev->pf_func = msg->pcifunc;
 			break;
 		case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
+		case MBOX_MSG_CGX_CFG_PAUSE_FRM:
 			/* Handling the case where one VF tries to disable PFC
 			 * while PFC already configured on other VFs. This is
 			 * not an error but a warning which can be ignored.
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 8b7659fb9a..3618d2920b 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -311,7 +311,7 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 				      fc_cfg->rq_cfg.enable, true,
 				      fc_cfg->rq_cfg.tc);
 
-		if (roc_nix->local_meta_aura_ena)
+		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
 					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc);
 	}
@@ -409,6 +409,7 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 		goto exit;
 	}
 
+	/* Set new config */
 	req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
 	if (req == NULL)
 		goto exit;
@@ -422,7 +423,6 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 
 	nix->rx_pause = rx_pause;
 	nix->tx_pause = tx_pause;
-
 exit:
 	mbox_put(mbox);
 	return rc;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 9485bba099..b16756d642 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -131,6 +131,9 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	}
 	roc_nix->meta_mempool = mp;
 
+	plt_nix_dbg("Created meta aura %p(%s)for port %d", (void *)*meta_aura, mp_name,
+		    roc_nix->port_id);
+
 	if (!roc_nix->local_meta_aura_ena) {
 		inl_cfg->buf_sz = buf_sz;
 		inl_cfg->nb_bufs = nb_bufs;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 1cae3084e1..3bccc34d79 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -378,8 +378,9 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	if (rc)
 		return rc;
 
-	fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL :
-						  RTE_ETH_FC_TX_PAUSE;
+	fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL : RTE_ETH_FC_TX_PAUSE;
+	fc->rx_pause = (fc->mode == RTE_ETH_FC_FULL) || (fc->mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc->mode == RTE_ETH_FC_FULL) || (fc->mode == RTE_ETH_FC_TX_PAUSE);
 	return rc;
 }
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 068b7c3502..bce6d59bbc 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -342,6 +342,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			return rc;
 	}
 
+	/* Skip mode set if it is we are in same state */
+	if (fc->rx_pause == rx_pause && fc->tx_pause == tx_pause)
+		return 0;
+
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
 	if (rc)
 		return rc;
-- 
2.25.1



* [PATCH 04/21] common/cnxk: reduce sqes per sqb by one
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 02/21] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 03/21] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 05/21] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
                   ` (18 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Each SQB reserves its last SQE to store a pointer to the next SQB, so
each SQB holds either 31 or 63 SQEs depending on the send descriptor
size selected.

This patch also considers sqb_slack to maintain threshold buffers for
syncing between HW and SW. The threshold is the maximum of 30% of the
queue size and sqb_slack.
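
As a hedged worked example (not part of this patch), assuming a 4 KB SQB
block, 64-byte send descriptors and the default slack of 12:

	/*
	 * sqes_per_sqb = (4096 / 8) / 8 - 1 = 63  (one SQE reserved for link)
	 * For nb_desc = 1024:
	 *   nb_sqb_bufs = DIV_CEIL(1024, 63)     = 17
	 *   thr         = DIV_CEIL(17 * 30, 100) = 6   (30% threshold)
	 *   nb_sqb_bufs += 1 prefetch            = 18  (>= NIX_DEF_SQB)
	 *   pool size   = 18 + MAX(6, 12)        = 30 SQBs
	 */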

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       |  2 +-
 drivers/common/cnxk/roc_nix_priv.h  |  2 +-
 drivers/common/cnxk/roc_nix_queue.c | 21 ++++++++++-----------
 drivers/event/cnxk/cn10k_eventdev.c |  2 +-
 drivers/event/cnxk/cn9k_eventdev.c  |  2 +-
 5 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 188b8800d3..50aef4fe85 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -13,7 +13,7 @@
 #define ROC_NIX_BPF_STATS_MAX	      12
 #define ROC_NIX_MTR_ID_INVALID	      UINT32_MAX
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
-#define ROC_NIX_SQB_LOWER_THRESH      70U
+#define ROC_NIX_SQB_THRESH	      30U
 #define ROC_NIX_SQB_SLACK	      12U
 
 /* Reserved interface types for BPID allocation */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 99e27cdc56..7144d1ee10 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -12,7 +12,7 @@
 #define NIX_MAX_SQB	     ((uint16_t)512)
 #define NIX_DEF_SQB	     ((uint16_t)16)
 #define NIX_MIN_SQB	     ((uint16_t)8)
-#define NIX_SQB_LIST_SPACE   ((uint16_t)2)
+#define NIX_SQB_PREFETCH     ((uint16_t)1)
 
 /* Apply BP/DROP when CQ is 95% full */
 #define NIX_CQ_THRESH_LEVEL	(5 * 256 / 100)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index ac4d9856c1..d29fafa895 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -982,7 +982,7 @@ static int
 sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
-	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
+	uint16_t sqes_per_sqb, count, nb_sqb_bufs, thr;
 	struct npa_pool_s pool;
 	struct npa_aura_s aura;
 	uint64_t blk_sz;
@@ -995,22 +995,21 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	else
 		sqes_per_sqb = (blk_sz / 8) / 8;
 
+	/* Reserve One SQE in each SQB to hold pointer for next SQB */
+	sqes_per_sqb -= 1;
+
 	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
-	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
-	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
+	nb_sqb_bufs = PLT_DIV_CEIL(sq->nb_desc, sqes_per_sqb);
+	thr = PLT_DIV_CEIL((nb_sqb_bufs * ROC_NIX_SQB_THRESH), 100);
+	nb_sqb_bufs += NIX_SQB_PREFETCH;
 	/* Clamp up the SQB count */
-	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
-			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
+	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count, (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
 
 	sq->nb_sqb_bufs = nb_sqb_bufs;
 	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
-	sq->nb_sqb_bufs_adj =
-		nb_sqb_bufs -
-		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
-	sq->nb_sqb_bufs_adj =
-		(sq->nb_sqb_bufs_adj * ROC_NIX_SQB_LOWER_THRESH) / 100;
+	sq->nb_sqb_bufs_adj = nb_sqb_bufs;
 
-	nb_sqb_bufs += roc_nix->sqb_slack;
+	nb_sqb_bufs += PLT_MAX(thr, roc_nix->sqb_slack);
 	/* Explicitly set nat_align alone as by default pool is with both
 	 * nat_align and buf_offset = 1 which we don't want for SQB.
 	 */
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 071ea5a212..afd8e323b8 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -995,7 +995,7 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 		txq->nb_sqb_bufs_adj =
-			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+			((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 2d2985f175..b104d19b9b 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1037,7 +1037,7 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 		txq->nb_sqb_bufs_adj =
-			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+			((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
-- 
2.25.1



* [PATCH 05/21] common/cnxk: dump SW SSO work count as xstat
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (2 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 04/21] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 06/21] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
                   ` (17 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Dump SW SSO work count as xstat.
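
As an application-level illustration only (not part of this patch), the
new counter can be read back through the generic xstats API; only the
xstat name "inl_sso_work_cnt" comes from this patch, the rest is standard
ethdev usage:

	#include <stdio.h>
	#include <string.h>
	#include <inttypes.h>
	#include <rte_ethdev.h>

	static void
	dump_inl_sso_work_cnt(uint16_t port_id)
	{
		struct rte_eth_xstat_name names[512];
		struct rte_eth_xstat xstats[512];
		int i, n;

		n = rte_eth_xstats_get_names(port_id, names, RTE_DIM(names));
		if (n <= 0 || n > (int)RTE_DIM(names))
			return;
		if (rte_eth_xstats_get(port_id, xstats, n) != n)
			return;
		for (i = 0; i < n; i++)
			if (!strcmp(names[i].name, "inl_sso_work_cnt"))
				printf("inl_sso_work_cnt: %" PRIu64 "\n",
				       xstats[i].value);
	}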

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_inl_dev_irq.c |  1 +
 drivers/common/cnxk/roc_nix_inl_priv.h    |  1 +
 drivers/common/cnxk/roc_nix_stats.c       | 17 +++++++++++------
 drivers/common/cnxk/roc_nix_xstats.h      |  4 ++++
 4 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 445b440447..becd7907f2 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -41,6 +41,7 @@ nix_inl_sso_work_cb(struct nix_inl_dev *inl_dev)
 			goto again;
 	}
 
+	inl_dev->sso_work_cnt += cnt;
 	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
 }
 
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index 528d2db365..b0a8976c6b 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -57,6 +57,7 @@ struct nix_inl_dev {
 	bool is_nix1;
 	uint8_t spb_drop_pc;
 	uint8_t lpb_drop_pc;
+	uint64_t sso_work_cnt;
 
 	/* NIX/CPT data */
 	void *inb_sa_base;
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index 6b5803af84..ca0e8ccb4f 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -24,12 +24,7 @@
 int
 roc_nix_num_xstats_get(struct roc_nix *roc_nix)
 {
-	if (roc_nix_is_vf_or_sdp(roc_nix))
-		return CNXK_NIX_NUM_XSTATS_REG;
-	else if (roc_model_is_cn9k())
-		return CNXK_NIX_NUM_XSTATS_CGX;
-
-	return CNXK_NIX_NUM_XSTATS_RPM;
+	return roc_nix_xstats_names_get(roc_nix, NULL, 0);
 }
 
 int
@@ -360,6 +355,12 @@ roc_nix_xstats_get(struct roc_nix *roc_nix, struct roc_nix_xstat *xstats,
 				xstats[count].id = count;
 				count++;
 			}
+			for (i = 0; i < PLT_DIM(inl_sw_xstats); i++) {
+				if (!inl_sw_xstats[i].offset)
+					xstats[count].value = inl_dev->sso_work_cnt;
+				xstats[count].id = count;
+				count++;
+			}
 		}
 	}
 
@@ -475,6 +476,10 @@ roc_nix_xstats_names_get(struct roc_nix *roc_nix,
 						      inl_nix_rq_xstats, i);
 				count++;
 			}
+			for (i = 0; i < PLT_DIM(inl_sw_xstats); i++) {
+				NIX_XSTATS_NAME_PRINT(xstats_names, count, inl_sw_xstats, i);
+				count++;
+			}
 		}
 	}
 
diff --git a/drivers/common/cnxk/roc_nix_xstats.h b/drivers/common/cnxk/roc_nix_xstats.h
index 813fb7f578..11b8e1c0ff 100644
--- a/drivers/common/cnxk/roc_nix_xstats.h
+++ b/drivers/common/cnxk/roc_nix_xstats.h
@@ -206,6 +206,10 @@ static const struct cnxk_nix_xstats_name nix_tx_xstats_cgx[] = {
 	{"cgx_tx_pause_packets", CGX_TX_PAUSE_PKTS},
 };
 
+static const struct cnxk_nix_xstats_name inl_sw_xstats[] = {
+	{"inl_sso_work_cnt", 0},
+};
+
 #define CNXK_NIX_NUM_RX_XSTATS	   PLT_DIM(nix_rx_xstats)
 #define CNXK_NIX_NUM_TX_XSTATS	   PLT_DIM(nix_tx_xstats)
 #define CNXK_NIX_NUM_QUEUE_XSTATS  PLT_DIM(nix_q_xstats)
-- 
2.25.1



* [PATCH 06/21] common/cnxk: add percent drop threshold to pool
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (3 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 05/21] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 07/21] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
                   ` (16 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Sunil Kumar Kori <skori@marvell.com>

Currently a hard-coded drop threshold (95%) is configured on the
aura/pool as the drop limit.

This patch adds an input parameter to the RoC API so that a user-passed
percentage value can be configured instead.
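
As a hedged illustration (not part of this patch), for an aura whose
limit >> shift works out to 240 buffers:

	/*
	 * rq_cfg.pool_drop_pct = 90:
	 *   bp_thresh = NIX_RQ_AURA_THRESH(90, 240) = (240 * 90) / 100 = 216
	 * rq_cfg.pool_drop_pct = 0 (unset), default ROC_NIX_AURA_THRESH (95) applies:
	 *   bp_thresh = (240 * 95) / 100 = 228
	 */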

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/common/cnxk/roc_nix.h            |  6 ++++--
 drivers/common/cnxk/roc_nix_fc.c         | 17 ++++++++++++-----
 drivers/common/cnxk/roc_nix_inl.c        |  2 +-
 drivers/common/cnxk/roc_nix_priv.h       |  2 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  4 ++--
 5 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 50aef4fe85..fde8fe4ecc 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -15,6 +15,7 @@
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
 #define ROC_NIX_SQB_THRESH	      30U
 #define ROC_NIX_SQB_SLACK	      12U
+#define ROC_NIX_AURA_THRESH	      95U
 
 /* Reserved interface types for BPID allocation */
 #define ROC_NIX_INTF_TYPE_CGX  0
@@ -197,6 +198,7 @@ struct roc_nix_fc_cfg {
 			uint16_t cq_drop;
 			bool enable;
 			uint64_t pool;
+			uint64_t pool_drop_pct;
 		} rq_cfg;
 
 		struct {
@@ -849,8 +851,8 @@ uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
 
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
-void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
-				     uint8_t ena, uint8_t force, uint8_t tc);
+void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
+				     uint8_t force, uint8_t tc, uint64_t drop_percent);
 int __roc_api roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type,
 				  uint8_t bp_cnt, uint16_t *bpids);
 int __roc_api roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 3618d2920b..98dd9a9e66 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -297,6 +297,7 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct roc_nix_fc_cfg tmp;
+	uint64_t pool_drop_pct;
 	struct roc_nix_rq *rq;
 	int sso_ena = 0, rc;
 
@@ -307,13 +308,19 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return -EINVAL;
 
 	if (sso_ena) {
+		pool_drop_pct = fc_cfg->rq_cfg.pool_drop_pct;
+		/* Use default value for zero pct */
+		if (fc_cfg->rq_cfg.enable && !pool_drop_pct)
+			pool_drop_pct = ROC_NIX_AURA_THRESH;
+
 		roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
 				      fc_cfg->rq_cfg.enable, true,
-				      fc_cfg->rq_cfg.tc);
+				      fc_cfg->rq_cfg.tc, fc_cfg->rq_cfg.pool_drop_pct);
 
 		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc);
+					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc,
+					      fc_cfg->rq_cfg.pool_drop_pct);
 	}
 
 	/* Copy RQ config to CQ config as they are occupying same area */
@@ -476,8 +483,8 @@ nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid,
 #define NIX_BPID_INVALID 0xFFFF
 
 void
-roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
-		      uint8_t force, uint8_t tc)
+roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, uint8_t force,
+		      uint8_t tc, uint64_t drop_percent)
 {
 	uint32_t aura_id = roc_npa_aura_handle_to_aura(pool_id);
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
@@ -513,7 +520,7 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 	}
 
 	bp_intf = 1 << nix->is_nix1;
-	bp_thresh = NIX_RQ_AURA_THRESH(rsp->aura.limit >> rsp->aura.shift);
+	bp_thresh = NIX_RQ_AURA_THRESH(drop_percent, rsp->aura.limit >> rsp->aura.shift);
 
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index b16756d642..329ebf9405 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -263,7 +263,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 		 */
 		if (aura_setup && nix->rqs[0] && nix->rqs[0]->tc != ROC_NIX_PFC_CLASS_INVALID)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      true, true, nix->rqs[0]->tc);
+					      true, true, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
 	} else {
 		rc = nix_inl_global_meta_buffer_validate(idev, rq);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 7144d1ee10..f900a81d8a 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -20,7 +20,7 @@
 /* Apply LBP at 75% of actual BP */
 #define NIX_CQ_LPB_THRESH_FRAC	(75 * 16 / 100)
 #define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
-#define NIX_RQ_AURA_THRESH(x)	(((x)*95) / 100)
+#define NIX_RQ_AURA_THRESH(percent, val) (((val) * (percent)) / 100)
 
 /* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */
 #define CQ_CQE_THRESH_DEFAULT	0x1ULL
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 5ec436382c..3dc3d04a1e 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -263,7 +263,7 @@ cnxk_sso_rx_adapter_queue_add(
 		if (rxq_sp->tx_pause)
 			roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 					      rxq_sp->qconf.mp->pool_id, true,
-					      dev->force_ena_bp, rxq_sp->tc);
+					      dev->force_ena_bp, rxq_sp->tc, ROC_NIX_AURA_THRESH);
 		cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
 		cnxk_eth_dev->nb_rxq_sso++;
 	}
@@ -307,7 +307,7 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 		rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
 		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 				      rxq_sp->qconf.mp->pool_id, false,
-				      dev->force_ena_bp, 0);
+				      dev->force_ena_bp, 0, ROC_NIX_AURA_THRESH);
 		cnxk_eth_dev->nb_rxq_sso--;
 
 		/* Enable drop_re if it was disabled earlier */
-- 
2.25.1



* [PATCH 07/21] common/cnxk: make aura flow control config more predictable
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (4 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 06/21] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 08/21] common/cnxk: update age drop statistics Nithin Dabilpuram
                   ` (15 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

Restrict shared BPID config to the case where force BP is enabled, and
make aura flow control config more predictable: on a collision, instead
of disabling flow control, ignore the new config and log it.

Also remove BPID setup from the Rx adapter, as it is now evaluated and
configured every time the ethdev is stopped/started.
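
A hedged summary of the resulting decision flow in roc_nix_fc_npa_bp_cfg()
(pseudocode, not code from this patch):

	/*
	 * if aura BP is already enabled with a BPID != nix->bpid[tc]:
	 *     if force (roc_nix->force_rx_aura_bp, set from the Rx adapter devarg):
	 *         allocate a shared BPID, enable it on both RX channels and
	 *         rewrite the aura with it
	 *     else:
	 *         keep the existing config, just log and ignore the request
	 * on disable:
	 *     clear aura BP only if its BPID belongs to this port (or force is set)
	 */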

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix.h            |  1 +
 drivers/common/cnxk/roc_nix_fc.c         | 49 ++++++++++++------------
 drivers/common/cnxk/roc_nix_inl.c        |  2 +-
 drivers/common/cnxk/roc_npa.c            |  3 ++
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 13 +------
 5 files changed, 32 insertions(+), 36 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index fde8fe4ecc..2b576f0891 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -451,6 +451,7 @@ struct roc_nix {
 	bool custom_sa_action;
 	bool local_meta_aura_ena;
 	uint32_t meta_buf_sz;
+	bool force_rx_aura_bp;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 98dd9a9e66..bbc27a6421 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -314,13 +314,13 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 			pool_drop_pct = ROC_NIX_AURA_THRESH;
 
 		roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
-				      fc_cfg->rq_cfg.enable, true,
-				      fc_cfg->rq_cfg.tc, fc_cfg->rq_cfg.pool_drop_pct);
+				      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
+				      fc_cfg->rq_cfg.tc, pool_drop_pct);
 
 		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc,
-					      fc_cfg->rq_cfg.pool_drop_pct);
+					      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
+					      fc_cfg->rq_cfg.tc, pool_drop_pct);
 	}
 
 	/* Copy RQ config to CQ config as they are occupying same area */
@@ -493,7 +493,8 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 	struct npa_aq_enq_rsp *rsp;
 	uint8_t bp_thresh, bp_intf;
 	struct mbox *mbox;
-	int rc;
+	uint16_t bpid;
+	int rc, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return;
@@ -522,34 +523,25 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 	bp_intf = 1 << nix->is_nix1;
 	bp_thresh = NIX_RQ_AURA_THRESH(drop_percent, rsp->aura.limit >> rsp->aura.shift);
 
+	bpid = (rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
-		uint16_t bpid =
-			(rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
-
 		/* Disable BP if BPIDs don't match and couldn't add new BPID. */
 		if (bpid != nix->bpid[tc]) {
 			uint16_t bpid_new = NIX_BPID_INVALID;
 
-			if ((nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new) < 0) &&
-			    !force) {
-				plt_info("Disabling BP/FC on aura 0x%" PRIx64
-					 " as it shared across ports or tc",
+			if (force && !nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new)) {
+				plt_info("Setting up shared BPID on shared aura 0x%" PRIx64,
 					 pool_id);
 
-				if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
-					plt_nix_dbg(
-						"Disabling backpressue failed on aura 0x%" PRIx64,
-						pool_id);
-			}
-
-			/* Configure Aura with new BPID if it is allocated. */
-			if (bpid_new != NIX_BPID_INVALID) {
+				/* Configure Aura with new BPID if it is allocated. */
 				if (roc_npa_aura_bp_configure(pool_id, bpid_new, bp_intf, bp_thresh,
 							      true))
-					plt_nix_dbg(
-						"Enabling backpressue failed on aura 0x%" PRIx64,
+					plt_err("Enabling backpressue failed on aura 0x%" PRIx64,
 						pool_id);
+			} else {
+				plt_info("Ignoring port=%u tc=%u config on shared aura 0x%" PRIx64,
+					 roc_nix->port_id, tc, pool_id);
 			}
 		}
 
@@ -562,10 +554,19 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 
 	if (ena) {
 		if (roc_npa_aura_bp_configure(pool_id, nix->bpid[tc], bp_intf, bp_thresh, true))
-			plt_nix_dbg("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
+			plt_err("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	} else {
+		bool found = !!force;
+
+		/* Don't disable if existing BPID is not within this port's list */
+		for (i = 0; i < nix->chan_cnt; i++)
+			if (bpid == nix->bpid[i])
+				found = true;
+		if (!found)
+			return;
+
 		if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
-			plt_nix_dbg("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
+			plt_err("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	}
 
 	return;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 329ebf9405..8592e1cb0b 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -263,7 +263,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 		 */
 		if (aura_setup && nix->rqs[0] && nix->rqs[0]->tc != ROC_NIX_PFC_CLASS_INVALID)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      true, true, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
+					      true, false, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
 	} else {
 		rc = nix_inl_global_meta_buffer_validate(idev, rq);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index d6a97e49c9..7463f2522c 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -892,6 +892,9 @@ roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf,
 	struct mbox *mbox;
 	int rc = 0;
 
+	plt_npa_dbg("Setting BPID %u BP_INTF 0x%x BP_THRESH %u enable %u on aura %" PRIx64,
+		    bpid, bp_intf, bp_thresh, enable, aura_handle);
+
 	if (lf == NULL)
 		return NPA_ERR_PARAM;
 
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 3dc3d04a1e..81e61ed856 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -260,10 +260,8 @@ cnxk_sso_rx_adapter_queue_add(
 							     false);
 		}
 
-		if (rxq_sp->tx_pause)
-			roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
-					      rxq_sp->qconf.mp->pool_id, true,
-					      dev->force_ena_bp, rxq_sp->tc, ROC_NIX_AURA_THRESH);
+		/* Propagate force bp devarg */
+		cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
 		cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
 		cnxk_eth_dev->nb_rxq_sso++;
 	}
@@ -293,8 +291,6 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 			      int32_t rx_queue_id)
 {
 	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
-	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-	struct cnxk_eth_rxq_sp *rxq_sp;
 	int i, rc = 0;
 
 	RTE_SET_USED(event_dev);
@@ -302,12 +298,7 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
 			cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
 	} else {
-		rxq_sp = cnxk_eth_rxq_to_sp(
-			eth_dev->data->rx_queues[rx_queue_id]);
 		rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
-		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
-				      rxq_sp->qconf.mp->pool_id, false,
-				      dev->force_ena_bp, 0, ROC_NIX_AURA_THRESH);
 		cnxk_eth_dev->nb_rxq_sso--;
 
 		/* Enable drop_re if it was disabled earlier */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 08/21] common/cnxk: update age drop statistics
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (5 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 07/21] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 09/21] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
                   ` (14 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Update age drop statistics and add telemetry statistics for age drops.
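
As a rough illustration of how the new counters surface, a caller could
read them from the per-queue TX stats once the feature check passes.
This is only a sketch; it assumes the existing roc_nix_stats_queue_get()
helper that fills struct roc_nix_stats_queue.

	struct roc_nix_stats_queue qstats;

	/* Per-SQ stats; the age-drop fields are populated only when
	 * roc_feature_nix_has_age_drop_stats() reports support.
	 */
	if (!roc_nix_stats_queue_get(roc_nix, qid, false /* is_rx */, &qstats) &&
	    roc_feature_nix_has_age_drop_stats())
		plt_info("sq %u age drops: %" PRIu64 " pkts, %" PRIu64 " octs",
			 qid, qstats.tx_age_drop_pkts, qstats.tx_age_drop_octs);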

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/cnxk_telemetry_nix.c | 4 ++++
 drivers/common/cnxk/hw/nix.h             | 2 ++
 drivers/common/cnxk/roc_features.h       | 6 ++++++
 drivers/common/cnxk/roc_nix.h            | 2 ++
 drivers/common/cnxk/roc_nix_stats.c      | 4 ++++
 5 files changed, 18 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_telemetry_nix.c b/drivers/common/cnxk/cnxk_telemetry_nix.c
index b7285cf137..ccae5d7853 100644
--- a/drivers/common/cnxk/cnxk_telemetry_nix.c
+++ b/drivers/common/cnxk/cnxk_telemetry_nix.c
@@ -680,6 +680,10 @@ nix_sq_ctx(volatile void *qctx, struct plt_tel_data *d)
 	/* W12 */
 	CNXK_TEL_DICT_BF_PTR(d, ctx, pkts, w12_);
 
+	/* W13 */
+	CNXK_TEL_DICT_INT(d, ctx, aged_drop_octs, w13_);
+	CNXK_TEL_DICT_INT(d, ctx, aged_drop_pkts, w13_);
+
 	/* W14 */
 	CNXK_TEL_DICT_BF_PTR(d, ctx, drop_octs, w14_);
 
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 0d8f2a5e9b..fbdf1b64f6 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -363,6 +363,8 @@
 #define NIX_LF_SQ_OP_STATUS	 (0xa30ull)
 #define NIX_LF_SQ_OP_DROP_OCTS	 (0xa40ull)
 #define NIX_LF_SQ_OP_DROP_PKTS	 (0xa50ull)
+#define NIX_LF_SQ_OP_AGE_DROP_OCTS (0xa60ull) /* [CN10K, .) */
+#define NIX_LF_SQ_OP_AGE_DROP_PKTS (0xa70ull) /* [CN10K, .) */
 #define NIX_LF_CQ_OP_INT	 (0xb00ull)
 #define NIX_LF_CQ_OP_DOOR	 (0xb30ull)
 #define NIX_LF_CQ_OP_STATUS	 (0xb40ull)
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index c2893faa65..6fe01015d8 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -47,4 +47,10 @@ roc_feature_nix_has_rxchan_multi_bpid(void)
 		return true;
 	return false;
 }
+
+static inline bool
+roc_feature_nix_has_age_drop_stats(void)
+{
+	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+}
 #endif
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 2b576f0891..f84e473db6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -293,6 +293,8 @@ struct roc_nix_stats_queue {
 			uint64_t tx_octs;
 			uint64_t tx_drop_pkts;
 			uint64_t tx_drop_octs;
+			uint64_t tx_age_drop_pkts;
+			uint64_t tx_age_drop_octs;
 		};
 	};
 };
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index ca0e8ccb4f..1e93191a07 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -137,6 +137,10 @@ nix_stat_tx_queue_get(struct nix *nix, uint16_t qid,
 	qstats->tx_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_OCTS);
 	qstats->tx_drop_pkts = qstat_read(nix, qid, NIX_LF_SQ_OP_DROP_PKTS);
 	qstats->tx_drop_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_DROP_OCTS);
+	if (roc_feature_nix_has_age_drop_stats()) {
+		qstats->tx_age_drop_pkts = qstat_read(nix, qid, NIX_LF_SQ_OP_AGE_DROP_PKTS);
+		qstats->tx_age_drop_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_AGE_DROP_OCTS);
+	}
 }
 
 static int
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 09/21] common/cnxk: fetch eng caps for inl outb inst format
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (6 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 08/21] common/cnxk: update age drop statistics Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 10/21] common/cnxk: add receive error mask Nithin Dabilpuram
                   ` (13 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Fetch engine capabilities and use them along with the model check
to determine whether the inline outbound instruction format uses the
NIX Tx offset or the address.
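
A condensed sketch of how the fetched capabilities are consumed,
mirroring the cnxk_ethdev.c and cn10k_ethdev_sec.c hunks below; the
meaning of bit 35 is taken from that session-create hunk, where it
gates the NIX Tx offset format.

	/* Cached once at security setup time */
	dev->outb.cpt_eng_caps = roc_nix_inl_eng_caps_get(&dev->nix);

	/* ... later, when creating an inline outbound session ... */
	if (roc_feature_nix_has_inl_ipsec_mseg() &&
	    (dev->outb.cpt_eng_caps & BIT_ULL(35)))
		sess_priv.nixtx_off = 1;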

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_cpt.h       |   3 +
 drivers/common/cnxk/roc_nix_inl.c   | 101 ++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.h   |   1 +
 drivers/common/cnxk/roc_nix_priv.h  |   1 +
 drivers/common/cnxk/version.map     |   1 +
 drivers/net/cnxk/cn10k_ethdev_sec.c |   3 +-
 drivers/net/cnxk/cnxk_ethdev.c      |   2 +
 drivers/net/cnxk/cnxk_ethdev.h      |   3 +
 8 files changed, 114 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 92a18711dc..910bd37a0c 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -12,6 +12,9 @@
 #define ROC_AE_CPT_BLOCK_TYPE1 0
 #define ROC_AE_CPT_BLOCK_TYPE2 1
 
+#define ROC_LOADFVC_MAJOR_OP 0x01UL
+#define ROC_LOADFVC_MINOR_OP 0x08UL
+
 /* Default engine groups */
 #define ROC_CPT_DFLT_ENG_GRP_SE	   0UL
 #define ROC_CPT_DFLT_ENG_GRP_SE_IE 1UL
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 8592e1cb0b..67f8ce9aa0 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -602,6 +602,96 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
 	return rc;
 }
 
+static void
+nix_inl_eng_caps_get(struct nix *nix)
+{
+	struct roc_cpt_lf *lf = nix->cpt_lf_base;
+	uintptr_t lmt_base = lf->lmt_base;
+	union cpt_res_s res, *hw_res;
+	struct cpt_inst_s inst;
+	uint64_t *rptr;
+
+	hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
+	if (hw_res == NULL) {
+		plt_err("Couldn't allocate memory for result address");
+		return;
+	}
+
+	rptr = plt_zmalloc(ROC_ALIGN, 0);
+	if (rptr == NULL) {
+		plt_err("Couldn't allocate memory for rptr");
+		plt_free(hw_res);
+		return;
+	}
+
+	/* Fill CPT_INST_S for LOAD_FVC/HW_CRYPTO_SUPPORT microcode op */
+	memset(&inst, 0, sizeof(struct cpt_inst_s));
+	inst.res_addr = (uint64_t)hw_res;
+	inst.rptr = (uint64_t)rptr;
+	inst.w4.s.opcode_major = ROC_LOADFVC_MAJOR_OP;
+	inst.w4.s.opcode_minor = ROC_LOADFVC_MINOR_OP;
+	inst.w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE;
+
+	/* Use 1 min timeout for the poll */
+	const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();
+
+	if (roc_model_is_cn9k()) {
+		uint64_t lmt_status;
+
+		hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
+		plt_io_wmb();
+
+		do {
+			roc_lmt_mov_seg((void *)lmt_base, &inst, 4);
+			lmt_status = roc_lmt_submit_ldeor(lf->io_addr);
+		} while (lmt_status != 0);
+
+		/* Wait until CPT instruction completes */
+		do {
+			res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+			if (unlikely(plt_tsc_cycles() > timeout))
+				break;
+		} while (res.cn9k.compcode == CPT_COMP_NOT_DONE);
+
+		if (res.cn9k.compcode != CPT_COMP_GOOD) {
+			plt_err("LOAD FVC operation timed out");
+			return;
+		}
+	} else {
+		uint64_t lmt_arg, io_addr;
+		uint16_t lmt_id;
+
+		hw_res->cn10k.compcode = CPT_COMP_NOT_DONE;
+
+		/* Use this lcore's LMT line as no one else is using it */
+		ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+		memcpy((void *)lmt_base, &inst, sizeof(inst));
+
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
+		io_addr = lf->io_addr | ROC_CN10K_CPT_INST_DW_M1 << 4;
+
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		plt_io_wmb();
+
+		/* Wait until CPT instruction completes */
+		do {
+			res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+			if (unlikely(plt_tsc_cycles() > timeout))
+				break;
+		} while (res.cn10k.compcode == CPT_COMP_NOT_DONE);
+
+		if (res.cn10k.compcode != CPT_COMP_GOOD || res.cn10k.uc_compcode) {
+			plt_err("LOAD FVC operation timed out");
+			goto exit;
+		}
+	}
+
+	nix->cpt_eng_caps = plt_be_to_cpu_64(*rptr);
+exit:
+	plt_free(rptr);
+	plt_free(hw_res);
+}
+
 int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
@@ -652,6 +742,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
 	}
+	nix->cpt_eng_caps = roc_cpt->hw_caps[CPT_ENG_TYPE_SE].u;
 
 	/* Setup Inbound SA table */
 	rc = nix_inl_inb_sa_tbl_setup(roc_nix);
@@ -871,6 +962,8 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 		}
 	}
 
+	/* Fetch engine capabilities */
+	nix_inl_eng_caps_get(nix);
 	return 0;
 
 lf_fini:
@@ -1571,3 +1664,11 @@ roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb)
 {
 	meta_pool_cb = cb;
 }
+
+uint64_t
+roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->cpt_eng_caps;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 6220ba6773..daa21a941a 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -212,5 +212,6 @@ int __roc_api roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
 int __roc_api roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr,
 				    void *sa_cptr, bool inb, uint16_t sa_len);
 void __roc_api roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file);
+uint64_t __roc_api roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix);
 
 #endif /* _ROC_NIX_INL_H_ */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index f900a81d8a..6872630dc8 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -209,6 +209,7 @@ struct nix {
 	uint16_t outb_se_ring_base;
 	uint16_t cpt_lbpid;
 	uint16_t cpt_nixbpid;
+	uint64_t cpt_eng_caps;
 	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index d740d9df81..809fd81b20 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -186,6 +186,7 @@ INTERNAL {
 	roc_nix_inl_dev_rq_put;
 	roc_nix_inl_dev_unlock;
 	roc_nix_inl_dev_xaq_realloc;
+	roc_nix_inl_eng_caps_get;
 	roc_nix_inl_inb_is_enabled;
 	roc_nix_inl_inb_init;
 	roc_nix_inl_inb_sa_base_get;
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 3c32de0f94..9625704ec1 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -809,7 +809,8 @@ cn10k_eth_sec_session_create(void *device,
 		sess_priv.chksum = (!ipsec->options.ip_csum_enable << 1 |
 				    !ipsec->options.l4_csum_enable);
 		sess_priv.dec_ttl = ipsec->options.dec_ttl;
-		if (roc_feature_nix_has_inl_ipsec_mseg())
+		if (roc_feature_nix_has_inl_ipsec_mseg() &&
+		    dev->outb.cpt_eng_caps & BIT_ULL(35))
 			sess_priv.nixtx_off = 1;
 
 		/* Pointer from eth_sec -> outb_sa */
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 3bccc34d79..ff0c3b8ed1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -203,6 +203,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 			plt_err("Outbound fc sw mem alloc failed");
 			goto sa_bmap_free;
 		}
+
+		dev->outb.cpt_eng_caps = roc_nix_inl_eng_caps_get(nix);
 	}
 	return 0;
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 62a06e5d03..d76f5486e6 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -285,6 +285,9 @@ struct cnxk_eth_dev_sec_outb {
 
 	/* Lock to synchronize sa setup/release */
 	rte_spinlock_t lock;
+
+	/* Engine caps */
+	uint64_t cpt_eng_caps;
 };
 
 struct cnxk_eth_dev {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 10/21] common/cnxk: add receive error mask
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (7 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 09/21] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 11/21] common/cnxk: fix null pointer dereference Nithin Dabilpuram
                   ` (12 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add support to configure the receive error mask for CN106B0.
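
These definitions are consumed on the ethdev side later in this series
(patch 20/21); a condensed sketch of that usage, with the bit names
taken from this patch, looks like:

	/* Keep config bits outside the RX error field, then set the
	 * additional error bits handled on parts with the mask feature.
	 */
	rx_cfg &= ROC_NIX_LF_RX_CFG_RX_ERROR_MASK;

	if (roc_feature_nix_has_drop_re_mask())
		rx_cfg |= (ROC_NIX_RE_CRC8_PCH | ROC_NIX_RE_MACSEC);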

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/roc_features.h |  6 ++++++
 drivers/common/cnxk/roc_nix.h      | 16 ++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 6fe01015d8..ce12a1dca4 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -16,6 +16,12 @@ roc_feature_nix_has_inl_ipsec_mseg(void)
 	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
 }
 
+static inline bool
+roc_feature_nix_has_drop_re_mask(void)
+{
+	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+}
+
 static inline bool
 roc_feature_nix_has_inl_rq_mask(void)
 {
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index f84e473db6..37d0ed5ebe 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -242,6 +242,22 @@ struct roc_nix_eeprom_info {
 #define ROC_NIX_LF_RX_CFG_LEN_OL4     BIT_ULL(40)
 #define ROC_NIX_LF_RX_CFG_LEN_OL3     BIT_ULL(41)
 
+#define ROC_NIX_LF_RX_CFG_RX_ERROR_MASK 0xFFFFFFFFFFF80000
+#define ROC_NIX_RE_PARTIAL		BIT_ULL(1)
+#define ROC_NIX_RE_JABBER		BIT_ULL(2)
+#define ROC_NIX_RE_CRC8_PCH		BIT_ULL(5)
+#define ROC_NIX_RE_CNC_INV		BIT_ULL(6)
+#define ROC_NIX_RE_FCS			BIT_ULL(7)
+#define ROC_NIX_RE_FCS_RCV		BIT_ULL(8)
+#define ROC_NIX_RE_TERMINATE		BIT_ULL(9)
+#define ROC_NIX_RE_MACSEC		BIT_ULL(10)
+#define ROC_NIX_RE_RX_CTL		BIT_ULL(11)
+#define ROC_NIX_RE_SKIP			BIT_ULL(12)
+#define ROC_NIX_RE_DMAPKT		BIT_ULL(15)
+#define ROC_NIX_RE_UNDERSIZE		BIT_ULL(16)
+#define ROC_NIX_RE_OVERSIZE		BIT_ULL(17)
+#define ROC_NIX_RE_OL2_LENMISM		BIT_ULL(18)
+
 /* Group 0 will be used for RSS, 1 -7 will be used for npc_flow RSS action*/
 #define ROC_NIX_RSS_GROUP_DEFAULT    0
 #define ROC_NIX_RSS_GRPS	     8
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 11/21] common/cnxk: fix null pointer dereference
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (8 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 10/21] common/cnxk: add receive error mask Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 12/21] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
                   ` (11 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Gowrishankar Muthukrishnan

From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>

Fix null pointer dereferences reported by klocwork.

Fixes: 4398c4092f3d ("common/cnxk: dump inline device RQ context")
Fixes: 79dc6f324e82 ("common/cnxk: add inline function for statistics")

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/common/cnxk/roc_nix_debug.c   | 8 +++++++-
 drivers/common/cnxk/roc_nix_inl_dev.c | 2 +-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 399d0d7eae..a1c3db284b 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -733,7 +733,13 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file)
 	inl_rq = roc_nix_inl_dev_rq(roc_nix);
 	if (inl_rq) {
 		struct idev_cfg *idev = idev_get_cfg();
-		struct nix_inl_dev *inl_dev = idev->nix_inl_dev;
+		struct nix_inl_dev *inl_dev = NULL;
+
+		if (idev && idev->nix_inl_dev)
+			inl_dev = idev->nix_inl_dev;
+
+		if (!inl_dev)
+			return -EINVAL;
 
 		rc = nix_q_ctx_get(&inl_dev->dev, NIX_AQ_CTYPE_RQ, inl_rq->qid, &ctx);
 		if (rc) {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 196a04db09..b6abafd5c4 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -843,7 +843,7 @@ roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats)
 	if (stats == NULL)
 		return NIX_ERR_PARAM;
 
-	if (!idev && idev->nix_inl_dev)
+	if (idev && idev->nix_inl_dev)
 		inl_dev = idev->nix_inl_dev;
 
 	if (!inl_dev)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 12/21] common/cnxk: fix parameter in NIX dump
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (9 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 11/21] common/cnxk: fix null pointer dereference Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 13/21] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
                   ` (10 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Gowrishankar Muthukrishnan

From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>

Fix the parameter passed to nix_dump to match what the format
specifier expects.

Fixes: d2f168dfa5de ("common/cnxk: support 10K B0 for inline IPsec")

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/common/cnxk/roc_nix_debug.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index a1c3db284b..8c7d902f1e 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -664,8 +664,8 @@ nix_lf_cq_dump(__io struct nix_cq_ctx_s *ctx, FILE *file)
 	nix_dump(file,
 		 "W1: lbpid_high \t\t\t0x%03x\nW1: lbpid_med \t\t\t0x%03x\n"
 		 "W1: lbpid_low \t\t\t0x%03x\n(W1: lbpid) \t\t\t0x%03x\n",
-		 ctx->lbpid_high, ctx->lbpid_med, ctx->lbpid_low,
-		 ctx->lbpid_high << 6 | ctx->lbpid_med << 3 | ctx->lbpid_low);
+		 ctx->lbpid_high, ctx->lbpid_med, ctx->lbpid_low, (unsigned int)
+		 (ctx->lbpid_high << 6 | ctx->lbpid_med << 3 | ctx->lbpid_low));
 	nix_dump(file, "W1: lbp_ena \t\t\t\t%d\n", ctx->lbp_ena);
 
 	nix_dump(file, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d",
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 13/21] common/cnxk: set relchan in TL4 config for each SDP queue
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (10 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 12/21] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 14/21] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
                   ` (9 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Veerasenareddy Burru

From: Veerasenareddy Burru <vburru@marvell.com>

Set a distinct relchan in each TL4 queue connected to SDP.

Currently, relchan in the TL4 SDP config is set to 0 for all
SDP-NIX queues. Each TL4 queue for SDP needs to be configured with a
distinct channel so that SDP can provide per-channel backpressure to
NIX. A condensed sketch of the resulting programming is shown below.
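
The sketch below uses the context and names from the hunk that
follows; the remaining register-value programming is unchanged and is
elided here.

		/* Configure TL4 to send to the SDP channel instead of
		 * CGX/LBK, deriving the relative channel from the TX
		 * channel base.
		 */
		if (nix->sdp_link) {
			relchan = nix->tx_chan_base & 0xff;
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			/* regval programming for the SDP link follows */
		}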

Signed-off-by: Veerasenareddy Burru <vburru@marvell.com>
---
 drivers/common/cnxk/roc_nix_tm_utils.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 5864833109..9ede1bebe7 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -582,6 +582,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
 
 		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
 		if (nix->sdp_link) {
+			relchan = nix->tx_chan_base & 0xff;
 			plt_tm_dbg("relchan=%u schq=%u tx_chan_cnt=%u\n", relchan, schq,
 				   nix->tx_chan_cnt);
 			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 14/21] common/cnxk: avoid STALL with dual rate on CNF95N
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (11 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 13/21] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 15/21] common/cnxk: update errata info Nithin Dabilpuram
                   ` (8 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Due to an errata, RED_ALGO STALL with a dual shaper rate hangs on
CNF95N and CNF95O platforms. Set RED_ALGO to DISCARD when a dual
shaper rate is configured on CNF95N and CNF95O.
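
A hedged sketch of the extended check: the model helpers are those
added in the hunk below, while NIX_REDALG_DISCARD is assumed to be the
existing discard setting used by the TM code.

	/* On affected parts, avoid STALL when both PIR and CIR are set
	 * by falling back to discard (NIX_REDALG_DISCARD assumed).
	 */
	if (roc_model_is_cn96_cx() || roc_model_is_cnf95xxn_a0() ||
	    roc_model_is_cnf95xxo_a0() || roc_model_is_cnf95xxn_a1() ||
	    roc_model_is_cnf95xxn_b0()) {
		nix_tm_shaper_conf_get(profile, &cir, &pir);

		if (pir.rate && cir.rate)
			tm_node->red_algo = NIX_REDALG_DISCARD;
	}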

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix_tm_utils.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 9ede1bebe7..3840d6d457 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -1267,7 +1267,8 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 	tm_node->red_algo = roc_prof->red_algo;
 
 	/* C0 doesn't support STALL when both PIR & CIR are enabled */
-	if (roc_model_is_cn96_cx()) {
+	if (roc_model_is_cn96_cx() || roc_model_is_cnf95xxn_a0() || roc_model_is_cnf95xxo_a0() ||
+	    roc_model_is_cnf95xxn_a1() || roc_model_is_cnf95xxn_b0()) {
 		nix_tm_shaper_conf_get(profile, &cir, &pir);
 
 		if (pir.rate && cir.rate)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 15/21] common/cnxk: update errata info
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (12 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 14/21] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 16/21] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
                   ` (7 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Update errata info based on CN10KA B0 and CN10KB A0.
Also remove the duplicate model check roc_model_is_cn103xx().

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_errata.h   | 20 ++++++++------------
 drivers/common/cnxk/roc_features.h |  2 +-
 drivers/common/cnxk/roc_model.h    |  6 ------
 3 files changed, 9 insertions(+), 19 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index 2d15e639b7..7ff7e2fc35 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -6,7 +6,7 @@
 
 #include "roc_model.h"
 
-/* Errata IPBUNIXRX-40129 */
+/* Errata IPBUNIXRX-40129, IPBUNIXRX-40179 */
 static inline bool
 roc_errata_nix_has_no_drop_re(void)
 {
@@ -40,7 +40,8 @@ static inline bool
 roc_errata_nix_has_no_vwqe_flush_op(void)
 {
 	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
-		roc_model_is_cnf10kb_a0());
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1() || roc_model_is_cn10ka_b0() ||
+		roc_model_is_cn10kb_a0());
 }
 
 /* Errata IPBURVUM-38481 */
@@ -50,13 +51,6 @@ roc_errata_ruvm_has_no_interrupt_with_msixen(void)
 	return true;
 }
 
-/* Errata IPBUNIXTX-39300 */
-static inline bool
-roc_errata_nix_has_assign_incorrect_qintidx(void)
-{
-	return true;
-}
-
 /* Errata IPBUCPT-38551 */
 static inline bool
 roc_errata_cpt_has_use_incorrect_ldwb(void)
@@ -68,17 +62,19 @@ roc_errata_cpt_has_use_incorrect_ldwb(void)
 static inline bool
 roc_errata_nix_has_overwrite_incorrect_sq_intr(void)
 {
-	return true;
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1());
 }
 
 /* Errata IPBUNIXTX-39248 */
 static inline bool
 roc_errata_nix_has_perf_issue_on_stats_update(void)
 {
-	return true;
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1());
 }
 
-/* Errata IPBUCPT-38726, IPBUCPT-38727 */
+/* Errata IPBUCPT-38736, IPBUCPT-38737 */
 static inline bool
 roc_errata_cpt_hang_on_x2p_bp(void)
 {
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index ce12a1dca4..36ef315f5a 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -7,7 +7,7 @@
 static inline bool
 roc_feature_sso_has_stash(void)
 {
-	return (roc_model_is_cn103xx() | roc_model_is_cn10ka_b0()) ? true : false;
+	return (roc_model_is_cn10kb() | roc_model_is_cn10ka_b0()) ? true : false;
 }
 
 static inline bool
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index f010cc4a44..58046af193 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -258,12 +258,6 @@ roc_model_is_cn10kb(void)
 	return roc_model->flag & ROC_MODEL_CN103xx;
 }
 
-static inline uint64_t
-roc_model_is_cn103xx(void)
-{
-	return roc_model->flag & ROC_MODEL_CN103xx;
-}
-
 static inline bool
 roc_env_is_hw(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 16/21] common/cnxk: sync between mbox up and down messages
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (13 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 15/21] common/cnxk: update errata info Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 17/21] common/cnxk: add more comments to mbox code Nithin Dabilpuram
                   ` (6 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

An issue was observed where, with the PF bound to DPDK and the VF
running as a kernel netdev, the VF does not respond to link events.
This is due to a recent design change in the kernel where the sender
checks that the previous interrupt has been handled, by waiting for
the mailbox data register to become zero, before triggering the
current interrupt.
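
A condensed sketch of the handshake introduced here; both halves are
taken from the roc_dev.c hunks below (dev, vf and mbox_data are the
variables used there).

	/* Sender side (e.g. PF forwarding an UP message to a VF) */
	mbox_msg_send_up(&dev->mbox_vfpf_up, vf);
	mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);

	/* Receiver side, in the mailbox IRQ handler: acknowledge by
	 * zeroing the data register so the sender may proceed.
	 */
	mbox_data = plt_read64(dev->bar2 + RVU_VF_VFPF_MBOX0);
	if (mbox_data)
		plt_write64(!mbox_data, dev->bar2 + RVU_VF_VFPF_MBOX0);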

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_dev.c       | 20 ++++++++-
 drivers/common/cnxk/roc_mbox.c      | 64 +++++++++++++++++++++--------
 drivers/common/cnxk/roc_mbox.h      | 15 +++++++
 drivers/common/cnxk/roc_mbox_priv.h |  6 ++-
 4 files changed, 84 insertions(+), 21 deletions(-)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 5e4e564ebe..e5a5cd7c10 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -195,7 +195,8 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 				vf_msg->rc = msg->rc;
 				vf_msg->pcifunc = msg->pcifunc;
 				/* Send to VF */
-				mbox_msg_send(&dev->mbox_vfpf_up, vf);
+				mbox_msg_send_up(&dev->mbox_vfpf_up, vf);
+				mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
 			}
 		}
 
@@ -498,6 +499,7 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
 
 		/* Send to VF */
 		mbox_msg_send(vf_mbox, vf);
+		mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
 	}
 }
 
@@ -631,6 +633,7 @@ static void
 roc_pf_vf_mbox_irq(void *param)
 {
 	struct dev *dev = param;
+	uint64_t mbox_data;
 	uint64_t intr;
 
 	intr = plt_read64(dev->bar2 + RVU_VF_INT);
@@ -640,6 +643,13 @@ roc_pf_vf_mbox_irq(void *param)
 	plt_write64(intr, dev->bar2 + RVU_VF_INT);
 	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
 
+	/* Reading for UP/DOWN message, next message sending will be delayed
+	 * by 1ms until this region is zeroed mbox_wait_for_zero()
+	 */
+	mbox_data = plt_read64(dev->bar2 + RVU_VF_VFPF_MBOX0);
+	if (mbox_data)
+		plt_write64(!mbox_data, dev->bar2 + RVU_VF_VFPF_MBOX0);
+
 	/* First process all configuration messages */
 	process_msgs(dev, dev->mbox);
 
@@ -651,6 +661,7 @@ static void
 roc_af_pf_mbox_irq(void *param)
 {
 	struct dev *dev = param;
+	uint64_t mbox_data;
 	uint64_t intr;
 
 	intr = plt_read64(dev->bar2 + RVU_PF_INT);
@@ -660,6 +671,13 @@ roc_af_pf_mbox_irq(void *param)
 	plt_write64(intr, dev->bar2 + RVU_PF_INT);
 	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
 
+	/* Reading for UP/DOWN message, next message sending will be delayed
+	 * by 1ms until this region is zeroed mbox_wait_for_zero()
+	 */
+	mbox_data = plt_read64(dev->bar2 + RVU_PF_PFAF_MBOX0);
+	if (mbox_data)
+		plt_write64(!mbox_data, dev->bar2 + RVU_PF_PFAF_MBOX0);
+
 	/* First process all configuration messages */
 	process_msgs(dev, dev->mbox);
 
diff --git a/drivers/common/cnxk/roc_mbox.c b/drivers/common/cnxk/roc_mbox.c
index 7dcd188ca7..5338a960d9 100644
--- a/drivers/common/cnxk/roc_mbox.c
+++ b/drivers/common/cnxk/roc_mbox.c
@@ -10,18 +10,6 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
-#define RVU_AF_AFPF_MBOX0 (0x02000)
-#define RVU_AF_AFPF_MBOX1 (0x02008)
-
-#define RVU_PF_PFAF_MBOX0 (0xC00)
-#define RVU_PF_PFAF_MBOX1 (0xC08)
-
-#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
-#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
-
-#define RVU_VF_VFPF_MBOX0 (0x0000)
-#define RVU_VF_VFPF_MBOX1 (0x0008)
-
 /* RCLK, SCLK in MHz */
 uint16_t dev_rclk_freq;
 uint16_t dev_sclk_freq;
@@ -194,10 +182,31 @@ mbox_alloc_msg_rsp(struct mbox *mbox, int devid, int size, int size_rsp)
 
 /**
  * @internal
- * Send a mailbox message
+ * Synchronization between UP and DOWN messages
  */
-void
-mbox_msg_send(struct mbox *mbox, int devid)
+bool
+mbox_wait_for_zero(struct mbox *mbox, int devid)
+{
+	uint64_t data;
+
+	data = plt_read64((volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+
+	/* If data is non-zero wait for ~1ms and return to caller
+	 * whether data has changed to zero or not after the wait.
+	 */
+	if (data)
+		usleep(1000);
+	else
+		return true;
+
+	data = plt_read64((volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+	return data == 0;
+}
+
+static void
+mbox_msg_send_data(struct mbox *mbox, int devid, uint8_t data)
 {
 	struct mbox_dev *mdev = &mbox->dev[devid];
 	struct mbox_hdr *tx_hdr =
@@ -223,9 +232,28 @@ mbox_msg_send(struct mbox *mbox, int devid)
 	/* The interrupt should be fired after num_msgs is written
 	 * to the shared memory
 	 */
-	plt_write64(1, (volatile void *)(mbox->reg_base +
-					 (mbox->trigger |
-					  (devid << mbox->tr_shift))));
+	plt_write64(data, (volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+}
+
+/**
+ * @internal
+ * Send a mailbox message
+ */
+void
+mbox_msg_send(struct mbox *mbox, int devid)
+{
+	mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
+}
+
+/**
+ * @internal
+ * Send an UP mailbox message
+ */
+void
+mbox_msg_send_up(struct mbox *mbox, int devid)
+{
+	mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
 }
 
 /**
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 3d5746b9b8..93c5451c0f 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -35,6 +35,21 @@ struct mbox_msghdr {
 	int __io rc; /* Msg processed response code */
 };
 
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+
+#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
+
+#define RVU_VF_VFPF_MBOX0 (0x0000)
+#define RVU_VF_VFPF_MBOX1 (0x0008)
+
+#define MBOX_DOWN_MSG 1
+#define MBOX_UP_MSG   2
+
 /* Mailbox message types */
 #define MBOX_MSG_MASK	 0xFFFF
 #define MBOX_MSG_INVALID 0xFFFE
diff --git a/drivers/common/cnxk/roc_mbox_priv.h b/drivers/common/cnxk/roc_mbox_priv.h
index 4fafca6f72..354c8fa52a 100644
--- a/drivers/common/cnxk/roc_mbox_priv.h
+++ b/drivers/common/cnxk/roc_mbox_priv.h
@@ -71,10 +71,12 @@ struct mbox {
 const char *mbox_id2name(uint16_t id);
 int mbox_id2size(uint16_t id);
 void mbox_reset(struct mbox *mbox, int devid);
-int mbox_init(struct mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
-	      int direction, int ndevsi, uint64_t intr_offset);
+int mbox_init(struct mbox *mbox, uintptr_t hwbase, uintptr_t reg_base, int direction, int ndevsi,
+	      uint64_t intr_offset);
 void mbox_fini(struct mbox *mbox);
 void mbox_msg_send(struct mbox *mbox, int devid);
+void mbox_msg_send_up(struct mbox *mbox, int devid);
+bool mbox_wait_for_zero(struct mbox *mbox, int devid);
 int mbox_wait_for_rsp(struct mbox *mbox, int devid);
 int mbox_wait_for_rsp_tmo(struct mbox *mbox, int devid, uint32_t tmo);
 int mbox_get_rsp(struct mbox *mbox, int devid, void **msg);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 17/21] common/cnxk: add more comments to mbox code
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (14 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 16/21] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 18/21] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
                   ` (5 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add more comments to the mbox routines to make the flow easier to
understand.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_dev.c  | 20 +++++++++++++++++---
 drivers/common/cnxk/roc_mbox.c |  5 +++++
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index e5a5cd7c10..3125f9dda2 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -98,6 +98,9 @@ pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
 	return rc;
 }
 
+/* PF will send the messages to AF and wait for responses and forward the
+ * responses to VF.
+ */
 static int
 af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 {
@@ -115,9 +118,10 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 	/* We need to disable PF interrupts. We are in timer interrupt */
 	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
 
-	/* Send message */
+	/* Send message to AF */
 	mbox_msg_send(mbox, 0);
 
+	/* Wait for AF response */
 	do {
 		plt_delay_ms(sleep);
 		timeout++;
@@ -206,6 +210,7 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 	return req_hdr->num_msgs;
 }
 
+/* PF receives mbox DOWN messages from VF and forwards to AF */
 static int
 vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 {
@@ -274,6 +279,7 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 	if (routed > 0) {
 		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
 			     dev->pf, routed, vf);
+		/* PF will send the messages to AF and wait for responses */
 		af_pf_wait_msg(dev, vf, routed);
 		mbox_reset(dev->mbox, 0);
 	}
@@ -289,6 +295,7 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 	return i;
 }
 
+/* VF sends Ack to PF's UP messages */
 static int
 vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
 {
@@ -339,6 +346,7 @@ vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
 	return i;
 }
 
+/* PF handling messages from VF */
 static void
 roc_vf_pf_mbox_handle_msg(void *param)
 {
@@ -352,8 +360,9 @@ roc_vf_pf_mbox_handle_msg(void *param)
 		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
 			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
 				     dev->pf, dev->vf);
+			/* VF initiated down messages */
 			vf_pf_process_msgs(dev, vf);
-			/* UP messages */
+			/* VF replies to PF's UP messages */
 			vf_pf_process_up_msgs(dev, vf);
 			dev->intr.bits[vf / max_bits] &=
 				~(BIT_ULL(vf % max_bits));
@@ -362,6 +371,7 @@ roc_vf_pf_mbox_handle_msg(void *param)
 	dev->timer_set = 0;
 }
 
+/* IRQ to PF from VF - PF context (interrupt thread) */
 static void
 roc_vf_pf_mbox_irq(void *param)
 {
@@ -392,6 +402,7 @@ roc_vf_pf_mbox_irq(void *param)
 	}
 }
 
+/* Received response from AF (PF context) / PF (VF context) */
 static void
 process_msgs(struct dev *dev, struct mbox *mbox)
 {
@@ -451,7 +462,7 @@ process_msgs(struct dev *dev, struct mbox *mbox)
 	}
 
 	mbox_reset(mbox, 0);
-	/* Update acked if someone is waiting a message */
+	/* Update acked if someone is waiting a message - mbox_wait is waiting */
 	mdev->msgs_acked = msgs_acked;
 	plt_wmb();
 }
@@ -597,6 +608,7 @@ mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
 	return -ENODEV;
 }
 
+/* Received up messages from AF (PF context) / PF (in context) */
 static void
 process_msgs_up(struct dev *dev, struct mbox *mbox)
 {
@@ -629,6 +641,7 @@ process_msgs_up(struct dev *dev, struct mbox *mbox)
 	}
 }
 
+/* IRQ to VF from PF - VF context (interrupt thread) */
 static void
 roc_pf_vf_mbox_irq(void *param)
 {
@@ -657,6 +670,7 @@ roc_pf_vf_mbox_irq(void *param)
 	process_msgs_up(dev, &dev->mbox_up);
 }
 
+/* IRQ to PF from AF - PF context (interrupt thread) */
 static void
 roc_af_pf_mbox_irq(void *param)
 {
diff --git a/drivers/common/cnxk/roc_mbox.c b/drivers/common/cnxk/roc_mbox.c
index 5338a960d9..c91fa63e83 100644
--- a/drivers/common/cnxk/roc_mbox.c
+++ b/drivers/common/cnxk/roc_mbox.c
@@ -350,6 +350,11 @@ mbox_wait(struct mbox *mbox, int devid, uint32_t rst_timo)
 	uint32_t timeout = 0, sleep = 1;
 
 	rst_timo = rst_timo * 1000; /* Milli seconds to micro seconds */
+
+	/* Waiting for mdev->msgs_acked tp become equal to mdev->num_msgs,
+	 * mdev->msgs_acked are incremented at process_msgs() in interrupt
+	 * thread context.
+	 */
 	while (mdev->num_msgs > mdev->msgs_acked) {
 		plt_delay_us(sleep);
 		timeout += sleep;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 18/21] common/cnxk: add CN105xxN B0 model
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (15 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 17/21] common/cnxk: add more comments to mbox code Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 19/21] common/cnxk: access valid pass value Nithin Dabilpuram
                   ` (4 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add support for the CN105xxN B0 pass.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c | 1 +
 drivers/common/cnxk/roc_model.h | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index e4767ed91f..f4f2a38e70 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -66,6 +66,7 @@ static const struct model_db {
 	{VENDOR_ARM, PART_105xx, 0, 1, ROC_MODEL_CNF105xx_A1, "cnf10ka_a1"},
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
+	{VENDOR_ARM, PART_105xxN, 1, 0, ROC_MODEL_CNF105xxN_B0, "cnf10kb_b0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 0, ROC_MODEL_CN98xx_A0, "cn98xx_a0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 1, ROC_MODEL_CN98xx_A1, "cn98xx_a1"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 0, ROC_MODEL_CN96xx_A0, "cn96xx_a0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index 58046af193..b6dab4f64e 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -31,6 +31,7 @@ struct roc_model {
 #define ROC_MODEL_CN106xx_A1   BIT_ULL(24)
 #define ROC_MODEL_CNF105xx_A1  BIT_ULL(25)
 #define ROC_MODEL_CN106xx_B0   BIT_ULL(26)
+#define ROC_MODEL_CNF105xxN_B0 BIT_ULL(27)
 /* Following flags describe platform code is running on */
 #define ROC_ENV_HW   BIT_ULL(61)
 #define ROC_ENV_EMUL BIT_ULL(62)
@@ -57,7 +58,7 @@ struct roc_model {
 
 #define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0 | ROC_MODEL_CN106xx_A1 | ROC_MODEL_CN106xx_B0)
 #define ROC_MODEL_CNF105xx  (ROC_MODEL_CNF105xx_A0 | ROC_MODEL_CNF105xx_A1)
-#define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0)
+#define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0 | ROC_MODEL_CNF105xxN_B0)
 #define ROC_MODEL_CN103xx   (ROC_MODEL_CN103xx_A0)
 #define ROC_MODEL_CN10K                                                        \
 	(ROC_MODEL_CN106xx | ROC_MODEL_CNF105xx | ROC_MODEL_CNF105xxN |        \
@@ -252,6 +253,12 @@ roc_model_is_cnf10kb_a0(void)
 	return roc_model->flag & ROC_MODEL_CNF105xxN_A0;
 }
 
+static inline uint64_t
+roc_model_is_cnf10kb_b0(void)
+{
+	return roc_model->flag & ROC_MODEL_CNF105xxN_B0;
+}
+
 static inline uint64_t
 roc_model_is_cn10kb(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 19/21] common/cnxk: access valid pass value
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (16 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 18/21] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 20/21] net/cnxk: add receive error mask Nithin Dabilpuram
                   ` (3 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Hanumanth Pothula

From: Hanumanth Pothula <hpothula@marvell.com>

There is a possibility of accessing an invalid pass value when the
RVU device lookup fails, as the return value is dropped.

Hence, pass through the return value of the RVU device lookup to make
sure a valid pass value is accessed.

Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
---
 drivers/common/cnxk/roc_model.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index f4f2a38e70..6dc2afe7f0 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -148,6 +148,7 @@ cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
 #define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
 	char dirname[PATH_MAX];
 	struct dirent *e;
+	int ret = -1;
 	DIR *dir;
 
 	dir = opendir(SYSFS_PCI_DEVICES);
@@ -165,18 +166,19 @@ cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
 			 e->d_name);
 
 		/* Lookup for rvu device and get part pass information */
-		if (!rvu_device_lookup(dirname, part, pass))
+		ret = rvu_device_lookup(dirname, part, pass);
+		if (!ret)
 			break;
 	}
 
 	closedir(dir);
-	return 0;
+	return ret;
 }
 
 static bool
 populate_model(struct roc_model *model, uint32_t midr)
 {
-	uint32_t impl, major, part, minor, pass;
+	uint32_t impl, major, part, minor, pass = 0;
 	bool found = false;
 	size_t i;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 20/21] net/cnxk: add receive error mask
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (17 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 19/21] common/cnxk: access valid pass value Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-04-11  9:11 ` [PATCH 21/21] common/cnxk: support of 1:n pool:aura per NIX LF Nithin Dabilpuram
                   ` (2 subsequent siblings)
  21 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Receive errors related to MACSEC and USXGMII are masked
for cn10ka_b0 and cn10kb.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index ff0c3b8ed1..6b45ccd0f7 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1265,6 +1265,11 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
+	rx_cfg &= (ROC_NIX_LF_RX_CFG_RX_ERROR_MASK);
+
+	if (roc_feature_nix_has_drop_re_mask())
+		rx_cfg |= (ROC_NIX_RE_CRC8_PCH | ROC_NIX_RE_MACSEC);
+
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
 		/* Disable drop re if rx offload security is enabled and
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 21/21] common/cnxk: support of 1:n pool:aura per NIX LF
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (18 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 20/21] net/cnxk: add receive error mask Nithin Dabilpuram
@ 2023-04-11  9:11 ` Nithin Dabilpuram
  2023-05-18  5:50   ` Jerin Jacob
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  21 siblings, 1 reply; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-04-11  9:11 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev, Rahul Bhansali

From: Rahul Bhansali <rbhansali@marvell.com>

Add support for a 1:N pool:aura mapping per NIX LF when the
inl_cpt_channel devarg is set on the inline device; otherwise a 1:1
pool:aura is created for CN103/CN106B0 SoCs.

With 1:N, a global pool is created with aura 0, and an individual
aura is created per NIX and mapped to this global pool.
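
A hedged sketch of how a driver hooks into this, based on the callback
type and register API added below; the callback body itself is
hypothetical.

/* Hypothetical callback: create (or destroy, when 'destroy' is set) a
 * per-port aura backed by the shared global pool 'pmpool' and return
 * its handle through 'aura_handle'.
 */
static int
custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
		    uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,
		    bool destroy)
{
	/* ... map an aura to the global pool ... */
	return 0;
}

/* Registered once by the platform layer */
roc_nix_inl_custom_meta_pool_cb_register(custom_meta_pool_cb);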

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/common/cnxk/roc_idev_priv.h |   1 +
 drivers/common/cnxk/roc_nix.h       |   1 +
 drivers/common/cnxk/roc_nix_inl.c   | 178 ++++++++++++++++++++++++----
 drivers/common/cnxk/roc_nix_inl.h   |   4 +
 drivers/common/cnxk/version.map     |   1 +
 drivers/event/cnxk/cn10k_worker.h   |   9 +-
 drivers/net/cnxk/cn10k_rx_select.c  |   5 +-
 drivers/net/cnxk/cnxk_ethdev.c      |   3 +
 drivers/net/cnxk/cnxk_ethdev.h      |   3 +
 drivers/net/cnxk/cnxk_ethdev_sec.c  |  62 ++++++++++
 10 files changed, 240 insertions(+), 27 deletions(-)

diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index d83522799f..4983578fc6 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -13,6 +13,7 @@ struct nix_inl_dev;
 
 struct idev_nix_inl_cfg {
 	uint64_t meta_aura;
+	uintptr_t meta_mempool;
 	uint32_t nb_bufs;
 	uint32_t buf_sz;
 	uint32_t refs;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 37d0ed5ebe..548854952b 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -470,6 +470,7 @@ struct roc_nix {
 	bool local_meta_aura_ena;
 	uint32_t meta_buf_sz;
 	bool force_rx_aura_bp;
+	bool custom_meta_aura_ena;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 67f8ce9aa0..69f658ba87 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -7,6 +7,7 @@
 
 uint32_t soft_exp_consumer_cnt;
 roc_nix_inl_meta_pool_cb_t meta_pool_cb;
+roc_nix_inl_custom_meta_pool_cb_t custom_meta_pool_cb;
 
 PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2);
@@ -33,13 +34,14 @@ nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
 		return -EINVAL;
 
 	inl_cfg = &idev->inl_cfg;
-	if (roc_nix->local_meta_aura_ena) {
+
+	if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
+		meta_aura = &inl_cfg->meta_aura;
+	} else {
 		meta_aura = &roc_nix->meta_aura_handle;
 		snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
 			 roc_nix->port_id + 1);
 		mp_name = mempool_name;
-	} else {
-		meta_aura = &inl_cfg->meta_aura;
 	}
 
 	/* Destroy existing Meta aura */
@@ -72,7 +74,7 @@ nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
 
 static int
 nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
-			 uint64_t *meta_aura)
+			 uint64_t *meta_aura, bool is_local_metaaura)
 {
 	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
 	struct idev_nix_inl_cfg *inl_cfg;
@@ -89,7 +91,7 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	inl_cfg = &idev->inl_cfg;
 	nix_inl_dev = idev->nix_inl_dev;
 
-	if (roc_nix->local_meta_aura_ena) {
+	if (is_local_metaaura) {
 		/* Per LF Meta Aura */
 		inl_rq_id = nix_inl_dev->nb_rqs > 1 ? port_id : 0;
 		inl_rq = &nix_inl_dev->rqs[inl_rq_id];
@@ -134,15 +136,107 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	plt_nix_dbg("Created meta aura %p(%s)for port %d", (void *)*meta_aura, mp_name,
 		    roc_nix->port_id);
 
-	if (!roc_nix->local_meta_aura_ena) {
+	if (!is_local_metaaura) {
 		inl_cfg->buf_sz = buf_sz;
 		inl_cfg->nb_bufs = nb_bufs;
+		inl_cfg->meta_mempool = mp;
 	} else
 		roc_nix->buf_sz = buf_sz;
 
 	return 0;
 }
 
+static int
+nix_inl_custom_meta_aura_destroy(struct roc_nix *roc_nix)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	char mempool_name[24] = {'\0'};
+	char *mp_name = NULL;
+	uint64_t *meta_aura;
+	int rc;
+
+	if (!idev)
+		return -EINVAL;
+
+	inl_cfg = &idev->inl_cfg;
+	meta_aura = &roc_nix->meta_aura_handle;
+	snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
+		 roc_nix->port_id + 1);
+	mp_name = mempool_name;
+
+	/* Destroy existing Meta aura */
+	if (*meta_aura) {
+		uint64_t avail, limit;
+
+		/* Check if all buffers are back to pool */
+		avail = roc_npa_aura_op_available(*meta_aura);
+		limit = roc_npa_aura_op_limit_get(*meta_aura);
+		if (avail != limit)
+			plt_warn("Not all buffers are back to meta pool,"
+				 " %" PRIu64 " != %" PRIu64, avail, limit);
+
+		rc = custom_meta_pool_cb(inl_cfg->meta_mempool, &roc_nix->meta_mempool, mp_name,
+					 meta_aura, 0, 0, true);
+		if (rc) {
+			plt_err("Failed to destroy meta aura, rc=%d", rc);
+			return rc;
+		}
+
+		roc_nix->buf_sz = 0;
+	}
+
+	return 0;
+}
+
+static int
+nix_inl_custom_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
+				uint64_t *meta_aura)
+{
+	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	struct idev_nix_inl_cfg *inl_cfg;
+	struct nix_inl_dev *nix_inl_dev;
+	char mempool_name[24] = {'\0'};
+	uint32_t nb_bufs, buf_sz;
+	char *mp_name = NULL;
+	uintptr_t mp;
+	int rc;
+
+	inl_cfg = &idev->inl_cfg;
+	nix_inl_dev = idev->nix_inl_dev;
+
+	/* Override meta buf count from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
+		nb_bufs = nix_inl_dev->nb_meta_bufs;
+	else
+		nb_bufs = roc_npa_buf_type_limit_get(mask);
+
+	/* Override meta buf size from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
+		buf_sz = nix_inl_dev->meta_buf_sz;
+	else
+		buf_sz = first_skip + NIX_INL_META_SIZE;
+
+	/* Create Metapool name */
+	snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
+		 roc_nix->port_id + 1);
+	mp_name = mempool_name;
+
+	/* Allocate meta aura */
+	rc = custom_meta_pool_cb(inl_cfg->meta_mempool, &mp, mp_name, meta_aura,
+				 buf_sz, nb_bufs, false);
+	if (rc) {
+		plt_err("Failed to allocate meta aura, rc=%d", rc);
+		return rc;
+	}
+
+	/* Overwrite */
+	roc_nix->meta_mempool = mp;
+	roc_nix->buf_sz = buf_sz;
+
+	return 0;
+}
+
 static int
 nix_inl_global_meta_buffer_validate(struct idev_cfg *idev, struct roc_nix_rq *rq)
 {
@@ -228,6 +322,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	struct idev_nix_inl_cfg *inl_cfg;
+	bool is_local_metaaura;
 	bool aura_setup = false;
 	uint64_t *meta_aura;
 	int rc;
@@ -238,18 +333,39 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 	inl_cfg = &idev->inl_cfg;
 
 	/* Create meta aura if not present */
-	if (roc_nix->local_meta_aura_ena)
-		meta_aura = &roc_nix->meta_aura_handle;
-	else
+	if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
 		meta_aura = &inl_cfg->meta_aura;
+		is_local_metaaura = false;
+	} else {
+		meta_aura = &roc_nix->meta_aura_handle;
+		is_local_metaaura = true;
+	}
 
 	if (!(*meta_aura)) {
-		rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura);
+		rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura,
+					      is_local_metaaura);
 		if (rc)
 			return rc;
 
 		aura_setup = true;
 	}
+
+	if (roc_nix->custom_meta_aura_ena) {
+		/* Create metaura for 1:N pool:aura */
+		if (!custom_meta_pool_cb)
+			return -EFAULT;
+
+		meta_aura = &roc_nix->meta_aura_handle;
+		if (!(*meta_aura)) {
+			rc = nix_inl_custom_meta_aura_create(idev, roc_nix, rq->first_skip,
+							     meta_aura);
+			if (rc)
+				return rc;
+
+			aura_setup = true;
+		}
+	}
+
 	/* Update rq meta aura handle */
 	rq->meta_aura_handle = *meta_aura;
 
@@ -698,6 +814,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct roc_cpt_inline_ipsec_inb_cfg cfg;
 	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
 	uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
 	struct roc_cpt *roc_cpt;
 	int rc;
@@ -749,9 +866,13 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	if (rc)
 		return rc;
 
+	inl_dev = idev->nix_inl_dev;
+
+	roc_nix->custom_meta_aura_ena = (roc_nix->local_meta_aura_ena &&
+					 (inl_dev->is_multi_channel || roc_nix->custom_sa_action));
 	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
 		nix->need_meta_aura = true;
-		if (!roc_nix->local_meta_aura_ena)
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs++;
 	}
 
@@ -773,15 +894,17 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 		return -EFAULT;
 
 	nix->inl_inb_ena = false;
+
 	if (nix->need_meta_aura) {
 		nix->need_meta_aura = false;
-		if (roc_nix->local_meta_aura_ena) {
-			nix_inl_meta_aura_destroy(roc_nix);
-		} else {
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs--;
-			if (!idev->inl_cfg.refs)
-				nix_inl_meta_aura_destroy(roc_nix);
-		}
+
+		if (roc_nix->custom_meta_aura_ena)
+			nix_inl_custom_meta_aura_destroy(roc_nix);
+
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy(roc_nix);
 	}
 
 	if (roc_feature_nix_has_inl_rq_mask()) {
@@ -1309,17 +1432,18 @@ roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
 
 	if (ena) {
 		nix->need_meta_aura = true;
-		if (!roc_nix->local_meta_aura_ena)
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs++;
 	} else if (nix->need_meta_aura) {
 		nix->need_meta_aura = false;
-		if (roc_nix->local_meta_aura_ena) {
-			nix_inl_meta_aura_destroy(roc_nix);
-		} else {
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs--;
-			if (!idev->inl_cfg.refs)
-				nix_inl_meta_aura_destroy(roc_nix);
-		}
+
+		if (roc_nix->custom_meta_aura_ena)
+			nix_inl_custom_meta_aura_destroy(roc_nix);
+
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy(roc_nix);
 	}
 }
 
@@ -1672,3 +1796,9 @@ roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix)
 
 	return nix->cpt_eng_caps;
 }
+
+void
+roc_nix_inl_custom_meta_pool_cb_register(roc_nix_inl_custom_meta_pool_cb_t cb)
+{
+	custom_meta_pool_cb = cb;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index daa21a941a..885d95335e 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -121,6 +121,9 @@ typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle,  uintptr_t *mpool,
 					  uint32_t blk_sz, uint32_t nb_bufs, bool destroy,
 					  const char *mempool_name);
+typedef int (*roc_nix_inl_custom_meta_pool_cb_t)(uintptr_t pmpool, uintptr_t *mpool,
+						 const char *mempool_name, uint64_t *aura_handle,
+						 uint32_t blk_sz, uint32_t nb_bufs, bool destroy);
 
 struct roc_nix_inl_dev {
 	/* Input parameters */
@@ -199,6 +202,7 @@ int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
 uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
 void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
+void __roc_api roc_nix_inl_custom_meta_pool_cb_register(roc_nix_inl_custom_meta_pool_cb_t cb);
 
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 809fd81b20..c76564b46e 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -199,6 +199,7 @@ INTERNAL {
 	roc_nix_inb_is_with_inl_dev;
 	roc_nix_inl_meta_aura_check;
 	roc_nix_inl_meta_pool_cb_register;
+	roc_nix_inl_custom_meta_pool_cb_register;
 	roc_nix_inb_mode_set;
 	roc_nix_inl_outb_fini;
 	roc_nix_inl_outb_init;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 06c71c6092..07f0dad97d 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -167,6 +167,10 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		mbuf = u64[1] - sizeof(struct rte_mbuf);
 		rte_prefetch0((void *)mbuf);
 		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			void *lookup_mem = ws->lookup_mem;
+			struct rte_mempool *mp = NULL;
+			uint64_t meta_aura;
+
 			const uint64_t mbuf_init =
 				0x100010000ULL | RTE_PKTMBUF_HEADROOM |
 				(flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
@@ -191,8 +195,11 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 				cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
 				(struct rte_mbuf *)mbuf, d_off, flags,
 				mbuf_init | ((uint64_t)port) << 48);
+			mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem);
+			meta_aura = mp ? mp->pool_id : m->pool->pool_id;
+
 			if (loff)
-				roc_npa_aura_op_free(m->pool->pool_id, 0, iova);
+				roc_npa_aura_op_free(meta_aura, 0, iova);
 		}
 
 		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
diff --git a/drivers/net/cnxk/cn10k_rx_select.c b/drivers/net/cnxk/cn10k_rx_select.c
index b906f6725a..1e0de1b7ac 100644
--- a/drivers/net/cnxk/cn10k_rx_select.c
+++ b/drivers/net/cnxk/cn10k_rx_select.c
@@ -79,9 +79,10 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 #undef R
 	};
 
-	/* Copy multi seg version with no offload for tear down sequence */
+	/* Copy multi seg version with security for tear down sequence */
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		dev->rx_pkt_burst_no_offload = nix_eth_rx_burst_mseg[0];
+		dev->rx_pkt_burst_no_offload =
+			nix_eth_rx_burst_mseg_reas[NIX_RX_OFFLOAD_SECURITY_F];
 
 	if (dev->scalar_ena) {
 		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 6b45ccd0f7..677539c35a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1883,6 +1883,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	/* Register callback for inline meta pool create */
 	roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
 
+	/* Register callback for inline meta pool create 1:N pool:aura */
+	roc_nix_inl_custom_meta_pool_cb_register(cnxk_nix_inl_custom_meta_pool_cb);
+
 	dev->eth_dev = eth_dev;
 	dev->configured = 0;
 	dev->ptype_disable = 0;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index d76f5486e6..85287dd66c 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -608,6 +608,9 @@ cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
 			      struct rte_security_session *sess);
 int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
 			      uint32_t nb_bufs, bool destroy, const char *mempool_name);
+int cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
+				     uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+				     bool destroy);
 
 /* Congestion Management */
 int cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info);
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index cd64daacc0..a66d58ca61 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -6,6 +6,7 @@
 #include <cnxk_mempool.h>
 
 #define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
+#define CN10K_HW_POOL_OPS_NAME "cn10k_hwpool_ops"
 
 #define CNXK_NIX_INL_SELFTEST	      "selftest"
 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
@@ -114,6 +115,67 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
 	return rc;
 }
 
+/* Create Aura and link with Global mempool for 1:N Pool:Aura case */
+int
+cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
+				 uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+				 bool destroy)
+{
+	struct rte_mempool *hp;
+	int rc;
+
+	/* Destroy the mempool if requested */
+	if (destroy) {
+		hp = rte_mempool_lookup(mempool_name);
+		if (!hp)
+			return -ENOENT;
+
+		if (hp->pool_id != *aura_handle) {
+			plt_err("Meta pool aura mismatch");
+			return -EINVAL;
+		}
+
+		rte_mempool_free(hp);
+		plt_free(hp->pool_config);
+
+		*aura_handle = 0;
+		*mpool = 0;
+		return 0;
+	}
+
+	/* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
+	 * support.
+	 */
+	hp = rte_mempool_create_empty(mempool_name, nb_bufs, buf_sz, 0,
+				      sizeof(struct rte_pktmbuf_pool_private),
+				      SOCKET_ID_ANY, 0);
+	if (!hp) {
+		plt_err("Failed to create inline meta pool");
+		return -EIO;
+	}
+
+	rc = rte_mempool_set_ops_byname(hp, CN10K_HW_POOL_OPS_NAME, (void *)pmpool);
+
+	if (rc) {
+		plt_err("Failed to setup ops, rc=%d", rc);
+		goto free_hp;
+	}
+
+	/* Populate buffer */
+	rc = rte_mempool_populate_default(hp);
+	if (rc < 0) {
+		plt_err("Failed to populate pool, rc=%d", rc);
+		goto free_hp;
+	}
+
+	*aura_handle = hp->pool_id;
+	*mpool = (uintptr_t)hp;
+	return 0;
+free_hp:
+	rte_mempool_free(hp);
+	return rc;
+}
+
 static int
 parse_max_ipsec_rules(const char *key, const char *value, void *extra_args)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* Re: [PATCH 21/21] common/cnxk: support of 1:n pool:aura per NIX LF
  2023-04-11  9:11 ` [PATCH 21/21] common/cnxk: support of 1:n pool:aura per NIX LF Nithin Dabilpuram
@ 2023-05-18  5:50   ` Jerin Jacob
  0 siblings, 0 replies; 89+ messages in thread
From: Jerin Jacob @ 2023-05-18  5:50 UTC (permalink / raw)
  To: Nithin Dabilpuram
  Cc: Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Pavan Nikhilesh,
	Shijith Thotton, jerinj, dev, Rahul Bhansali

On Tue, Apr 11, 2023 at 2:44 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> From: Rahul Bhansali <rbhansali@marvell.com>
>
> This adds support for 1:N pool:aura per NIX LF when the
> inl_cpt_channel devargs is set for the inline device; otherwise
> a 1:1 pool:aura mapping is created for CN103/CN106B0 SoCs.
>
> With 1:N, a global pool is created with aura 0, and an individual
> aura is created per NIX and mapped to this global pool.
>
> Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>



Please fix the following in this series

Wrong headline format:
        common/cnxk: support of 1:n pool:aura per NIX LF

Is it candidate for Cc: stable@dpdk.org backport?
        common/cnxk: fix null pointer dereference
        common/cnxk: fix parameter in NIX dump

Invalid patch(es) found - checked 21 patches
check-git-log failed

### [PATCH] common/cnxk: add pool BPID to RQ while using common pool

WARNING:SPACING: space prohibited between function name and open parenthesis '('
#130: FILE: drivers/common/cnxk/roc_nix_fc.c:441:
+       TAILQ_FOREACH (roc_nix_tmp, roc_idev_nix_list_get(), next) {

total: 0 errors, 1 warnings, 347 lines checked

20/21 valid patches
checkpatch failed
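
The flagged line is the new NIX list walk in roc_nix_fc.c. If the
warning is to be silenced rather than waived (checkpatch sometimes
tolerates a space before '(' for foreach macros), the expected spelling
is simply the call without the space. A minimal illustrative sketch,
assuming the cnxk ROC definitions (struct roc_nix, struct roc_nix_list,
roc_idev_nix_list_get() and the TAILQ macros) are available; this is
not code from the series:

static void
walk_nix_list_example(void)
{
	struct roc_nix_list *head = roc_idev_nix_list_get();
	struct roc_nix *roc_nix_tmp;

	if (head == NULL)
		return;

	/* No space between the macro name and '(' */
	TAILQ_FOREACH(roc_nix_tmp, head, next) {
		/* per-NIX handling would go here */
		(void)roc_nix_tmp;
	}
}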

> ---
>  drivers/common/cnxk/roc_idev_priv.h |   1 +
>  drivers/common/cnxk/roc_nix.h       |   1 +
>  drivers/common/cnxk/roc_nix_inl.c   | 178 ++++++++++++++++++++++++----
>  drivers/common/cnxk/roc_nix_inl.h   |   4 +
>  drivers/common/cnxk/version.map     |   1 +
>  drivers/event/cnxk/cn10k_worker.h   |   9 +-
>  drivers/net/cnxk/cn10k_rx_select.c  |   5 +-
>  drivers/net/cnxk/cnxk_ethdev.c      |   3 +
>  drivers/net/cnxk/cnxk_ethdev.h      |   3 +
>  drivers/net/cnxk/cnxk_ethdev_sec.c  |  62 ++++++++++
>  10 files changed, 240 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
> index d83522799f..4983578fc6 100644
> --- a/drivers/common/cnxk/roc_idev_priv.h
> +++ b/drivers/common/cnxk/roc_idev_priv.h
> @@ -13,6 +13,7 @@ struct nix_inl_dev;
>
>  struct idev_nix_inl_cfg {
>         uint64_t meta_aura;
> +       uintptr_t meta_mempool;
>         uint32_t nb_bufs;
>         uint32_t buf_sz;
>         uint32_t refs;
> diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
> index 37d0ed5ebe..548854952b 100644
> --- a/drivers/common/cnxk/roc_nix.h
> +++ b/drivers/common/cnxk/roc_nix.h
> @@ -470,6 +470,7 @@ struct roc_nix {
>         bool local_meta_aura_ena;
>         uint32_t meta_buf_sz;
>         bool force_rx_aura_bp;
> +       bool custom_meta_aura_ena;
>         /* End of input parameters */
>         /* LMT line base for "Per Core Tx LMT line" mode*/
>         uintptr_t lmt_base;
> diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
> index 67f8ce9aa0..69f658ba87 100644
> --- a/drivers/common/cnxk/roc_nix_inl.c
> +++ b/drivers/common/cnxk/roc_nix_inl.c
> @@ -7,6 +7,7 @@
>
>  uint32_t soft_exp_consumer_cnt;
>  roc_nix_inl_meta_pool_cb_t meta_pool_cb;
> +roc_nix_inl_custom_meta_pool_cb_t custom_meta_pool_cb;
>
>  PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ ==
>                   1UL << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2);
> @@ -33,13 +34,14 @@ nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
>                 return -EINVAL;
>
>         inl_cfg = &idev->inl_cfg;
> -       if (roc_nix->local_meta_aura_ena) {
> +
> +       if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
> +               meta_aura = &inl_cfg->meta_aura;
> +       } else {
>                 meta_aura = &roc_nix->meta_aura_handle;
>                 snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
>                          roc_nix->port_id + 1);
>                 mp_name = mempool_name;
> -       } else {
> -               meta_aura = &inl_cfg->meta_aura;
>         }
>
>         /* Destroy existing Meta aura */
> @@ -72,7 +74,7 @@ nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
>
>  static int
>  nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
> -                        uint64_t *meta_aura)
> +                        uint64_t *meta_aura, bool is_local_metaaura)
>  {
>         uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
>         struct idev_nix_inl_cfg *inl_cfg;
> @@ -89,7 +91,7 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
>         inl_cfg = &idev->inl_cfg;
>         nix_inl_dev = idev->nix_inl_dev;
>
> -       if (roc_nix->local_meta_aura_ena) {
> +       if (is_local_metaaura) {
>                 /* Per LF Meta Aura */
>                 inl_rq_id = nix_inl_dev->nb_rqs > 1 ? port_id : 0;
>                 inl_rq = &nix_inl_dev->rqs[inl_rq_id];
> @@ -134,15 +136,107 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
>         plt_nix_dbg("Created meta aura %p(%s)for port %d", (void *)*meta_aura, mp_name,
>                     roc_nix->port_id);
>
> -       if (!roc_nix->local_meta_aura_ena) {
> +       if (!is_local_metaaura) {
>                 inl_cfg->buf_sz = buf_sz;
>                 inl_cfg->nb_bufs = nb_bufs;
> +               inl_cfg->meta_mempool = mp;
>         } else
>                 roc_nix->buf_sz = buf_sz;
>
>         return 0;
>  }
>
> +static int
> +nix_inl_custom_meta_aura_destroy(struct roc_nix *roc_nix)
> +{
> +       struct idev_cfg *idev = idev_get_cfg();
> +       struct idev_nix_inl_cfg *inl_cfg;
> +       char mempool_name[24] = {'\0'};
> +       char *mp_name = NULL;
> +       uint64_t *meta_aura;
> +       int rc;
> +
> +       if (!idev)
> +               return -EINVAL;
> +
> +       inl_cfg = &idev->inl_cfg;
> +       meta_aura = &roc_nix->meta_aura_handle;
> +       snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
> +                roc_nix->port_id + 1);
> +       mp_name = mempool_name;
> +
> +       /* Destroy existing Meta aura */
> +       if (*meta_aura) {
> +               uint64_t avail, limit;
> +
> +               /* Check if all buffers are back to pool */
> +               avail = roc_npa_aura_op_available(*meta_aura);
> +               limit = roc_npa_aura_op_limit_get(*meta_aura);
> +               if (avail != limit)
> +                       plt_warn("Not all buffers are back to meta pool,"
> +                                " %" PRIu64 " != %" PRIu64, avail, limit);
> +
> +               rc = custom_meta_pool_cb(inl_cfg->meta_mempool, &roc_nix->meta_mempool, mp_name,
> +                                        meta_aura, 0, 0, true);
> +               if (rc) {
> +                       plt_err("Failed to destroy meta aura, rc=%d", rc);
> +                       return rc;
> +               }
> +
> +               roc_nix->buf_sz = 0;
> +       }
> +
> +       return 0;
> +}
> +
> +static int
> +nix_inl_custom_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
> +                               uint64_t *meta_aura)
> +{
> +       uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
> +       struct idev_nix_inl_cfg *inl_cfg;
> +       struct nix_inl_dev *nix_inl_dev;
> +       char mempool_name[24] = {'\0'};
> +       uint32_t nb_bufs, buf_sz;
> +       char *mp_name = NULL;
> +       uintptr_t mp;
> +       int rc;
> +
> +       inl_cfg = &idev->inl_cfg;
> +       nix_inl_dev = idev->nix_inl_dev;
> +
> +       /* Override meta buf count from devargs if present */
> +       if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
> +               nb_bufs = nix_inl_dev->nb_meta_bufs;
> +       else
> +               nb_bufs = roc_npa_buf_type_limit_get(mask);
> +
> +       /* Override meta buf size from devargs if present */
> +       if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
> +               buf_sz = nix_inl_dev->meta_buf_sz;
> +       else
> +               buf_sz = first_skip + NIX_INL_META_SIZE;
> +
> +       /* Create Metapool name */
> +       snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
> +                roc_nix->port_id + 1);
> +       mp_name = mempool_name;
> +
> +       /* Allocate meta aura */
> +       rc = custom_meta_pool_cb(inl_cfg->meta_mempool, &mp, mp_name, meta_aura,
> +                                buf_sz, nb_bufs, false);
> +       if (rc) {
> +               plt_err("Failed to allocate meta aura, rc=%d", rc);
> +               return rc;
> +       }
> +
> +       /* Overwrite */
> +       roc_nix->meta_mempool = mp;
> +       roc_nix->buf_sz = buf_sz;
> +
> +       return 0;
> +}
> +
>  static int
>  nix_inl_global_meta_buffer_validate(struct idev_cfg *idev, struct roc_nix_rq *rq)
>  {
> @@ -228,6 +322,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
>         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>         struct idev_cfg *idev = idev_get_cfg();
>         struct idev_nix_inl_cfg *inl_cfg;
> +       bool is_local_metaaura;
>         bool aura_setup = false;
>         uint64_t *meta_aura;
>         int rc;
> @@ -238,18 +333,39 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
>         inl_cfg = &idev->inl_cfg;
>
>         /* Create meta aura if not present */
> -       if (roc_nix->local_meta_aura_ena)
> -               meta_aura = &roc_nix->meta_aura_handle;
> -       else
> +       if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
>                 meta_aura = &inl_cfg->meta_aura;
> +               is_local_metaaura = false;
> +       } else {
> +               meta_aura = &roc_nix->meta_aura_handle;
> +               is_local_metaaura = true;
> +       }
>
>         if (!(*meta_aura)) {
> -               rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura);
> +               rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura,
> +                                             is_local_metaaura);
>                 if (rc)
>                         return rc;
>
>                 aura_setup = true;
>         }
> +
> +       if (roc_nix->custom_meta_aura_ena) {
> +               /* Create metaura for 1:N pool:aura */
> +               if (!custom_meta_pool_cb)
> +                       return -EFAULT;
> +
> +               meta_aura = &roc_nix->meta_aura_handle;
> +               if (!(*meta_aura)) {
> +                       rc = nix_inl_custom_meta_aura_create(idev, roc_nix, rq->first_skip,
> +                                                            meta_aura);
> +                       if (rc)
> +                               return rc;
> +
> +                       aura_setup = true;
> +               }
> +       }
> +
>         /* Update rq meta aura handle */
>         rq->meta_aura_handle = *meta_aura;
>
> @@ -698,6 +814,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
>         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>         struct roc_cpt_inline_ipsec_inb_cfg cfg;
>         struct idev_cfg *idev = idev_get_cfg();
> +       struct nix_inl_dev *inl_dev;
>         uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
>         struct roc_cpt *roc_cpt;
>         int rc;
> @@ -749,9 +866,13 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
>         if (rc)
>                 return rc;
>
> +       inl_dev = idev->nix_inl_dev;
> +
> +       roc_nix->custom_meta_aura_ena = (roc_nix->local_meta_aura_ena &&
> +                                        (inl_dev->is_multi_channel || roc_nix->custom_sa_action));
>         if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
>                 nix->need_meta_aura = true;
> -               if (!roc_nix->local_meta_aura_ena)
> +               if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
>                         idev->inl_cfg.refs++;
>         }
>
> @@ -773,15 +894,17 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
>                 return -EFAULT;
>
>         nix->inl_inb_ena = false;
> +
>         if (nix->need_meta_aura) {
>                 nix->need_meta_aura = false;
> -               if (roc_nix->local_meta_aura_ena) {
> -                       nix_inl_meta_aura_destroy(roc_nix);
> -               } else {
> +               if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
>                         idev->inl_cfg.refs--;
> -                       if (!idev->inl_cfg.refs)
> -                               nix_inl_meta_aura_destroy(roc_nix);
> -               }
> +
> +               if (roc_nix->custom_meta_aura_ena)
> +                       nix_inl_custom_meta_aura_destroy(roc_nix);
> +
> +               if (!idev->inl_cfg.refs)
> +                       nix_inl_meta_aura_destroy(roc_nix);
>         }
>
>         if (roc_feature_nix_has_inl_rq_mask()) {
> @@ -1309,17 +1432,18 @@ roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
>
>         if (ena) {
>                 nix->need_meta_aura = true;
> -               if (!roc_nix->local_meta_aura_ena)
> +               if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
>                         idev->inl_cfg.refs++;
>         } else if (nix->need_meta_aura) {
>                 nix->need_meta_aura = false;
> -               if (roc_nix->local_meta_aura_ena) {
> -                       nix_inl_meta_aura_destroy(roc_nix);
> -               } else {
> +               if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
>                         idev->inl_cfg.refs--;
> -                       if (!idev->inl_cfg.refs)
> -                               nix_inl_meta_aura_destroy(roc_nix);
> -               }
> +
> +               if (roc_nix->custom_meta_aura_ena)
> +                       nix_inl_custom_meta_aura_destroy(roc_nix);
> +
> +               if (!idev->inl_cfg.refs)
> +                       nix_inl_meta_aura_destroy(roc_nix);
>         }
>  }
>
> @@ -1672,3 +1796,9 @@ roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix)
>
>         return nix->cpt_eng_caps;
>  }
> +
> +void
> +roc_nix_inl_custom_meta_pool_cb_register(roc_nix_inl_custom_meta_pool_cb_t cb)
> +{
> +       custom_meta_pool_cb = cb;
> +}
> diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
> index daa21a941a..885d95335e 100644
> --- a/drivers/common/cnxk/roc_nix_inl.h
> +++ b/drivers/common/cnxk/roc_nix_inl.h
> @@ -121,6 +121,9 @@ typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
>  typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle,  uintptr_t *mpool,
>                                           uint32_t blk_sz, uint32_t nb_bufs, bool destroy,
>                                           const char *mempool_name);
> +typedef int (*roc_nix_inl_custom_meta_pool_cb_t)(uintptr_t pmpool, uintptr_t *mpool,
> +                                                const char *mempool_name, uint64_t *aura_handle,
> +                                                uint32_t blk_sz, uint32_t nb_bufs, bool destroy);
>
>  struct roc_nix_inl_dev {
>         /* Input parameters */
> @@ -199,6 +202,7 @@ int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
>                                                     bool poll);
>  uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
>  void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
> +void __roc_api roc_nix_inl_custom_meta_pool_cb_register(roc_nix_inl_custom_meta_pool_cb_t cb);
>
>  /* NIX Inline/Outbound API */
>  enum roc_nix_inl_sa_sync_op {
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 809fd81b20..c76564b46e 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -199,6 +199,7 @@ INTERNAL {
>         roc_nix_inb_is_with_inl_dev;
>         roc_nix_inl_meta_aura_check;
>         roc_nix_inl_meta_pool_cb_register;
> +       roc_nix_inl_custom_meta_pool_cb_register;
>         roc_nix_inb_mode_set;
>         roc_nix_inl_outb_fini;
>         roc_nix_inl_outb_init;
> diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
> index 06c71c6092..07f0dad97d 100644
> --- a/drivers/event/cnxk/cn10k_worker.h
> +++ b/drivers/event/cnxk/cn10k_worker.h
> @@ -167,6 +167,10 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
>                 mbuf = u64[1] - sizeof(struct rte_mbuf);
>                 rte_prefetch0((void *)mbuf);
>                 if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
> +                       void *lookup_mem = ws->lookup_mem;
> +                       struct rte_mempool *mp = NULL;
> +                       uint64_t meta_aura;
> +
>                         const uint64_t mbuf_init =
>                                 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
>                                 (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
> @@ -191,8 +195,11 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
>                                 cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
>                                 (struct rte_mbuf *)mbuf, d_off, flags,
>                                 mbuf_init | ((uint64_t)port) << 48);
> +                       mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem);
> +                       meta_aura = mp ? mp->pool_id : m->pool->pool_id;
> +
>                         if (loff)
> -                               roc_npa_aura_op_free(m->pool->pool_id, 0, iova);
> +                               roc_npa_aura_op_free(meta_aura, 0, iova);
>                 }
>
>                 u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
> diff --git a/drivers/net/cnxk/cn10k_rx_select.c b/drivers/net/cnxk/cn10k_rx_select.c
> index b906f6725a..1e0de1b7ac 100644
> --- a/drivers/net/cnxk/cn10k_rx_select.c
> +++ b/drivers/net/cnxk/cn10k_rx_select.c
> @@ -79,9 +79,10 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
>  #undef R
>         };
>
> -       /* Copy multi seg version with no offload for tear down sequence */
> +       /* Copy multi seg version with security for tear down sequence */
>         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> -               dev->rx_pkt_burst_no_offload = nix_eth_rx_burst_mseg[0];
> +               dev->rx_pkt_burst_no_offload =
> +                       nix_eth_rx_burst_mseg_reas[NIX_RX_OFFLOAD_SECURITY_F];
>
>         if (dev->scalar_ena) {
>                 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
> diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
> index 6b45ccd0f7..677539c35a 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.c
> +++ b/drivers/net/cnxk/cnxk_ethdev.c
> @@ -1883,6 +1883,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
>         /* Register callback for inline meta pool create */
>         roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
>
> +       /* Register callback for inline meta pool create 1:N pool:aura */
> +       roc_nix_inl_custom_meta_pool_cb_register(cnxk_nix_inl_custom_meta_pool_cb);
> +
>         dev->eth_dev = eth_dev;
>         dev->configured = 0;
>         dev->ptype_disable = 0;
> diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
> index d76f5486e6..85287dd66c 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.h
> +++ b/drivers/net/cnxk/cnxk_ethdev.h
> @@ -608,6 +608,9 @@ cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
>                               struct rte_security_session *sess);
>  int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
>                               uint32_t nb_bufs, bool destroy, const char *mempool_name);
> +int cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
> +                                    uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
> +                                    bool destroy);
>
>  /* Congestion Management */
>  int cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info);
> diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
> index cd64daacc0..a66d58ca61 100644
> --- a/drivers/net/cnxk/cnxk_ethdev_sec.c
> +++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
> @@ -6,6 +6,7 @@
>  #include <cnxk_mempool.h>
>
>  #define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
> +#define CN10K_HW_POOL_OPS_NAME "cn10k_hwpool_ops"
>
>  #define CNXK_NIX_INL_SELFTEST        "selftest"
>  #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
> @@ -114,6 +115,67 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
>         return rc;
>  }
>
> +/* Create Aura and link with Global mempool for 1:N Pool:Aura case */
> +int
> +cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
> +                                uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
> +                                bool destroy)
> +{
> +       struct rte_mempool *hp;
> +       int rc;
> +
> +       /* Destroy the mempool if requested */
> +       if (destroy) {
> +               hp = rte_mempool_lookup(mempool_name);
> +               if (!hp)
> +                       return -ENOENT;
> +
> +               if (hp->pool_id != *aura_handle) {
> +                       plt_err("Meta pool aura mismatch");
> +                       return -EINVAL;
> +               }
> +
> +               rte_mempool_free(hp);
> +               plt_free(hp->pool_config);
> +
> +               *aura_handle = 0;
> +               *mpool = 0;
> +               return 0;
> +       }
> +
> +       /* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
> +        * support.
> +        */
> +       hp = rte_mempool_create_empty(mempool_name, nb_bufs, buf_sz, 0,
> +                                     sizeof(struct rte_pktmbuf_pool_private),
> +                                     SOCKET_ID_ANY, 0);
> +       if (!hp) {
> +               plt_err("Failed to create inline meta pool");
> +               return -EIO;
> +       }
> +
> +       rc = rte_mempool_set_ops_byname(hp, CN10K_HW_POOL_OPS_NAME, (void *)pmpool);
> +
> +       if (rc) {
> +               plt_err("Failed to setup ops, rc=%d", rc);
> +               goto free_hp;
> +       }
> +
> +       /* Populate buffer */
> +       rc = rte_mempool_populate_default(hp);
> +       if (rc < 0) {
> +               plt_err("Failed to populate pool, rc=%d", rc);
> +               goto free_hp;
> +       }
> +
> +       *aura_handle = hp->pool_id;
> +       *mpool = (uintptr_t)hp;
> +       return 0;
> +free_hp:
> +       rte_mempool_free(hp);
> +       return rc;
> +}
> +
>  static int
>  parse_max_ipsec_rules(const char *key, const char *value, void *extra_args)
>  {
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 89+ messages in thread

* Re: [PATCH 02/21] common/cnxk: add pool BPID to RQ while using common pool
  2023-04-11  9:11 ` [PATCH 02/21] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
@ 2023-05-18  5:52   ` Jerin Jacob
  0 siblings, 0 replies; 89+ messages in thread
From: Jerin Jacob @ 2023-05-18  5:52 UTC (permalink / raw)
  To: Nithin Dabilpuram
  Cc: Kiran Kumar K, Sunil Kumar Kori, Satha Rao, jerinj, dev, Shijith Thotton

On Tue, Apr 11, 2023 at 2:42 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> From: Shijith Thotton <sthotton@marvell.com>
>
> When RQs of two different traffic classes use the same mempool, the
> BPIDs can differ between the RQs while only one RQ's BPID can be
> configured per pool. In such cases, a new BPID is configured on both
> the RQs and the pool, or pool back-pressure is disabled.
> 
> CN103xx and CN106xx B0 support configuring multiple BPIDs per RQ.
>
> Signed-off-by: Shijith Thotton <sthotton@marvell.com>
> ---
>  drivers/common/cnxk/roc_idev.c      |  12 +++
>  drivers/common/cnxk/roc_idev.h      |   1 +
>  drivers/common/cnxk/roc_idev_priv.h |   1 +
>  drivers/common/cnxk/roc_nix.c       |   5 +
>  drivers/common/cnxk/roc_nix.h       |   3 +
>  drivers/common/cnxk/roc_nix_fc.c    | 156 ++++++++++++++++------------
>  drivers/common/cnxk/roc_npa.c       |  48 +++++++++
>  drivers/common/cnxk/roc_npa.h       |   2 +
>  drivers/common/cnxk/version.map     |   2 +
>  9 files changed, 166 insertions(+), 64 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
> index 2737bb9517..188b8800d3 100644
> --- a/drivers/common/cnxk/roc_nix.h
> +++ b/drivers/common/cnxk/roc_nix.h
> @@ -425,6 +425,8 @@ typedef void (*q_err_get_t)(struct roc_nix *roc_nix, void *data);
>  typedef void (*link_info_get_t)(struct roc_nix *roc_nix,
>                                 struct roc_nix_link_info *link);
>
> +TAILQ_HEAD(roc_nix_list, roc_nix);

Could you check to remove this global variable?

^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (19 preceding siblings ...)
  2023-04-11  9:11 ` [PATCH 21/21] common/cnxk: support of 1:n pool:aura per NIX LF Nithin Dabilpuram
@ 2023-05-24 10:03 ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 02/32] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
                     ` (30 more replies)
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  21 siblings, 31 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

New mailbox to allocate/free dynamic BPIDs based on NIX type.
Added two new mailbox APIs to get/set RX channel config with
new BPIDs.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
v2:
- Rebased on top of dpdk-next-net-mrvl
- Handled comments from Jerin w.r.t commit messages
- Added a few more fixes to the series.


 drivers/common/cnxk/roc_cpt.c      |  10 +-
 drivers/common/cnxk/roc_cpt.h      |   3 +-
 drivers/common/cnxk/roc_features.h |   7 ++
 drivers/common/cnxk/roc_mbox.h     |  31 ++++-
 drivers/common/cnxk/roc_nix.h      |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c   | 182 +++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.c  |  24 ++--
 drivers/common/cnxk/roc_nix_priv.h |   1 +
 drivers/common/cnxk/version.map    |   5 +
 9 files changed, 266 insertions(+), 18 deletions(-)
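
As a usage note for the new wrappers declared in roc_nix.h below
(roc_nix_bpids_alloc/free and roc_nix_chan_bpid_set), a minimal sketch
of allocating one dynamic BPID and attaching it to an RX channel could
look as follows. This is illustrative only: example_attach_dyn_bpid()
is a made-up helper and the usual cnxk ROC build environment is
assumed.

static int
example_attach_dyn_bpid(struct roc_nix *roc_nix, uint16_t chan)
{
	uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
	int nb_bpids, rc;

	/* Returns the number of BPIDs allocated, 0 if none are free,
	 * negative on error.
	 */
	nb_bpids = roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, bpids);
	if (nb_bpids <= 0)
		return nb_bpids ? nb_bpids : -ENOSPC;

	/* Enable back-pressure with the new BPID on a non-CPT RX channel;
	 * needs the multi-BPID RX channel feature (CN10KB/CN10KA B0).
	 */
	rc = roc_nix_chan_bpid_set(roc_nix, chan, bpids[0], 1, false);
	if (rc) {
		roc_nix_bpids_free(roc_nix, 1, bpids);
		return rc;
	}

	return 0;
}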

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index dff2fbf2a4..d235ff51ca 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -311,8 +311,7 @@ roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 }
 
 int
-roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
-			     uint16_t param2, uint16_t opcode)
+roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_inline_ipsec_inb_cfg *cfg)
 {
 	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 	struct cpt_rx_inline_lf_cfg_msg *req;
@@ -328,9 +327,10 @@ roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 	}
 
 	req->sso_pf_func = idev_sso_pffunc_get();
-	req->param1 = param1;
-	req->param2 = param2;
-	req->opcode = opcode;
+	req->param1 = cfg->param1;
+	req->param2 = cfg->param2;
+	req->opcode = cfg->opcode;
+	req->bpid = cfg->bpid;
 
 	rc = mbox_process(mbox);
 exit:
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index d3a5683dc8..92a18711dc 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -178,8 +178,7 @@ int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 					struct roc_cpt_inline_ipsec_inb_cfg *cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
-					   uint16_t param1, uint16_t param2,
-					   uint16_t opcode);
+					   struct roc_cpt_inline_ipsec_inb_cfg *cfg);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
 int __roc_api roc_cpt_lfs_print(struct roc_cpt *roc_cpt);
 void __roc_api roc_cpt_iq_disable(struct roc_cpt_lf *lf);
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 252f306a86..c2893faa65 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -40,4 +40,11 @@ roc_feature_nix_has_reass(void)
 	return roc_model_is_cn10ka();
 }
 
+static inline bool
+roc_feature_nix_has_rxchan_multi_bpid(void)
+{
+	if (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0())
+		return true;
+	return false;
+}
 #endif
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index af3c10b0b0..3d5746b9b8 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -275,7 +275,12 @@ struct mbox_msghdr {
 	M(NIX_SPI_TO_SA_ADD, 0x8026, nix_spi_to_sa_add, nix_spi_to_sa_add_req, \
 	  nix_spi_to_sa_add_rsp)                                               \
 	M(NIX_SPI_TO_SA_DELETE, 0x8027, nix_spi_to_sa_delete,                  \
-	  nix_spi_to_sa_delete_req, msg_rsp)
+	  nix_spi_to_sa_delete_req, msg_rsp)                                   \
+	M(NIX_ALLOC_BPIDS, 0x8028, nix_alloc_bpids, nix_alloc_bpid_req,        \
+	  nix_bpids)                                                           \
+	M(NIX_FREE_BPIDS, 0x8029, nix_free_bpids, nix_bpids, msg_rsp)          \
+	M(NIX_RX_CHAN_CFG, 0x802a, nix_rx_chan_cfg, nix_rx_chan_cfg,           \
+	  nix_rx_chan_cfg)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1186,6 +1191,30 @@ struct nix_bp_cfg_rsp {
 	uint8_t __io chan_cnt;
 };
 
+struct nix_alloc_bpid_req {
+	struct mbox_msghdr hdr;
+	uint8_t __io bpid_cnt;
+	uint8_t __io type;
+	uint64_t __io rsvd;
+};
+
+struct nix_bpids {
+#define ROC_NIX_MAX_BPID_CNT	8
+	struct mbox_msghdr hdr;
+	uint8_t __io bpid_cnt;
+	uint16_t __io bpids[ROC_NIX_MAX_BPID_CNT];
+	uint64_t __io rsvd;
+};
+
+struct nix_rx_chan_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io type; /* Interface type(CGX/CPT/LBK) */
+	uint8_t __io read;
+	uint16_t __io chan; /* RX channel to be configured */
+	uint64_t __io val; /* NIX_AF_RX_CHAN_CFG value */
+	uint64_t __io rsvd;
+};
+
 /* Global NIX inline IPSec configuration */
 struct nix_inline_ipsec_cfg {
 	struct mbox_msghdr hdr;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 0ec98ad630..2737bb9517 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -16,6 +16,17 @@
 #define ROC_NIX_SQB_LOWER_THRESH      70U
 #define ROC_NIX_SQB_SLACK	      12U
 
+/* Reserved interface types for BPID allocation */
+#define ROC_NIX_INTF_TYPE_CGX  0
+#define ROC_NIX_INTF_TYPE_LBK  1
+#define ROC_NIX_INTF_TYPE_SDP  2
+#define ROC_NIX_INTF_TYPE_CPT  3
+#define ROC_NIX_INTF_TYPE_RSVD 4
+
+/* Application based types for BPID allocation, start from end (255 unused rsvd) */
+#define ROC_NIX_INTF_TYPE_CPT_NIX 254
+#define ROC_NIX_INTF_TYPE_SSO     253
+
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
 	ROC_NIX_RSS_RETA_SZ_128 = 128,
@@ -837,6 +848,16 @@ enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
 				     uint8_t ena, uint8_t force, uint8_t tc);
+int __roc_api roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type,
+				  uint8_t bp_cnt, uint16_t *bpids);
+int __roc_api roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt,
+				 uint16_t *bpids);
+int __roc_api roc_nix_rx_chan_cfg_get(struct roc_nix *roc_nix, uint16_t chan,
+				      bool is_cpt, uint64_t *cfg);
+int __roc_api roc_nix_rx_chan_cfg_set(struct roc_nix *roc_nix, uint16_t chan,
+				      bool is_cpt, uint64_t val);
+int __roc_api roc_nix_chan_bpid_set(struct roc_nix *roc_nix, uint16_t chan,
+				    uint64_t bpid, int ena, bool cpt_chan);
 
 /* NPC */
 int __roc_api roc_nix_npc_promisc_ena_dis(struct roc_nix *roc_nix, int enable);
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index cec83b31f3..3b726673a6 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -104,6 +104,17 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		nix->cpt_lbpid = 0;
 	}
 
+	/* CPT to NIX BP on all channels */
+	if (!roc_feature_nix_has_rxchan_multi_bpid() || !nix->cpt_nixbpid)
+		goto exit;
+
+	mbox_put(mbox);
+	for (i = 0; i < nix->rx_chan_cnt; i++) {
+		rc = roc_nix_chan_bpid_set(roc_nix, i, nix->cpt_nixbpid, enable, false);
+		if (rc)
+			break;
+	}
+	return rc;
 exit:
 	mbox_put(mbox);
 	return rc;
@@ -599,3 +610,174 @@ roc_nix_chan_count_get(struct roc_nix *roc_nix)
 
 	return nix->chan_cnt;
 }
+
+/* Allocate BPID for requested type
+ * Returns number of BPIDs allocated
+ *	0 if no BPIDs available
+ *	-ve value on error
+ */
+int
+roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type, uint8_t bp_cnt, uint16_t *bpids)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_alloc_bpid_req *req;
+	struct nix_bpids *rsp;
+	int rc = -EINVAL;
+
+	/* Use this api for unreserved interface types */
+	if ((type < ROC_NIX_INTF_TYPE_RSVD) || (bp_cnt > ROC_NIX_MAX_BPID_CNT) || !bpids)
+		goto exit;
+
+	rc = -ENOSPC;
+	req = mbox_alloc_msg_nix_alloc_bpids(mbox);
+	if (req == NULL)
+		goto exit;
+	req->type = type;
+	req->bpid_cnt = bp_cnt;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	for (rc = 0; rc < rsp->bpid_cnt; rc++)
+		bpids[rc] = rsp->bpids[rc];
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt, uint16_t *bpids)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_bpids *req;
+	int rc = -EINVAL;
+
+	/* Use this api for unreserved interface types */
+	if ((bp_cnt > ROC_NIX_MAX_BPID_CNT) || !bpids)
+		goto exit;
+
+	rc = -ENOSPC;
+	req = mbox_alloc_msg_nix_free_bpids(mbox);
+	if (req == NULL)
+		goto exit;
+	for (rc = 0; rc < bp_cnt; rc++)
+		req->bpids[rc] = bpids[rc];
+	req->bpid_cnt = rc;
+
+	rc = mbox_process(mbox);
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_rx_chan_cfg_get(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t *cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_rx_chan_cfg *req;
+	struct nix_rx_chan_cfg *rsp;
+	int rc = -EINVAL;
+
+	req = mbox_alloc_msg_nix_rx_chan_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+	if (is_cpt)
+		req->type = ROC_NIX_INTF_TYPE_CPT;
+	req->chan = chan;
+	req->read = 1;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+	*cfg = rsp->val;
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_rx_chan_cfg_set(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t val)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_rx_chan_cfg *req;
+	int rc = -EINVAL;
+
+	req = mbox_alloc_msg_nix_rx_chan_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+	if (is_cpt)
+		req->type = ROC_NIX_INTF_TYPE_CPT;
+	req->chan = chan;
+	req->val = val;
+	req->read = 0;
+
+	rc = mbox_process(mbox);
+	if (rc)
+		goto exit;
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+#define NIX_BPID1_ENA 15
+#define NIX_BPID2_ENA 14
+#define NIX_BPID3_ENA 13
+
+#define NIX_BPID1_OFF 20
+#define NIX_BPID2_OFF 32
+#define NIX_BPID3_OFF 44
+
+int
+roc_nix_chan_bpid_set(struct roc_nix *roc_nix, uint16_t chan, uint64_t bpid, int ena, bool cpt_chan)
+{
+	uint64_t cfg;
+	int rc;
+
+	if (!roc_feature_nix_has_rxchan_multi_bpid())
+		return -ENOTSUP;
+
+	rc = roc_nix_rx_chan_cfg_get(roc_nix, chan, cpt_chan, &cfg);
+	if (rc)
+		return rc;
+
+	if (ena) {
+		if ((((cfg >> NIX_BPID1_OFF) & GENMASK_ULL(8, 0)) == bpid) ||
+		    (((cfg >> NIX_BPID2_OFF) & GENMASK_ULL(8, 0)) == bpid) ||
+		    (((cfg >> NIX_BPID3_OFF) & GENMASK_ULL(8, 0)) == bpid))
+			return 0;
+
+		if (!(cfg & BIT_ULL(NIX_BPID1_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID1_OFF + 8, NIX_BPID1_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID1_OFF) | BIT_ULL(NIX_BPID1_ENA));
+		} else if (!(cfg & BIT_ULL(NIX_BPID2_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID2_OFF + 8, NIX_BPID2_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID2_OFF) | BIT_ULL(NIX_BPID2_ENA));
+		} else if (!(cfg & BIT_ULL(NIX_BPID3_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID3_OFF + 8, NIX_BPID3_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID3_OFF) | BIT_ULL(NIX_BPID3_ENA));
+		} else {
+			plt_nix_dbg("Exceed maximum BPIDs");
+			return -ENOSPC;
+		}
+	} else {
+		if (((cfg >> NIX_BPID1_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID1_OFF + 8, NIX_BPID1_OFF) |
+				 BIT_ULL(NIX_BPID1_ENA));
+		} else if (((cfg >> NIX_BPID2_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID2_OFF + 8, NIX_BPID2_OFF) |
+				 BIT_ULL(NIX_BPID2_ENA));
+		} else if (((cfg >> NIX_BPID3_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID3_OFF + 8, NIX_BPID3_OFF) |
+				 BIT_ULL(NIX_BPID3_ENA));
+		} else {
+			plt_nix_dbg("BPID not found");
+			return -EINVAL;
+		}
+	}
+	return roc_nix_rx_chan_cfg_set(roc_nix, chan, cpt_chan, cfg);
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 076d83e8d5..9485bba099 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -603,11 +603,10 @@ int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_cpt_inline_ipsec_inb_cfg cfg;
 	struct idev_cfg *idev = idev_get_cfg();
+	uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
 	struct roc_cpt *roc_cpt;
-	uint16_t opcode;
-	uint16_t param1;
-	uint16_t param2;
 	int rc;
 
 	if (idev == NULL)
@@ -624,9 +623,9 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	}
 
 	if (roc_model_is_cn9k()) {
-		param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
-		param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
-		opcode =
+		cfg.param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
+		cfg.param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
+		cfg.opcode =
 			((ROC_IE_ON_INB_MAX_CTX_LEN << 8) |
 			 (ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6)));
 	} else {
@@ -634,13 +633,18 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 
 		u.u16 = 0;
 		u.s.esp_trailer_disable = 1;
-		param1 = u.u16;
-		param2 = 0;
-		opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
+		cfg.param1 = u.u16;
+		cfg.param2 = 0;
+		cfg.opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
+		rc = roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_CPT_NIX, 1, bpids);
+		if (rc > 0) {
+			nix->cpt_nixbpid = bpids[0];
+			cfg.bpid = nix->cpt_nixbpid;
+		}
 	}
 
 	/* Do onetime Inbound Inline config in CPTPF */
-	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, param2, opcode);
+	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, &cfg);
 	if (rc && rc != -EEXIST) {
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 2fe9093324..99e27cdc56 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -208,6 +208,7 @@ struct nix {
 	uint16_t outb_se_ring_cnt;
 	uint16_t outb_se_ring_base;
 	uint16_t cpt_lbpid;
+	uint16_t cpt_nixbpid;
 	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5281c71550..e7c6f6bce5 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -147,6 +147,9 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_bpids_alloc;
+	roc_nix_bpids_free;
+	roc_nix_chan_bpid_set;
 	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
@@ -277,6 +280,8 @@ INTERNAL {
 	roc_nix_rss_key_set;
 	roc_nix_rss_reta_get;
 	roc_nix_rss_reta_set;
+	roc_nix_rx_chan_cfg_get;
+	roc_nix_rx_chan_cfg_set;
 	roc_nix_rx_drop_re_set;
 	roc_nix_rx_queue_intr_disable;
 	roc_nix_rx_queue_intr_enable;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 02/32] common/cnxk: add pool BPID to RQ while using common pool
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 03/32] common/cnxk: fix CPT backpressure disable on LBK Nithin Dabilpuram
                     ` (29 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Shijith Thotton

From: Shijith Thotton <sthotton@marvell.com>

When RQs of two different traffic classes use the same mempool, the
BPIDs can differ between the RQs while only one RQ's BPID can be
configured per pool. In such cases, a new BPID is configured on both
the RQs and the pool, or pool back-pressure is disabled.

CN103xx and CN106xx B0 support configuring multiple BPIDs per RQ.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
 drivers/common/cnxk/roc_idev.c      |  12 +++
 drivers/common/cnxk/roc_idev.h      |   1 +
 drivers/common/cnxk/roc_idev_priv.h |   1 +
 drivers/common/cnxk/roc_nix.c       |   5 +
 drivers/common/cnxk/roc_nix.h       |   3 +
 drivers/common/cnxk/roc_nix_fc.c    | 156 ++++++++++++++++------------
 drivers/common/cnxk/roc_npa.c       |  48 +++++++++
 drivers/common/cnxk/roc_npa.h       |   2 +
 drivers/common/cnxk/version.map     |   2 +
 9 files changed, 166 insertions(+), 64 deletions(-)
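
Roughly, the handling described above maps to the sequence sketched
below (illustrative only, not the driver code: the final boolean of
roc_npa_aura_bp_configure() is read as an enable flag from the call
sites in the roc_nix_fc.c hunk further down, and
example_rebind_shared_pool_bp() is a made-up helper):

static int
example_rebind_shared_pool_bp(struct roc_nix *rn_a, uint16_t tc_a,
			      struct roc_nix *rn_b, uint16_t tc_b,
			      uint64_t pool_id, uint8_t bp_intf,
			      uint8_t bp_thresh)
{
	uint16_t bpid[1];

	/* The shared aura can carry only one BPID, so allocate a fresh
	 * SSO-class BPID for it; if that is not possible, fall back to
	 * disabling pool back-pressure altogether.
	 */
	if (roc_nix_bpids_alloc(rn_a, ROC_NIX_INTF_TYPE_SSO, 1, bpid) <= 0)
		return roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false);

	/* Point both RX channels (per-TC) at the new BPID... */
	if (roc_nix_chan_bpid_set(rn_a, tc_a, bpid[0], 1, false) ||
	    roc_nix_chan_bpid_set(rn_b, tc_b, bpid[0], 1, false))
		return -ENOSPC;

	/* ...and re-target the aura's back-pressure to the same BPID */
	return roc_npa_aura_bp_configure(pool_id, bpid[0], bp_intf, bp_thresh, true);
}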

diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 62a4fd8880..f420f0158d 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -39,6 +39,7 @@ idev_set_defaults(struct idev_cfg *idev)
 	idev->bphy = NULL;
 	idev->cpt = NULL;
 	idev->nix_inl_dev = NULL;
+	TAILQ_INIT(&idev->roc_nix_list);
 	plt_spinlock_init(&idev->nix_inl_dev_lock);
 	plt_spinlock_init(&idev->npa_dev_lock);
 	__atomic_store_n(&idev->npa_refcnt, 0, __ATOMIC_RELEASE);
@@ -201,6 +202,17 @@ roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix)
 	return (uint64_t *)&inl_dev->sa_soft_exp_ring[nix->outb_se_ring_base];
 }
 
+struct roc_nix_list *
+roc_idev_nix_list_get(void)
+{
+	struct idev_cfg *idev;
+
+	idev = idev_get_cfg();
+	if (idev != NULL)
+		return &idev->roc_nix_list;
+	return NULL;
+}
+
 void
 roc_idev_cpt_set(struct roc_cpt *cpt)
 {
diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 926aac0634..640ca97708 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -17,5 +17,6 @@ void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
 uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
+struct roc_nix_list *__roc_api roc_idev_nix_list_get(void);
 
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index b97d2936a2..d83522799f 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -32,6 +32,7 @@ struct idev_cfg {
 	struct roc_sso *sso;
 	struct nix_inl_dev *nix_inl_dev;
 	struct idev_nix_inl_cfg inl_cfg;
+	struct roc_nix_list roc_nix_list;
 	plt_spinlock_t nix_inl_dev_lock;
 	plt_spinlock_t npa_dev_lock;
 };
diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 97ef1c7133..39943e4ba7 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -417,6 +417,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	nix = roc_nix_to_nix_priv(roc_nix);
 	pci_dev = roc_nix->pci_dev;
 	dev = &nix->dev;
+	TAILQ_INSERT_TAIL(roc_idev_nix_list_get(), roc_nix, next);
 
 	if (nix->dev.drv_inited)
 		return 0;
@@ -425,6 +426,10 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 		goto skip_dev_init;
 
 	memset(nix, 0, sizeof(*nix));
+
+	/* Since 0 is a valid BPID, use -1 to represent invalid value. */
+	memset(nix->bpid, -1, sizeof(nix->bpid));
+
 	/* Initialize device  */
 	rc = dev_init(dev, pci_dev);
 	if (rc) {
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 2737bb9517..188b8800d3 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -425,6 +425,8 @@ typedef void (*q_err_get_t)(struct roc_nix *roc_nix, void *data);
 typedef void (*link_info_get_t)(struct roc_nix *roc_nix,
 				struct roc_nix_link_info *link);
 
+TAILQ_HEAD(roc_nix_list, roc_nix);
+
 struct roc_nix {
 	/* Input parameters */
 	struct plt_pci_device *pci_dev;
@@ -456,6 +458,7 @@ struct roc_nix {
 	uint32_t buf_sz;
 	uint64_t meta_aura_handle;
 	uintptr_t meta_mempool;
+	TAILQ_ENTRY(roc_nix) next;
 
 #define ROC_NIX_MEM_SZ (6 * 1056)
 	uint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 3b726673a6..8b7659fb9a 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -428,17 +428,64 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 	return rc;
 }
 
+static int
+nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid, uint16_t *bpid_new)
+{
+	struct roc_nix *roc_nix_tmp, *roc_nix_pre = NULL;
+	uint8_t chan_pre;
+
+	if (!roc_feature_nix_has_rxchan_multi_bpid())
+		return -ENOTSUP;
+
+	/* Find associated NIX RX channel if Aura BPID is of that of a NIX. */
+	TAILQ_FOREACH (roc_nix_tmp, roc_idev_nix_list_get(), next) {
+		struct nix *nix = roc_nix_to_nix_priv(roc_nix_tmp);
+		int i;
+
+		for (i = 0; i < NIX_MAX_CHAN; i++) {
+			if (nix->bpid[i] == bpid)
+				break;
+		}
+
+		if (i < NIX_MAX_CHAN) {
+			roc_nix_pre = roc_nix_tmp;
+			chan_pre = i;
+			break;
+		}
+	}
+
+	/* Alloc and configure a new BPID if Aura BPID is that of a NIX. */
+	if (roc_nix_pre) {
+		if (roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, bpid_new) <= 0)
+			return -ENOSPC;
+
+		if (roc_nix_chan_bpid_set(roc_nix_pre, chan_pre, *bpid_new, 1, false) < 0)
+			return -ENOSPC;
+
+		if (roc_nix_chan_bpid_set(roc_nix, chan, *bpid_new, 1, false) < 0)
+			return -ENOSPC;
+
+		return 0;
+	} else {
+		return roc_nix_chan_bpid_set(roc_nix, chan, bpid, 1, false);
+	}
+
+	return 0;
+}
+
+#define NIX_BPID_INVALID 0xFFFF
+
 void
 roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 		      uint8_t force, uint8_t tc)
 {
+	uint32_t aura_id = roc_npa_aura_handle_to_aura(pool_id);
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct npa_lf *lf = idev_npa_obj_get();
 	struct npa_aq_enq_req *req;
 	struct npa_aq_enq_rsp *rsp;
+	uint8_t bp_thresh, bp_intf;
 	struct mbox *mbox;
-	uint32_t limit;
-	uint64_t shift;
 	int rc;
 
 	if (roc_nix_is_sdp(roc_nix))
@@ -446,93 +493,74 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	if (!lf)
 		return;
-	mbox = mbox_get(lf->mbox);
 
-	req = mbox_alloc_msg_npa_aq_enq(mbox);
-	if (req == NULL)
-		goto exit;
+	mbox = lf->mbox;
+	req = mbox_alloc_msg_npa_aq_enq(mbox_get(mbox));
+	if (req == NULL) {
+		mbox_put(mbox);
+		return;
+	}
 
-	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
+	req->aura_id = aura_id;
 	req->ctype = NPA_AQ_CTYPE_AURA;
 	req->op = NPA_AQ_INSTOP_READ;
 
 	rc = mbox_process_msg(mbox, (void *)&rsp);
-	if (rc)
-		goto exit;
+	mbox_put(mbox);
+	if (rc) {
+		plt_nix_dbg("Failed to read context of aura 0x%" PRIx64, pool_id);
+		return;
+	}
 
-	limit = rsp->aura.limit;
-	shift = rsp->aura.shift;
+	bp_intf = 1 << nix->is_nix1;
+	bp_thresh = NIX_RQ_AURA_THRESH(rsp->aura.limit >> rsp->aura.shift);
 
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
-		uint16_t bpid;
-		bool nix1;
+		uint16_t bpid =
+			(rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
 
-		nix1 = !!(rsp->aura.bp_ena & 0x2);
-		if (nix1)
-			bpid = rsp->aura.nix1_bpid;
-		else
-			bpid = rsp->aura.nix0_bpid;
+		/* Disable BP if BPIDs don't match and couldn't add new BPID. */
+		if (bpid != nix->bpid[tc]) {
+			uint16_t bpid_new = NIX_BPID_INVALID;
 
-		/* If BP ids don't match disable BP. */
-		if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc])) &&
-		    !force) {
-			req = mbox_alloc_msg_npa_aq_enq(mbox);
-			if (req == NULL)
-				goto exit;
+			if ((nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new) < 0) &&
+			    !force) {
+				plt_info("Disabling BP/FC on aura 0x%" PRIx64
+					 " as it shared across ports or tc",
+					 pool_id);
 
-			plt_info("Disabling BP/FC on aura 0x%" PRIx64
-				 " as it shared across ports or tc",
-				 pool_id);
-			req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
-			req->ctype = NPA_AQ_CTYPE_AURA;
-			req->op = NPA_AQ_INSTOP_WRITE;
+				if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
+					plt_nix_dbg(
+						"Disabling backpressue failed on aura 0x%" PRIx64,
+						pool_id);
+			}
 
-			req->aura.bp_ena = 0;
-			req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
-
-			mbox_process(mbox);
+			/* Configure Aura with new BPID if it is allocated. */
+			if (bpid_new != NIX_BPID_INVALID) {
+				if (roc_npa_aura_bp_configure(pool_id, bpid_new, bp_intf, bp_thresh,
+							      true))
+					plt_nix_dbg(
+						"Enabling backpressue failed on aura 0x%" PRIx64,
+						pool_id);
+			}
 		}
 
-		if ((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc]))
-			plt_info("Ignoring aura 0x%" PRIx64 "->%u bpid mapping",
-				 pool_id, nix->bpid[tc]);
-		goto exit;
+		return;
 	}
 
 	/* BP was previously enabled but now disabled skip. */
 	if (rsp->aura.bp && ena)
-		goto exit;
-
-	req = mbox_alloc_msg_npa_aq_enq(mbox);
-	if (req == NULL)
-		goto exit;
-
-	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
-	req->ctype = NPA_AQ_CTYPE_AURA;
-	req->op = NPA_AQ_INSTOP_WRITE;
+		return;
 
 	if (ena) {
-		if (nix->is_nix1) {
-			req->aura.nix1_bpid = nix->bpid[tc];
-			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
-		} else {
-			req->aura.nix0_bpid = nix->bpid[tc];
-			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
-		}
-		req->aura.bp = NIX_RQ_AURA_THRESH(limit >> shift);
-		req->aura_mask.bp = ~(req->aura_mask.bp);
+		if (roc_npa_aura_bp_configure(pool_id, nix->bpid[tc], bp_intf, bp_thresh, true))
+			plt_nix_dbg("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	} else {
-		req->aura.bp = 0;
-		req->aura_mask.bp = ~(req->aura_mask.bp);
+		if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
+			plt_nix_dbg("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	}
 
-	req->aura.bp_ena = (!!ena << nix->is_nix1);
-	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
-
-	mbox_process(mbox);
-exit:
-	mbox_put(mbox);
 	return;
 }
 
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 3b0f95a304..dd649812b4 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -882,6 +882,54 @@ roc_npa_zero_aura_handle(void)
 	return 0;
 }
 
+int
+roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf, uint8_t bp_thresh,
+			  bool enable)
+{
+	uint32_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf = idev_npa_obj_get();
+	struct npa_aq_enq_req *req;
+	struct mbox *mbox;
+	int rc = 0;
+
+	if (lf == NULL)
+		return NPA_ERR_PARAM;
+
+	mbox = mbox_get(lf->mbox);
+	req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (req == NULL) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	req->aura_id = aura_id;
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_WRITE;
+
+	if (enable) {
+		if (bp_intf & 0x1) {
+			req->aura.nix0_bpid = bpid;
+			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
+		} else {
+			req->aura.nix1_bpid = bpid;
+			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
+		}
+		req->aura.bp = bp_thresh;
+		req->aura_mask.bp = ~(req->aura_mask.bp);
+	} else {
+		req->aura.bp = 0;
+		req->aura_mask.bp = ~(req->aura_mask.bp);
+	}
+
+	req->aura.bp_ena = bp_intf;
+	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+	mbox_process(mbox);
+fail:
+	mbox_put(mbox);
+	return rc;
+}
+
 static inline int
 npa_attach(struct mbox *m_box)
 {
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index d3caa71586..e1e164499e 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -749,6 +749,8 @@ uint64_t __roc_api roc_npa_zero_aura_handle(void);
 int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
 uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
 uint64_t __roc_api roc_npa_buf_type_limit_get(uint64_t type_mask);
+int __roc_api roc_npa_aura_bp_configure(uint64_t aura_id, uint16_t bpid, uint8_t bp_intf,
+					uint8_t bp_thresh, bool enable);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index e7c6f6bce5..d740d9df81 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -99,6 +99,7 @@ INTERNAL {
 	roc_idev_npa_nix_get;
 	roc_idev_num_lmtlines_get;
 	roc_idev_nix_inl_meta_aura_get;
+	roc_idev_nix_list_get;
 	roc_ml_reg_read64;
 	roc_ml_reg_write64;
 	roc_ml_reg_read32;
@@ -361,6 +362,7 @@ INTERNAL {
 	roc_npa_aura_limit_modify;
 	roc_npa_aura_op_range_get;
 	roc_npa_aura_op_range_set;
+	roc_npa_aura_bp_configure;
 	roc_npa_ctx_dump;
 	roc_npa_dev_fini;
 	roc_npa_dev_init;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 03/32] common/cnxk: fix CPT backpressure disable on LBK
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 02/32] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 04/32] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
                     ` (28 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rahul Bhansali, stable

From: Rahul Bhansali <rbhansali@marvell.com>

For LBK interfaces, roc_nix_inl_inb_is_enabled() is false, so
backpressure on CPT gets disabled and causes a CQ full interrupt.
NIXX_AF_RX_CHAN[0x800]_CFG is a global config for all PFs/VFs of
RPM/LBK, hence disabling backpressure on CPT is not required.

Fixes: 0663a84524e ("common/cnxk: enable backpressure on CPT with inline inbound")
cc: stable@dpdk.org

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/common/cnxk/roc_nix_fc.c | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 8b7659fb9a..19334b1eb0 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -87,25 +87,11 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		if (rc)
 			goto exit;
 		nix->cpt_lbpid = rsp->chan_bpid[0] & 0x1FF;
-	} else {
-		req = mbox_alloc_msg_nix_cpt_bp_disable(mbox);
-		if (req == NULL)
-			goto exit;
-		req->chan_base = 0;
-		if (roc_nix_is_lbk(roc_nix) || roc_nix_is_sdp(roc_nix))
-			req->chan_cnt = NIX_LBK_MAX_CHAN;
-		else
-			req->chan_cnt = NIX_CGX_MAX_CHAN;
-		req->bpid_per_chan = 0;
-
-		rc = mbox_process_msg(mbox, (void *)&rsp);
-		if (rc)
-			goto exit;
-		nix->cpt_lbpid = 0;
 	}
 
 	/* CPT to NIX BP on all channels */
-	if (!roc_feature_nix_has_rxchan_multi_bpid() || !nix->cpt_nixbpid)
+	if (!roc_feature_nix_has_rxchan_multi_bpid() || !nix->cpt_nixbpid ||
+	    !roc_nix_inl_inb_is_enabled(roc_nix))
 		goto exit;
 
 	mbox_put(mbox);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 04/32] common/cnxk: skip flow ctrl set on non-existent meta aura
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 02/32] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 03/32] common/cnxk: fix CPT backpressure disable on LBK Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 05/32] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
                     ` (27 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Thomas Monjalon, Nithin Kumar Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Skip setting flow control on the local meta aura if it is not yet
created. Also, in flow control mode set, read back the current mode
first and skip the set if the state is unchanged, to avoid an
unnecessary set and mbox failures.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
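
As a reference, a minimal sketch of the read-before-set idea using the
existing roc_nix_fc_mode_get()/roc_nix_fc_mode_set() helpers (roc_nix,
new_mode and rc are assumed to be in scope; the actual patch performs
the equivalent check at the ethdev layer with the cached rx/tx pause
state):

	/* Sketch only: skip the mbox set when the mode is unchanged. */
	enum roc_nix_fc_mode cur = roc_nix_fc_mode_get(roc_nix);

	if (cur != new_mode)
		rc = roc_nix_fc_mode_set(roc_nix, new_mode);
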
 .mailmap                           | 1 +
 drivers/common/cnxk/roc_dev.c      | 1 +
 drivers/common/cnxk/roc_nix_fc.c   | 4 ++--
 drivers/common/cnxk/roc_nix_inl.c  | 3 +++
 drivers/net/cnxk/cnxk_ethdev.c     | 5 +++--
 drivers/net/cnxk/cnxk_ethdev_ops.c | 4 ++++
 6 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/.mailmap b/.mailmap
index 57d02a87aa..edf2f5e639 100644
--- a/.mailmap
+++ b/.mailmap
@@ -991,6 +991,7 @@ Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
+Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
 Noa Ezra <noae@mellanox.com>
diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 2388237186..5e4e564ebe 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -421,6 +421,7 @@ process_msgs(struct dev *dev, struct mbox *mbox)
 			dev->pf_func = msg->pcifunc;
 			break;
 		case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
+		case MBOX_MSG_CGX_CFG_PAUSE_FRM:
 			/* Handling the case where one VF tries to disable PFC
 			 * while PFC already configured on other VFs. This is
 			 * not an error but a warning which can be ignored.
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 19334b1eb0..a34ce27d70 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -297,7 +297,7 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 				      fc_cfg->rq_cfg.enable, true,
 				      fc_cfg->rq_cfg.tc);
 
-		if (roc_nix->local_meta_aura_ena)
+		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
 					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc);
 	}
@@ -395,6 +395,7 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 		goto exit;
 	}
 
+	/* Set new config */
 	req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
 	if (req == NULL)
 		goto exit;
@@ -408,7 +409,6 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 
 	nix->rx_pause = rx_pause;
 	nix->tx_pause = tx_pause;
-
 exit:
 	mbox_put(mbox);
 	return rc;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 9485bba099..b16756d642 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -131,6 +131,9 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	}
 	roc_nix->meta_mempool = mp;
 
+	plt_nix_dbg("Created meta aura %p(%s)for port %d", (void *)*meta_aura, mp_name,
+		    roc_nix->port_id);
+
 	if (!roc_nix->local_meta_aura_ena) {
 		inl_cfg->buf_sz = buf_sz;
 		inl_cfg->nb_bufs = nb_bufs;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 42a52ed0ca..7cc41e0c31 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -378,8 +378,9 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	if (rc)
 		return rc;
 
-	fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL :
-						  RTE_ETH_FC_TX_PAUSE;
+	fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL : RTE_ETH_FC_TX_PAUSE;
+	fc->rx_pause = (fc->mode == RTE_ETH_FC_FULL) || (fc->mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc->mode == RTE_ETH_FC_FULL) || (fc->mode == RTE_ETH_FC_TX_PAUSE);
 	return rc;
 }
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 068b7c3502..bce6d59bbc 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -342,6 +342,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			return rc;
 	}
 
+	/* Skip mode set if it is we are in same state */
+	if (fc->rx_pause == rx_pause && fc->tx_pause == tx_pause)
+		return 0;
+
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
 	if (rc)
 		return rc;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 05/32] common/cnxk: reduce sqes per sqb by one
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (2 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 04/32] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 06/32] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
                     ` (26 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Each SQB reserves its last SQE to store a pointer to the next SQB, so
each SQB holds either 31 or 63 SQEs depending on the send descriptor
size selected.

This patch also accounts for sqb_slack to maintain threshold buffers
for syncing between HW and SW. The threshold is the maximum of 30% of
the queue size and sqb_slack.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
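
For illustration, a simplified sketch of the new SQB accounting with
assumed numbers (63 usable SQEs per SQB, 1024 descriptors,
ROC_NIX_SQB_SLACK = 12); the real code in sqb_pool_populate() also
clamps the count and adds a prefetch SQB:

	uint16_t sqes_per_sqb = 64 - 1;   /* last SQE holds the next-SQB pointer */
	uint16_t nb_desc = 1024;
	uint16_t nb_sqb_bufs = PLT_DIV_CEIL(nb_desc, sqes_per_sqb);          /* 17 */
	uint16_t thr = PLT_DIV_CEIL(nb_sqb_bufs * ROC_NIX_SQB_THRESH, 100);  /* 30% -> 6 */

	/* Keep the larger of the 30% threshold and the fixed slack as spare SQBs. */
	nb_sqb_bufs += PLT_MAX(thr, (uint16_t)ROC_NIX_SQB_SLACK);            /* 17 + 12 = 29 */
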
 drivers/common/cnxk/roc_nix.h       |  2 +-
 drivers/common/cnxk/roc_nix_priv.h  |  2 +-
 drivers/common/cnxk/roc_nix_queue.c | 21 ++++++++++-----------
 drivers/event/cnxk/cn10k_eventdev.c |  2 +-
 drivers/event/cnxk/cn9k_eventdev.c  |  2 +-
 5 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 188b8800d3..50aef4fe85 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -13,7 +13,7 @@
 #define ROC_NIX_BPF_STATS_MAX	      12
 #define ROC_NIX_MTR_ID_INVALID	      UINT32_MAX
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
-#define ROC_NIX_SQB_LOWER_THRESH      70U
+#define ROC_NIX_SQB_THRESH	      30U
 #define ROC_NIX_SQB_SLACK	      12U
 
 /* Reserved interface types for BPID allocation */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 99e27cdc56..7144d1ee10 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -12,7 +12,7 @@
 #define NIX_MAX_SQB	     ((uint16_t)512)
 #define NIX_DEF_SQB	     ((uint16_t)16)
 #define NIX_MIN_SQB	     ((uint16_t)8)
-#define NIX_SQB_LIST_SPACE   ((uint16_t)2)
+#define NIX_SQB_PREFETCH     ((uint16_t)1)
 
 /* Apply BP/DROP when CQ is 95% full */
 #define NIX_CQ_THRESH_LEVEL	(5 * 256 / 100)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index ac4d9856c1..d29fafa895 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -982,7 +982,7 @@ static int
 sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
-	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
+	uint16_t sqes_per_sqb, count, nb_sqb_bufs, thr;
 	struct npa_pool_s pool;
 	struct npa_aura_s aura;
 	uint64_t blk_sz;
@@ -995,22 +995,21 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	else
 		sqes_per_sqb = (blk_sz / 8) / 8;
 
+	/* Reserve One SQE in each SQB to hold pointer for next SQB */
+	sqes_per_sqb -= 1;
+
 	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
-	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
-	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
+	nb_sqb_bufs = PLT_DIV_CEIL(sq->nb_desc, sqes_per_sqb);
+	thr = PLT_DIV_CEIL((nb_sqb_bufs * ROC_NIX_SQB_THRESH), 100);
+	nb_sqb_bufs += NIX_SQB_PREFETCH;
 	/* Clamp up the SQB count */
-	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
-			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
+	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count, (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
 
 	sq->nb_sqb_bufs = nb_sqb_bufs;
 	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
-	sq->nb_sqb_bufs_adj =
-		nb_sqb_bufs -
-		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
-	sq->nb_sqb_bufs_adj =
-		(sq->nb_sqb_bufs_adj * ROC_NIX_SQB_LOWER_THRESH) / 100;
+	sq->nb_sqb_bufs_adj = nb_sqb_bufs;
 
-	nb_sqb_bufs += roc_nix->sqb_slack;
+	nb_sqb_bufs += PLT_MAX(thr, roc_nix->sqb_slack);
 	/* Explicitly set nat_align alone as by default pool is with both
 	 * nat_align and buf_offset = 1 which we don't want for SQB.
 	 */
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 49d205af39..fd71ff15ca 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -995,7 +995,7 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 		txq->nb_sqb_bufs_adj =
-			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+			((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 2d2985f175..b104d19b9b 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1037,7 +1037,7 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 		txq->nb_sqb_bufs_adj =
-			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+			((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 06/32] common/cnxk: dump SW SSO work count as xstat
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (3 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 05/32] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 07/32] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
                     ` (25 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Dump SW SSO work count as xstat.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
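
A rough sketch of how the new counter could be read through the
existing xstats API; the xstat name array type and its .name field
(struct roc_nix_xstat_name) are assumed here:

	int i, n = roc_nix_num_xstats_get(roc_nix);
	struct roc_nix_xstat xstats[n];
	struct roc_nix_xstat_name names[n];

	roc_nix_xstats_names_get(roc_nix, names, n);
	roc_nix_xstats_get(roc_nix, xstats, n);
	for (i = 0; i < n; i++)
		if (!strcmp(names[i].name, "inl_sso_work_cnt"))
			plt_info("inl_sso_work_cnt: %" PRIu64, xstats[i].value);
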
 drivers/common/cnxk/roc_nix_inl_dev_irq.c |  1 +
 drivers/common/cnxk/roc_nix_inl_priv.h    |  1 +
 drivers/common/cnxk/roc_nix_stats.c       | 17 +++++++++++------
 drivers/common/cnxk/roc_nix_xstats.h      |  4 ++++
 4 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 445b440447..becd7907f2 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -41,6 +41,7 @@ nix_inl_sso_work_cb(struct nix_inl_dev *inl_dev)
 			goto again;
 	}
 
+	inl_dev->sso_work_cnt += cnt;
 	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
 }
 
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index 528d2db365..b0a8976c6b 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -57,6 +57,7 @@ struct nix_inl_dev {
 	bool is_nix1;
 	uint8_t spb_drop_pc;
 	uint8_t lpb_drop_pc;
+	uint64_t sso_work_cnt;
 
 	/* NIX/CPT data */
 	void *inb_sa_base;
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index 6b5803af84..ca0e8ccb4f 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -24,12 +24,7 @@
 int
 roc_nix_num_xstats_get(struct roc_nix *roc_nix)
 {
-	if (roc_nix_is_vf_or_sdp(roc_nix))
-		return CNXK_NIX_NUM_XSTATS_REG;
-	else if (roc_model_is_cn9k())
-		return CNXK_NIX_NUM_XSTATS_CGX;
-
-	return CNXK_NIX_NUM_XSTATS_RPM;
+	return roc_nix_xstats_names_get(roc_nix, NULL, 0);
 }
 
 int
@@ -360,6 +355,12 @@ roc_nix_xstats_get(struct roc_nix *roc_nix, struct roc_nix_xstat *xstats,
 				xstats[count].id = count;
 				count++;
 			}
+			for (i = 0; i < PLT_DIM(inl_sw_xstats); i++) {
+				if (!inl_sw_xstats[i].offset)
+					xstats[count].value = inl_dev->sso_work_cnt;
+				xstats[count].id = count;
+				count++;
+			}
 		}
 	}
 
@@ -475,6 +476,10 @@ roc_nix_xstats_names_get(struct roc_nix *roc_nix,
 						      inl_nix_rq_xstats, i);
 				count++;
 			}
+			for (i = 0; i < PLT_DIM(inl_sw_xstats); i++) {
+				NIX_XSTATS_NAME_PRINT(xstats_names, count, inl_sw_xstats, i);
+				count++;
+			}
 		}
 	}
 
diff --git a/drivers/common/cnxk/roc_nix_xstats.h b/drivers/common/cnxk/roc_nix_xstats.h
index 813fb7f578..11b8e1c0ff 100644
--- a/drivers/common/cnxk/roc_nix_xstats.h
+++ b/drivers/common/cnxk/roc_nix_xstats.h
@@ -206,6 +206,10 @@ static const struct cnxk_nix_xstats_name nix_tx_xstats_cgx[] = {
 	{"cgx_tx_pause_packets", CGX_TX_PAUSE_PKTS},
 };
 
+static const struct cnxk_nix_xstats_name inl_sw_xstats[] = {
+	{"inl_sso_work_cnt", 0},
+};
+
 #define CNXK_NIX_NUM_RX_XSTATS	   PLT_DIM(nix_rx_xstats)
 #define CNXK_NIX_NUM_TX_XSTATS	   PLT_DIM(nix_tx_xstats)
 #define CNXK_NIX_NUM_QUEUE_XSTATS  PLT_DIM(nix_q_xstats)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 07/32] common/cnxk: add percent drop threshold to pool
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (4 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 06/32] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 08/32] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
                     ` (24 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Sunil Kumar Kori <skori@marvell.com>

Currently a hard-coded drop threshold (95%) is configured on the
aura/pool as the drop limit.

Add an input parameter to the RoC API so that a user-passed percentage
value can be configured instead.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
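
A minimal usage sketch of the extended helper with the new
drop-percent argument (pool_id and tc assumed to be in scope):

	/* Enable aura backpressure at 90% of the pool limit instead of
	 * the previously hard-coded 95% (ROC_NIX_AURA_THRESH).
	 */
	roc_nix_fc_npa_bp_cfg(roc_nix, pool_id, 1 /* ena */, 0 /* force */, tc, 90);
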
 drivers/common/cnxk/roc_nix.h            |  6 ++++--
 drivers/common/cnxk/roc_nix_fc.c         | 17 ++++++++++++-----
 drivers/common/cnxk/roc_nix_inl.c        |  2 +-
 drivers/common/cnxk/roc_nix_priv.h       |  2 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  4 ++--
 5 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 50aef4fe85..fde8fe4ecc 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -15,6 +15,7 @@
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
 #define ROC_NIX_SQB_THRESH	      30U
 #define ROC_NIX_SQB_SLACK	      12U
+#define ROC_NIX_AURA_THRESH	      95U
 
 /* Reserved interface types for BPID allocation */
 #define ROC_NIX_INTF_TYPE_CGX  0
@@ -197,6 +198,7 @@ struct roc_nix_fc_cfg {
 			uint16_t cq_drop;
 			bool enable;
 			uint64_t pool;
+			uint64_t pool_drop_pct;
 		} rq_cfg;
 
 		struct {
@@ -849,8 +851,8 @@ uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
 
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
-void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
-				     uint8_t ena, uint8_t force, uint8_t tc);
+void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
+				     uint8_t force, uint8_t tc, uint64_t drop_percent);
 int __roc_api roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type,
 				  uint8_t bp_cnt, uint16_t *bpids);
 int __roc_api roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index a34ce27d70..f791b7ab6d 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -283,6 +283,7 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct roc_nix_fc_cfg tmp;
+	uint64_t pool_drop_pct;
 	struct roc_nix_rq *rq;
 	int sso_ena = 0, rc;
 
@@ -293,13 +294,19 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return -EINVAL;
 
 	if (sso_ena) {
+		pool_drop_pct = fc_cfg->rq_cfg.pool_drop_pct;
+		/* Use default value for zero pct */
+		if (fc_cfg->rq_cfg.enable && !pool_drop_pct)
+			pool_drop_pct = ROC_NIX_AURA_THRESH;
+
 		roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
 				      fc_cfg->rq_cfg.enable, true,
-				      fc_cfg->rq_cfg.tc);
+				      fc_cfg->rq_cfg.tc, fc_cfg->rq_cfg.pool_drop_pct);
 
 		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc);
+					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc,
+					      fc_cfg->rq_cfg.pool_drop_pct);
 	}
 
 	/* Copy RQ config to CQ config as they are occupying same area */
@@ -462,8 +469,8 @@ nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid,
 #define NIX_BPID_INVALID 0xFFFF
 
 void
-roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
-		      uint8_t force, uint8_t tc)
+roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, uint8_t force,
+		      uint8_t tc, uint64_t drop_percent)
 {
 	uint32_t aura_id = roc_npa_aura_handle_to_aura(pool_id);
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
@@ -499,7 +506,7 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 	}
 
 	bp_intf = 1 << nix->is_nix1;
-	bp_thresh = NIX_RQ_AURA_THRESH(rsp->aura.limit >> rsp->aura.shift);
+	bp_thresh = NIX_RQ_AURA_THRESH(drop_percent, rsp->aura.limit >> rsp->aura.shift);
 
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index b16756d642..329ebf9405 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -263,7 +263,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 		 */
 		if (aura_setup && nix->rqs[0] && nix->rqs[0]->tc != ROC_NIX_PFC_CLASS_INVALID)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      true, true, nix->rqs[0]->tc);
+					      true, true, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
 	} else {
 		rc = nix_inl_global_meta_buffer_validate(idev, rq);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 7144d1ee10..f900a81d8a 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -20,7 +20,7 @@
 /* Apply LBP at 75% of actual BP */
 #define NIX_CQ_LPB_THRESH_FRAC	(75 * 16 / 100)
 #define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
-#define NIX_RQ_AURA_THRESH(x)	(((x)*95) / 100)
+#define NIX_RQ_AURA_THRESH(percent, val) (((val) * (percent)) / 100)
 
 /* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */
 #define CQ_CQE_THRESH_DEFAULT	0x1ULL
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 9a02026ea6..d39bed6e84 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -263,7 +263,7 @@ cnxk_sso_rx_adapter_queue_add(
 		if (rxq_sp->tx_pause)
 			roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 					      rxq_sp->qconf.mp->pool_id, true,
-					      dev->force_ena_bp, rxq_sp->tc);
+					      dev->force_ena_bp, rxq_sp->tc, ROC_NIX_AURA_THRESH);
 		cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
 		cnxk_eth_dev->nb_rxq_sso++;
 	}
@@ -307,7 +307,7 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 		rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
 		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 				      rxq_sp->qconf.mp->pool_id, false,
-				      dev->force_ena_bp, 0);
+				      dev->force_ena_bp, 0, ROC_NIX_AURA_THRESH);
 		cnxk_eth_dev->nb_rxq_sso--;
 
 		/* Enable drop_re if it was disabled earlier */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 08/32] common/cnxk: make aura flow control config more predictable
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (5 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 07/32] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 09/32] common/cnxk: update age drop statistics Nithin Dabilpuram
                     ` (23 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

Restrict shared BPID config to the case where force BP is enabled,
and make aura flow control config more predictable: on a collision,
do not disable the existing config; instead ignore the new config and
log it.

Also remove BPID setup from the Rx adapter, as it is now evaluated and
configured every time the ethdev is stopped/started.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
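
For context, a condensed sketch (taken from the hunks below) of how
the force flag now flows from the Rx adapter devarg to the aura
backpressure config; variable names come from those hunks:

	/* Propagate the Rx adapter's force_ena_bp devarg once ... */
	cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;

	/* ... so the FC path passes it as the force flag for shared auras. */
	roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool, fc_cfg->rq_cfg.enable,
			      roc_nix->force_rx_aura_bp, fc_cfg->rq_cfg.tc, pool_drop_pct);
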
 drivers/common/cnxk/roc_nix.h            |  1 +
 drivers/common/cnxk/roc_nix_fc.c         | 49 ++++++++++++------------
 drivers/common/cnxk/roc_nix_inl.c        |  2 +-
 drivers/common/cnxk/roc_npa.c            |  3 ++
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 13 +------
 5 files changed, 32 insertions(+), 36 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index fde8fe4ecc..2b576f0891 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -451,6 +451,7 @@ struct roc_nix {
 	bool custom_sa_action;
 	bool local_meta_aura_ena;
 	uint32_t meta_buf_sz;
+	bool force_rx_aura_bp;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index f791b7ab6d..0bbc233376 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -300,13 +300,13 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 			pool_drop_pct = ROC_NIX_AURA_THRESH;
 
 		roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
-				      fc_cfg->rq_cfg.enable, true,
-				      fc_cfg->rq_cfg.tc, fc_cfg->rq_cfg.pool_drop_pct);
+				      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
+				      fc_cfg->rq_cfg.tc, pool_drop_pct);
 
 		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc,
-					      fc_cfg->rq_cfg.pool_drop_pct);
+					      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
+					      fc_cfg->rq_cfg.tc, pool_drop_pct);
 	}
 
 	/* Copy RQ config to CQ config as they are occupying same area */
@@ -479,7 +479,8 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 	struct npa_aq_enq_rsp *rsp;
 	uint8_t bp_thresh, bp_intf;
 	struct mbox *mbox;
-	int rc;
+	uint16_t bpid;
+	int rc, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return;
@@ -508,34 +509,25 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 	bp_intf = 1 << nix->is_nix1;
 	bp_thresh = NIX_RQ_AURA_THRESH(drop_percent, rsp->aura.limit >> rsp->aura.shift);
 
+	bpid = (rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
-		uint16_t bpid =
-			(rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
-
 		/* Disable BP if BPIDs don't match and couldn't add new BPID. */
 		if (bpid != nix->bpid[tc]) {
 			uint16_t bpid_new = NIX_BPID_INVALID;
 
-			if ((nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new) < 0) &&
-			    !force) {
-				plt_info("Disabling BP/FC on aura 0x%" PRIx64
-					 " as it shared across ports or tc",
+			if (force && !nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new)) {
+				plt_info("Setting up shared BPID on shared aura 0x%" PRIx64,
 					 pool_id);
 
-				if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
-					plt_nix_dbg(
-						"Disabling backpressue failed on aura 0x%" PRIx64,
-						pool_id);
-			}
-
-			/* Configure Aura with new BPID if it is allocated. */
-			if (bpid_new != NIX_BPID_INVALID) {
+				/* Configure Aura with new BPID if it is allocated. */
 				if (roc_npa_aura_bp_configure(pool_id, bpid_new, bp_intf, bp_thresh,
 							      true))
-					plt_nix_dbg(
-						"Enabling backpressue failed on aura 0x%" PRIx64,
+					plt_err("Enabling backpressue failed on aura 0x%" PRIx64,
 						pool_id);
+			} else {
+				plt_info("Ignoring port=%u tc=%u config on shared aura 0x%" PRIx64,
+					 roc_nix->port_id, tc, pool_id);
 			}
 		}
 
@@ -548,10 +540,19 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 
 	if (ena) {
 		if (roc_npa_aura_bp_configure(pool_id, nix->bpid[tc], bp_intf, bp_thresh, true))
-			plt_nix_dbg("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
+			plt_err("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	} else {
+		bool found = !!force;
+
+		/* Don't disable if existing BPID is not within this port's list */
+		for (i = 0; i < nix->chan_cnt; i++)
+			if (bpid == nix->bpid[i])
+				found = true;
+		if (!found)
+			return;
+
 		if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
-			plt_nix_dbg("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
+			plt_err("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	}
 
 	return;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 329ebf9405..8592e1cb0b 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -263,7 +263,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 		 */
 		if (aura_setup && nix->rqs[0] && nix->rqs[0]->tc != ROC_NIX_PFC_CLASS_INVALID)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      true, true, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
+					      true, false, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
 	} else {
 		rc = nix_inl_global_meta_buffer_validate(idev, rq);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index dd649812b4..377439c2ba 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -892,6 +892,9 @@ roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf,
 	struct mbox *mbox;
 	int rc = 0;
 
+	plt_npa_dbg("Setting BPID %u BP_INTF 0x%x BP_THRESH %u enable %u on aura %" PRIx64,
+		    bpid, bp_intf, bp_thresh, enable, aura_handle);
+
 	if (lf == NULL)
 		return NPA_ERR_PARAM;
 
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index d39bed6e84..8ad84198b9 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -260,10 +260,8 @@ cnxk_sso_rx_adapter_queue_add(
 							     false);
 		}
 
-		if (rxq_sp->tx_pause)
-			roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
-					      rxq_sp->qconf.mp->pool_id, true,
-					      dev->force_ena_bp, rxq_sp->tc, ROC_NIX_AURA_THRESH);
+		/* Propagate force bp devarg */
+		cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
 		cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
 		cnxk_eth_dev->nb_rxq_sso++;
 	}
@@ -293,8 +291,6 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 			      int32_t rx_queue_id)
 {
 	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
-	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-	struct cnxk_eth_rxq_sp *rxq_sp;
 	int i, rc = 0;
 
 	RTE_SET_USED(event_dev);
@@ -302,12 +298,7 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
 			cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
 	} else {
-		rxq_sp = cnxk_eth_rxq_to_sp(
-			eth_dev->data->rx_queues[rx_queue_id]);
 		rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
-		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
-				      rxq_sp->qconf.mp->pool_id, false,
-				      dev->force_ena_bp, 0, ROC_NIX_AURA_THRESH);
 		cnxk_eth_dev->nb_rxq_sso--;
 
 		/* Enable drop_re if it was disabled earlier */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 09/32] common/cnxk: update age drop statistics
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (6 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 08/32] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 10/32] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
                     ` (22 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Update age drop statistics and add telemetry statistics for age drops.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
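
A short sketch of reading the new per-SQ aged-drop counters; it
assumes the existing roc_nix_stats_queue_get() helper (qid/is_rx
argument order per its current prototype) and a valid sq_id, and gates
on the new feature check:

	struct roc_nix_stats_queue qstats;

	if (roc_feature_nix_has_age_drop_stats() &&
	    !roc_nix_stats_queue_get(roc_nix, sq_id, 0 /* Tx */, &qstats))
		plt_info("SQ %u aged drops: %" PRIu64 " pkts, %" PRIu64 " octs",
			 sq_id, qstats.tx_age_drop_pkts, qstats.tx_age_drop_octs);
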
 drivers/common/cnxk/cnxk_telemetry_nix.c | 4 ++++
 drivers/common/cnxk/hw/nix.h             | 2 ++
 drivers/common/cnxk/roc_features.h       | 6 ++++++
 drivers/common/cnxk/roc_nix.h            | 2 ++
 drivers/common/cnxk/roc_nix_stats.c      | 4 ++++
 5 files changed, 18 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_telemetry_nix.c b/drivers/common/cnxk/cnxk_telemetry_nix.c
index b7285cf137..ccae5d7853 100644
--- a/drivers/common/cnxk/cnxk_telemetry_nix.c
+++ b/drivers/common/cnxk/cnxk_telemetry_nix.c
@@ -680,6 +680,10 @@ nix_sq_ctx(volatile void *qctx, struct plt_tel_data *d)
 	/* W12 */
 	CNXK_TEL_DICT_BF_PTR(d, ctx, pkts, w12_);
 
+	/* W13 */
+	CNXK_TEL_DICT_INT(d, ctx, aged_drop_octs, w13_);
+	CNXK_TEL_DICT_INT(d, ctx, aged_drop_pkts, w13_);
+
 	/* W14 */
 	CNXK_TEL_DICT_BF_PTR(d, ctx, drop_octs, w14_);
 
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 0d8f2a5e9b..fbdf1b64f6 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -363,6 +363,8 @@
 #define NIX_LF_SQ_OP_STATUS	 (0xa30ull)
 #define NIX_LF_SQ_OP_DROP_OCTS	 (0xa40ull)
 #define NIX_LF_SQ_OP_DROP_PKTS	 (0xa50ull)
+#define NIX_LF_SQ_OP_AGE_DROP_OCTS (0xa60ull) /* [CN10K, .) */
+#define NIX_LF_SQ_OP_AGE_DROP_PKTS (0xa70ull) /* [CN10K, .) */
 #define NIX_LF_CQ_OP_INT	 (0xb00ull)
 #define NIX_LF_CQ_OP_DOOR	 (0xb30ull)
 #define NIX_LF_CQ_OP_STATUS	 (0xb40ull)
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index c2893faa65..6fe01015d8 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -47,4 +47,10 @@ roc_feature_nix_has_rxchan_multi_bpid(void)
 		return true;
 	return false;
 }
+
+static inline bool
+roc_feature_nix_has_age_drop_stats(void)
+{
+	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+}
 #endif
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 2b576f0891..f84e473db6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -293,6 +293,8 @@ struct roc_nix_stats_queue {
 			uint64_t tx_octs;
 			uint64_t tx_drop_pkts;
 			uint64_t tx_drop_octs;
+			uint64_t tx_age_drop_pkts;
+			uint64_t tx_age_drop_octs;
 		};
 	};
 };
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index ca0e8ccb4f..1e93191a07 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -137,6 +137,10 @@ nix_stat_tx_queue_get(struct nix *nix, uint16_t qid,
 	qstats->tx_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_OCTS);
 	qstats->tx_drop_pkts = qstat_read(nix, qid, NIX_LF_SQ_OP_DROP_PKTS);
 	qstats->tx_drop_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_DROP_OCTS);
+	if (roc_feature_nix_has_age_drop_stats()) {
+		qstats->tx_age_drop_pkts = qstat_read(nix, qid, NIX_LF_SQ_OP_AGE_DROP_PKTS);
+		qstats->tx_age_drop_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_AGE_DROP_OCTS);
+	}
 }
 
 static int
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 10/32] common/cnxk: fetch eng caps for inl outb inst format
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (7 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 09/32] common/cnxk: update age drop statistics Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 11/32] common/cnxk: add receive error mask Nithin Dabilpuram
                     ` (21 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Fetch engine capabilities and use them, along with a model check, to
determine whether the inline outbound instruction format carries a
NIX Tx offset or an address.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
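
A condensed sketch of how the fetched capabilities are consumed on the
session-create path (mirrors the ethdev hunk below; BIT_ULL(35) is the
SE engine capability bit checked there):

	/* Use the NIX Tx offset based instruction format only when both the
	 * model supports multi-seg inline IPsec and the SE engine reports
	 * the capability (bit 35).
	 */
	dev->outb.cpt_eng_caps = roc_nix_inl_eng_caps_get(nix);
	if (roc_feature_nix_has_inl_ipsec_mseg() &&
	    (dev->outb.cpt_eng_caps & BIT_ULL(35)))
		sess_priv.nixtx_off = 1;
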
 drivers/common/cnxk/roc_cpt.h       |   3 +
 drivers/common/cnxk/roc_nix_inl.c   | 101 ++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.h   |   1 +
 drivers/common/cnxk/roc_nix_priv.h  |   1 +
 drivers/common/cnxk/version.map     |   1 +
 drivers/net/cnxk/cn10k_ethdev_sec.c |   3 +-
 drivers/net/cnxk/cnxk_ethdev.c      |   2 +
 drivers/net/cnxk/cnxk_ethdev.h      |   3 +
 8 files changed, 114 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 92a18711dc..910bd37a0c 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -12,6 +12,9 @@
 #define ROC_AE_CPT_BLOCK_TYPE1 0
 #define ROC_AE_CPT_BLOCK_TYPE2 1
 
+#define ROC_LOADFVC_MAJOR_OP 0x01UL
+#define ROC_LOADFVC_MINOR_OP 0x08UL
+
 /* Default engine groups */
 #define ROC_CPT_DFLT_ENG_GRP_SE	   0UL
 #define ROC_CPT_DFLT_ENG_GRP_SE_IE 1UL
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 8592e1cb0b..67f8ce9aa0 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -602,6 +602,96 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
 	return rc;
 }
 
+static void
+nix_inl_eng_caps_get(struct nix *nix)
+{
+	struct roc_cpt_lf *lf = nix->cpt_lf_base;
+	uintptr_t lmt_base = lf->lmt_base;
+	union cpt_res_s res, *hw_res;
+	struct cpt_inst_s inst;
+	uint64_t *rptr;
+
+	hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
+	if (hw_res == NULL) {
+		plt_err("Couldn't allocate memory for result address");
+		return;
+	}
+
+	rptr = plt_zmalloc(ROC_ALIGN, 0);
+	if (rptr == NULL) {
+		plt_err("Couldn't allocate memory for rptr");
+		plt_free(hw_res);
+		return;
+	}
+
+	/* Fill CPT_INST_S for LOAD_FVC/HW_CRYPTO_SUPPORT microcode op */
+	memset(&inst, 0, sizeof(struct cpt_inst_s));
+	inst.res_addr = (uint64_t)hw_res;
+	inst.rptr = (uint64_t)rptr;
+	inst.w4.s.opcode_major = ROC_LOADFVC_MAJOR_OP;
+	inst.w4.s.opcode_minor = ROC_LOADFVC_MINOR_OP;
+	inst.w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE;
+
+	/* Use 1 min timeout for the poll */
+	const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();
+
+	if (roc_model_is_cn9k()) {
+		uint64_t lmt_status;
+
+		hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
+		plt_io_wmb();
+
+		do {
+			roc_lmt_mov_seg((void *)lmt_base, &inst, 4);
+			lmt_status = roc_lmt_submit_ldeor(lf->io_addr);
+		} while (lmt_status != 0);
+
+		/* Wait until CPT instruction completes */
+		do {
+			res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+			if (unlikely(plt_tsc_cycles() > timeout))
+				break;
+		} while (res.cn9k.compcode == CPT_COMP_NOT_DONE);
+
+		if (res.cn9k.compcode != CPT_COMP_GOOD) {
+			plt_err("LOAD FVC operation timed out");
+			return;
+		}
+	} else {
+		uint64_t lmt_arg, io_addr;
+		uint16_t lmt_id;
+
+		hw_res->cn10k.compcode = CPT_COMP_NOT_DONE;
+
+		/* Use this lcore's LMT line as no one else is using it */
+		ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+		memcpy((void *)lmt_base, &inst, sizeof(inst));
+
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
+		io_addr = lf->io_addr | ROC_CN10K_CPT_INST_DW_M1 << 4;
+
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		plt_io_wmb();
+
+		/* Wait until CPT instruction completes */
+		do {
+			res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+			if (unlikely(plt_tsc_cycles() > timeout))
+				break;
+		} while (res.cn10k.compcode == CPT_COMP_NOT_DONE);
+
+		if (res.cn10k.compcode != CPT_COMP_GOOD || res.cn10k.uc_compcode) {
+			plt_err("LOAD FVC operation timed out");
+			goto exit;
+		}
+	}
+
+	nix->cpt_eng_caps = plt_be_to_cpu_64(*rptr);
+exit:
+	plt_free(rptr);
+	plt_free(hw_res);
+}
+
 int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
@@ -652,6 +742,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
 	}
+	nix->cpt_eng_caps = roc_cpt->hw_caps[CPT_ENG_TYPE_SE].u;
 
 	/* Setup Inbound SA table */
 	rc = nix_inl_inb_sa_tbl_setup(roc_nix);
@@ -871,6 +962,8 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 		}
 	}
 
+	/* Fetch engine capabilities */
+	nix_inl_eng_caps_get(nix);
 	return 0;
 
 lf_fini:
@@ -1571,3 +1664,11 @@ roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb)
 {
 	meta_pool_cb = cb;
 }
+
+uint64_t
+roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->cpt_eng_caps;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 6220ba6773..daa21a941a 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -212,5 +212,6 @@ int __roc_api roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
 int __roc_api roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr,
 				    void *sa_cptr, bool inb, uint16_t sa_len);
 void __roc_api roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file);
+uint64_t __roc_api roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix);
 
 #endif /* _ROC_NIX_INL_H_ */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index f900a81d8a..6872630dc8 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -209,6 +209,7 @@ struct nix {
 	uint16_t outb_se_ring_base;
 	uint16_t cpt_lbpid;
 	uint16_t cpt_nixbpid;
+	uint64_t cpt_eng_caps;
 	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index d740d9df81..809fd81b20 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -186,6 +186,7 @@ INTERNAL {
 	roc_nix_inl_dev_rq_put;
 	roc_nix_inl_dev_unlock;
 	roc_nix_inl_dev_xaq_realloc;
+	roc_nix_inl_eng_caps_get;
 	roc_nix_inl_inb_is_enabled;
 	roc_nix_inl_inb_init;
 	roc_nix_inl_inb_sa_base_get;
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 3c32de0f94..9625704ec1 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -809,7 +809,8 @@ cn10k_eth_sec_session_create(void *device,
 		sess_priv.chksum = (!ipsec->options.ip_csum_enable << 1 |
 				    !ipsec->options.l4_csum_enable);
 		sess_priv.dec_ttl = ipsec->options.dec_ttl;
-		if (roc_feature_nix_has_inl_ipsec_mseg())
+		if (roc_feature_nix_has_inl_ipsec_mseg() &&
+		    dev->outb.cpt_eng_caps & BIT_ULL(35))
 			sess_priv.nixtx_off = 1;
 
 		/* Pointer from eth_sec -> outb_sa */
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 7cc41e0c31..eeabf6edec 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -203,6 +203,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 			plt_err("Outbound fc sw mem alloc failed");
 			goto sa_bmap_free;
 		}
+
+		dev->outb.cpt_eng_caps = roc_nix_inl_eng_caps_get(nix);
 	}
 	return 0;
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 97537de17a..44e37d6550 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -287,6 +287,9 @@ struct cnxk_eth_dev_sec_outb {
 
 	/* Lock to synchronize sa setup/release */
 	rte_spinlock_t lock;
+
+	/* Engine caps */
+	uint64_t cpt_eng_caps;
 };
 
 struct cnxk_eth_dev {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 11/32] common/cnxk: add receive error mask
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (8 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 10/32] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 12/32] common/cnxk: fix null pointer dereference Nithin Dabilpuram
                     ` (20 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add support to configure the receive error mask for 106B0.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
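
For illustration, one way a driver might compose a receive error mask
from the new definitions; how the mask is applied to the LF Rx config
is outside this hunk and assumed to follow the existing rx_cfg
plumbing:

	/* Treat FCS, jabber and size errors as errors to be masked/handled. */
	uint64_t rx_err_mask = ROC_NIX_RE_FCS | ROC_NIX_RE_FCS_RCV |
			       ROC_NIX_RE_JABBER | ROC_NIX_RE_UNDERSIZE |
			       ROC_NIX_RE_OVERSIZE | ROC_NIX_RE_OL2_LENMISM;
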
 drivers/common/cnxk/roc_features.h |  6 ++++++
 drivers/common/cnxk/roc_nix.h      | 16 ++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 6fe01015d8..ce12a1dca4 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -16,6 +16,12 @@ roc_feature_nix_has_inl_ipsec_mseg(void)
 	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
 }
 
+static inline bool
+roc_feature_nix_has_drop_re_mask(void)
+{
+	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+}
+
 static inline bool
 roc_feature_nix_has_inl_rq_mask(void)
 {
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index f84e473db6..37d0ed5ebe 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -242,6 +242,22 @@ struct roc_nix_eeprom_info {
 #define ROC_NIX_LF_RX_CFG_LEN_OL4     BIT_ULL(40)
 #define ROC_NIX_LF_RX_CFG_LEN_OL3     BIT_ULL(41)
 
+#define ROC_NIX_LF_RX_CFG_RX_ERROR_MASK 0xFFFFFFFFFFF80000
+#define ROC_NIX_RE_PARTIAL		BIT_ULL(1)
+#define ROC_NIX_RE_JABBER		BIT_ULL(2)
+#define ROC_NIX_RE_CRC8_PCH		BIT_ULL(5)
+#define ROC_NIX_RE_CNC_INV		BIT_ULL(6)
+#define ROC_NIX_RE_FCS			BIT_ULL(7)
+#define ROC_NIX_RE_FCS_RCV		BIT_ULL(8)
+#define ROC_NIX_RE_TERMINATE		BIT_ULL(9)
+#define ROC_NIX_RE_MACSEC		BIT_ULL(10)
+#define ROC_NIX_RE_RX_CTL		BIT_ULL(11)
+#define ROC_NIX_RE_SKIP			BIT_ULL(12)
+#define ROC_NIX_RE_DMAPKT		BIT_ULL(15)
+#define ROC_NIX_RE_UNDERSIZE		BIT_ULL(16)
+#define ROC_NIX_RE_OVERSIZE		BIT_ULL(17)
+#define ROC_NIX_RE_OL2_LENMISM		BIT_ULL(18)
+
 /* Group 0 will be used for RSS, 1 -7 will be used for npc_flow RSS action*/
 #define ROC_NIX_RSS_GROUP_DEFAULT    0
 #define ROC_NIX_RSS_GRPS	     8
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 12/32] common/cnxk: fix null pointer dereference
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (9 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 11/32] common/cnxk: add receive error mask Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 13/32] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
                     ` (19 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Gowrishankar Muthukrishnan, stable

From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>

Fix null pointer dereferences reported by klocwork.

Fixes: 4398c4092f3d ("common/cnxk: dump inline device RQ context")
Fixes: 79dc6f324e82 ("common/cnxk: add inline function for statistics")
cc: stable@dpdk.org

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/common/cnxk/roc_nix_debug.c   | 8 +++++++-
 drivers/common/cnxk/roc_nix_inl_dev.c | 2 +-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 399d0d7eae..a1c3db284b 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -733,7 +733,13 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file)
 	inl_rq = roc_nix_inl_dev_rq(roc_nix);
 	if (inl_rq) {
 		struct idev_cfg *idev = idev_get_cfg();
-		struct nix_inl_dev *inl_dev = idev->nix_inl_dev;
+		struct nix_inl_dev *inl_dev = NULL;
+
+		if (idev && idev->nix_inl_dev)
+			inl_dev = idev->nix_inl_dev;
+
+		if (!inl_dev)
+			return -EINVAL;
 
 		rc = nix_q_ctx_get(&inl_dev->dev, NIX_AQ_CTYPE_RQ, inl_rq->qid, &ctx);
 		if (rc) {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 0df148c3ed..ca948d3bc7 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -843,7 +843,7 @@ roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats)
 	if (stats == NULL)
 		return NIX_ERR_PARAM;
 
-	if (!idev && idev->nix_inl_dev)
+	if (idev && idev->nix_inl_dev)
 		inl_dev = idev->nix_inl_dev;
 
 	if (!inl_dev)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 13/32] common/cnxk: fix parameter in NIX dump
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (10 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 12/32] common/cnxk: fix null pointer dereference Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 14/32] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
                     ` (18 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Gowrishankar Muthukrishnan, stable

From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>

Fix the parameter passed to nix_dump() to match what the format
specifier expects.

Fixes: d2f168dfa5de ("common/cnxk: support 10K B0 for inline IPsec")
cc: stable@dpdk.org

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/common/cnxk/roc_nix_debug.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index a1c3db284b..8c7d902f1e 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -664,8 +664,8 @@ nix_lf_cq_dump(__io struct nix_cq_ctx_s *ctx, FILE *file)
 	nix_dump(file,
 		 "W1: lbpid_high \t\t\t0x%03x\nW1: lbpid_med \t\t\t0x%03x\n"
 		 "W1: lbpid_low \t\t\t0x%03x\n(W1: lbpid) \t\t\t0x%03x\n",
-		 ctx->lbpid_high, ctx->lbpid_med, ctx->lbpid_low,
-		 ctx->lbpid_high << 6 | ctx->lbpid_med << 3 | ctx->lbpid_low);
+		 ctx->lbpid_high, ctx->lbpid_med, ctx->lbpid_low, (unsigned int)
+		 (ctx->lbpid_high << 6 | ctx->lbpid_med << 3 | ctx->lbpid_low));
 	nix_dump(file, "W1: lbp_ena \t\t\t\t%d\n", ctx->lbp_ena);
 
 	nix_dump(file, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d",
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 14/32] common/cnxk: set relchan in TL4 config for each SDP queue
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (11 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 13/32] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 15/32] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
                     ` (17 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Veerasenareddy Burru

From: Veerasenareddy Burru <vburru@marvell.com>

Set a distinct relchan in each TL4 queue connected to SDP.

Currently, relchan in the TL4 SDP config is set to 0 for all
SDP-NIX queues. Each TL4 queue for SDP needs to be configured
with a distinct channel so that SDP can provide per-channel
backpressure to NIX.

Signed-off-by: Veerasenareddy Burru <vburru@marvell.com>
---
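
A minimal sketch of the intent, using the assignment added below
(tx_chan_base is the LF's SDP channel base, as in the diff):

    /* Configure TL4 to send to the SDP channel instead of CGX/LBK and
     * pick the LF's own channel (low byte of tx_chan_base) as relchan,
     * so SDP can backpressure each queue on its own channel.
     */
    if (nix->sdp_link) {
            relchan = nix->tx_chan_base & 0xff;
            reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
    }
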
 drivers/common/cnxk/roc_nix_tm_utils.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 5864833109..9ede1bebe7 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -582,6 +582,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
 
 		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
 		if (nix->sdp_link) {
+			relchan = nix->tx_chan_base & 0xff;
 			plt_tm_dbg("relchan=%u schq=%u tx_chan_cnt=%u\n", relchan, schq,
 				   nix->tx_chan_cnt);
 			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 15/32] common/cnxk: avoid STALL with dual rate on CNF95N
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (12 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 14/32] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 16/32] common/cnxk: update errata info Nithin Dabilpuram
                     ` (16 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Due to an erratum, RED_ALGO STALL with dual shaper rate hangs on
CNF95N and CNF95O platforms. Set RED_ALGO to DISCARD when dual
shaper rate is used on CNF95N and CNF95O.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
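
A minimal sketch of the resulting behaviour, assuming a shaper profile
where both CIR and PIR are enabled (dual rate); the DISCARD constant
name is assumed from the surrounding code:

    /* Default to the profile's RED algorithm. */
    tm_node->red_algo = roc_prof->red_algo;

    /* On affected models, STALL is not usable when both PIR and CIR are
     * enabled, so fall back to DISCARD for dual shaper rate.
     */
    if (roc_model_is_cn96_cx() || roc_model_is_cnf95xxn_a0() ||
        roc_model_is_cnf95xxo_a0() || roc_model_is_cnf95xxn_a1() ||
        roc_model_is_cnf95xxn_b0()) {
            nix_tm_shaper_conf_get(profile, &cir, &pir);

            if (pir.rate && cir.rate)
                    tm_node->red_algo = NIX_REDALG_DISCARD;
    }
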
 drivers/common/cnxk/roc_nix_tm_utils.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 9ede1bebe7..3840d6d457 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -1267,7 +1267,8 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 	tm_node->red_algo = roc_prof->red_algo;
 
 	/* C0 doesn't support STALL when both PIR & CIR are enabled */
-	if (roc_model_is_cn96_cx()) {
+	if (roc_model_is_cn96_cx() || roc_model_is_cnf95xxn_a0() || roc_model_is_cnf95xxo_a0() ||
+	    roc_model_is_cnf95xxn_a1() || roc_model_is_cnf95xxn_b0()) {
 		nix_tm_shaper_conf_get(profile, &cir, &pir);
 
 		if (pir.rate && cir.rate)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 16/32] common/cnxk: update errata info
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (13 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 15/32] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 17/32] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
                     ` (15 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Update errata info based on CN10KA B0 and CN10KB A0.
Also remove the duplicate model check roc_model_is_cn103xx().

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_errata.h   | 20 ++++++++------------
 drivers/common/cnxk/roc_features.h |  2 +-
 drivers/common/cnxk/roc_model.h    |  6 ------
 3 files changed, 9 insertions(+), 19 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index 1333bde629..22d2406e94 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -6,7 +6,7 @@
 
 #include "roc_model.h"
 
-/* Errata IPBUNIXRX-40129 */
+/* Errata IPBUNIXRX-40129, IPBUNIXRX-40179 */
 static inline bool
 roc_errata_nix_has_no_drop_re(void)
 {
@@ -41,7 +41,8 @@ static inline bool
 roc_errata_nix_has_no_vwqe_flush_op(void)
 {
 	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
-		roc_model_is_cnf10kb_a0());
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1() || roc_model_is_cn10ka_b0() ||
+		roc_model_is_cn10kb_a0());
 }
 
 /* Errata IPBURVUM-38481 */
@@ -51,13 +52,6 @@ roc_errata_ruvm_has_no_interrupt_with_msixen(void)
 	return true;
 }
 
-/* Errata IPBUNIXTX-39300 */
-static inline bool
-roc_errata_nix_has_assign_incorrect_qintidx(void)
-{
-	return true;
-}
-
 /* Errata IPBUCPT-38551 */
 static inline bool
 roc_errata_cpt_has_use_incorrect_ldwb(void)
@@ -69,17 +63,19 @@ roc_errata_cpt_has_use_incorrect_ldwb(void)
 static inline bool
 roc_errata_nix_has_overwrite_incorrect_sq_intr(void)
 {
-	return true;
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1());
 }
 
 /* Errata IPBUNIXTX-39248 */
 static inline bool
 roc_errata_nix_has_perf_issue_on_stats_update(void)
 {
-	return true;
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1());
 }
 
-/* Errata IPBUCPT-38726, IPBUCPT-38727 */
+/* Errata IPBUCPT-38736, IPBUCPT-38737 */
 static inline bool
 roc_errata_cpt_hang_on_x2p_bp(void)
 {
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index ce12a1dca4..36ef315f5a 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -7,7 +7,7 @@
 static inline bool
 roc_feature_sso_has_stash(void)
 {
-	return (roc_model_is_cn103xx() | roc_model_is_cn10ka_b0()) ? true : false;
+	return (roc_model_is_cn10kb() | roc_model_is_cn10ka_b0()) ? true : false;
 }
 
 static inline bool
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index f010cc4a44..58046af193 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -258,12 +258,6 @@ roc_model_is_cn10kb(void)
 	return roc_model->flag & ROC_MODEL_CN103xx;
 }
 
-static inline uint64_t
-roc_model_is_cn103xx(void)
-{
-	return roc_model->flag & ROC_MODEL_CN103xx;
-}
-
 static inline bool
 roc_env_is_hw(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 17/32] common/cnxk: sync between mbox up and down messages
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (14 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 16/32] common/cnxk: update errata info Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 18/32] common/cnxk: add more comments to mbox code Nithin Dabilpuram
                     ` (14 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

An issue is observed where, if the PF is used by DPDK and the VF is a
kernel netdev, the VF does not respond to link events. This is due to
a recent design change in the kernel where the sender checks whether
the previous interrupt has been received before triggering the current
interrupt, by waiting for the mailbox data register to become zero.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
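
A minimal sketch of the handshake, assuming the sender writes a non-zero
token (MBOX_DOWN_MSG or MBOX_UP_MSG) to the trigger register and the
receiver clears it from its interrupt handler; the calls are the mbox
helpers added in this patch:

    /* Sender (PF->VF UP path): write the token, then poll until the VF
     * interrupt handler has cleared the mailbox data register.
     */
    mbox_msg_send_up(&dev->mbox_vfpf_up, vf);
    if (!mbox_wait_for_zero(&dev->mbox_vfpf_up, vf))
            plt_base_dbg("Previous mbox message not yet acked by VF");

    /* Receiver (VF interrupt handler): clear the token so the sender's
     * wait can complete before it triggers the next message.
     */
    uint64_t mbox_data = plt_read64(dev->bar2 + RVU_VF_VFPF_MBOX0);
    if (mbox_data)
            plt_write64(!mbox_data, dev->bar2 + RVU_VF_VFPF_MBOX0);
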
 drivers/common/cnxk/roc_dev.c       | 20 ++++++++-
 drivers/common/cnxk/roc_mbox.c      | 64 +++++++++++++++++++++--------
 drivers/common/cnxk/roc_mbox.h      | 15 +++++++
 drivers/common/cnxk/roc_mbox_priv.h |  6 ++-
 4 files changed, 84 insertions(+), 21 deletions(-)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 5e4e564ebe..e5a5cd7c10 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -195,7 +195,8 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 				vf_msg->rc = msg->rc;
 				vf_msg->pcifunc = msg->pcifunc;
 				/* Send to VF */
-				mbox_msg_send(&dev->mbox_vfpf_up, vf);
+				mbox_msg_send_up(&dev->mbox_vfpf_up, vf);
+				mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
 			}
 		}
 
@@ -498,6 +499,7 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
 
 		/* Send to VF */
 		mbox_msg_send(vf_mbox, vf);
+		mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
 	}
 }
 
@@ -631,6 +633,7 @@ static void
 roc_pf_vf_mbox_irq(void *param)
 {
 	struct dev *dev = param;
+	uint64_t mbox_data;
 	uint64_t intr;
 
 	intr = plt_read64(dev->bar2 + RVU_VF_INT);
@@ -640,6 +643,13 @@ roc_pf_vf_mbox_irq(void *param)
 	plt_write64(intr, dev->bar2 + RVU_VF_INT);
 	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
 
+	/* Reading for UP/DOWN message, next message sending will be delayed
+	 * by 1ms until this region is zeroed mbox_wait_for_zero()
+	 */
+	mbox_data = plt_read64(dev->bar2 + RVU_VF_VFPF_MBOX0);
+	if (mbox_data)
+		plt_write64(!mbox_data, dev->bar2 + RVU_VF_VFPF_MBOX0);
+
 	/* First process all configuration messages */
 	process_msgs(dev, dev->mbox);
 
@@ -651,6 +661,7 @@ static void
 roc_af_pf_mbox_irq(void *param)
 {
 	struct dev *dev = param;
+	uint64_t mbox_data;
 	uint64_t intr;
 
 	intr = plt_read64(dev->bar2 + RVU_PF_INT);
@@ -660,6 +671,13 @@ roc_af_pf_mbox_irq(void *param)
 	plt_write64(intr, dev->bar2 + RVU_PF_INT);
 	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
 
+	/* Reading for UP/DOWN message, next message sending will be delayed
+	 * by 1ms until this region is zeroed mbox_wait_for_zero()
+	 */
+	mbox_data = plt_read64(dev->bar2 + RVU_PF_PFAF_MBOX0);
+	if (mbox_data)
+		plt_write64(!mbox_data, dev->bar2 + RVU_PF_PFAF_MBOX0);
+
 	/* First process all configuration messages */
 	process_msgs(dev, dev->mbox);
 
diff --git a/drivers/common/cnxk/roc_mbox.c b/drivers/common/cnxk/roc_mbox.c
index 7dcd188ca7..5338a960d9 100644
--- a/drivers/common/cnxk/roc_mbox.c
+++ b/drivers/common/cnxk/roc_mbox.c
@@ -10,18 +10,6 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
-#define RVU_AF_AFPF_MBOX0 (0x02000)
-#define RVU_AF_AFPF_MBOX1 (0x02008)
-
-#define RVU_PF_PFAF_MBOX0 (0xC00)
-#define RVU_PF_PFAF_MBOX1 (0xC08)
-
-#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
-#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
-
-#define RVU_VF_VFPF_MBOX0 (0x0000)
-#define RVU_VF_VFPF_MBOX1 (0x0008)
-
 /* RCLK, SCLK in MHz */
 uint16_t dev_rclk_freq;
 uint16_t dev_sclk_freq;
@@ -194,10 +182,31 @@ mbox_alloc_msg_rsp(struct mbox *mbox, int devid, int size, int size_rsp)
 
 /**
  * @internal
- * Send a mailbox message
+ * Synchronization between UP and DOWN messages
  */
-void
-mbox_msg_send(struct mbox *mbox, int devid)
+bool
+mbox_wait_for_zero(struct mbox *mbox, int devid)
+{
+	uint64_t data;
+
+	data = plt_read64((volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+
+	/* If data is non-zero wait for ~1ms and return to caller
+	 * whether data has changed to zero or not after the wait.
+	 */
+	if (data)
+		usleep(1000);
+	else
+		return true;
+
+	data = plt_read64((volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+	return data == 0;
+}
+
+static void
+mbox_msg_send_data(struct mbox *mbox, int devid, uint8_t data)
 {
 	struct mbox_dev *mdev = &mbox->dev[devid];
 	struct mbox_hdr *tx_hdr =
@@ -223,9 +232,28 @@ mbox_msg_send(struct mbox *mbox, int devid)
 	/* The interrupt should be fired after num_msgs is written
 	 * to the shared memory
 	 */
-	plt_write64(1, (volatile void *)(mbox->reg_base +
-					 (mbox->trigger |
-					  (devid << mbox->tr_shift))));
+	plt_write64(data, (volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+}
+
+/**
+ * @internal
+ * Send a mailbox message
+ */
+void
+mbox_msg_send(struct mbox *mbox, int devid)
+{
+	mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
+}
+
+/**
+ * @internal
+ * Send an UP mailbox message
+ */
+void
+mbox_msg_send_up(struct mbox *mbox, int devid)
+{
+	mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
 }
 
 /**
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 3d5746b9b8..93c5451c0f 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -35,6 +35,21 @@ struct mbox_msghdr {
 	int __io rc; /* Msg processed response code */
 };
 
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+
+#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
+
+#define RVU_VF_VFPF_MBOX0 (0x0000)
+#define RVU_VF_VFPF_MBOX1 (0x0008)
+
+#define MBOX_DOWN_MSG 1
+#define MBOX_UP_MSG   2
+
 /* Mailbox message types */
 #define MBOX_MSG_MASK	 0xFFFF
 #define MBOX_MSG_INVALID 0xFFFE
diff --git a/drivers/common/cnxk/roc_mbox_priv.h b/drivers/common/cnxk/roc_mbox_priv.h
index 4fafca6f72..354c8fa52a 100644
--- a/drivers/common/cnxk/roc_mbox_priv.h
+++ b/drivers/common/cnxk/roc_mbox_priv.h
@@ -71,10 +71,12 @@ struct mbox {
 const char *mbox_id2name(uint16_t id);
 int mbox_id2size(uint16_t id);
 void mbox_reset(struct mbox *mbox, int devid);
-int mbox_init(struct mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
-	      int direction, int ndevsi, uint64_t intr_offset);
+int mbox_init(struct mbox *mbox, uintptr_t hwbase, uintptr_t reg_base, int direction, int ndevsi,
+	      uint64_t intr_offset);
 void mbox_fini(struct mbox *mbox);
 void mbox_msg_send(struct mbox *mbox, int devid);
+void mbox_msg_send_up(struct mbox *mbox, int devid);
+bool mbox_wait_for_zero(struct mbox *mbox, int devid);
 int mbox_wait_for_rsp(struct mbox *mbox, int devid);
 int mbox_wait_for_rsp_tmo(struct mbox *mbox, int devid, uint32_t tmo);
 int mbox_get_rsp(struct mbox *mbox, int devid, void **msg);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 18/32] common/cnxk: add more comments to mbox code
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (15 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 17/32] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 19/32] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
                     ` (13 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add more comments to the mbox routines to make the flow easier to
follow.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_dev.c  | 20 +++++++++++++++++---
 drivers/common/cnxk/roc_mbox.c |  5 +++++
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index e5a5cd7c10..3125f9dda2 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -98,6 +98,9 @@ pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
 	return rc;
 }
 
+/* PF will send the messages to AF and wait for responses and forward the
+ * responses to VF.
+ */
 static int
 af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 {
@@ -115,9 +118,10 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 	/* We need to disable PF interrupts. We are in timer interrupt */
 	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
 
-	/* Send message */
+	/* Send message to AF */
 	mbox_msg_send(mbox, 0);
 
+	/* Wait for AF response */
 	do {
 		plt_delay_ms(sleep);
 		timeout++;
@@ -206,6 +210,7 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 	return req_hdr->num_msgs;
 }
 
+/* PF receives mbox DOWN messages from VF and forwards to AF */
 static int
 vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 {
@@ -274,6 +279,7 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 	if (routed > 0) {
 		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
 			     dev->pf, routed, vf);
+		/* PF will send the messages to AF and wait for responses */
 		af_pf_wait_msg(dev, vf, routed);
 		mbox_reset(dev->mbox, 0);
 	}
@@ -289,6 +295,7 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 	return i;
 }
 
+/* VF sends Ack to PF's UP messages */
 static int
 vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
 {
@@ -339,6 +346,7 @@ vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
 	return i;
 }
 
+/* PF handling messages from VF */
 static void
 roc_vf_pf_mbox_handle_msg(void *param)
 {
@@ -352,8 +360,9 @@ roc_vf_pf_mbox_handle_msg(void *param)
 		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
 			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
 				     dev->pf, dev->vf);
+			/* VF initiated down messages */
 			vf_pf_process_msgs(dev, vf);
-			/* UP messages */
+			/* VF replies to PF's UP messages */
 			vf_pf_process_up_msgs(dev, vf);
 			dev->intr.bits[vf / max_bits] &=
 				~(BIT_ULL(vf % max_bits));
@@ -362,6 +371,7 @@ roc_vf_pf_mbox_handle_msg(void *param)
 	dev->timer_set = 0;
 }
 
+/* IRQ to PF from VF - PF context (interrupt thread) */
 static void
 roc_vf_pf_mbox_irq(void *param)
 {
@@ -392,6 +402,7 @@ roc_vf_pf_mbox_irq(void *param)
 	}
 }
 
+/* Received response from AF (PF context) / PF (VF context) */
 static void
 process_msgs(struct dev *dev, struct mbox *mbox)
 {
@@ -451,7 +462,7 @@ process_msgs(struct dev *dev, struct mbox *mbox)
 	}
 
 	mbox_reset(mbox, 0);
-	/* Update acked if someone is waiting a message */
+	/* Update acked if someone is waiting a message - mbox_wait is waiting */
 	mdev->msgs_acked = msgs_acked;
 	plt_wmb();
 }
@@ -597,6 +608,7 @@ mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
 	return -ENODEV;
 }
 
+/* Received up messages from AF (PF context) / PF (in context) */
 static void
 process_msgs_up(struct dev *dev, struct mbox *mbox)
 {
@@ -629,6 +641,7 @@ process_msgs_up(struct dev *dev, struct mbox *mbox)
 	}
 }
 
+/* IRQ to VF from PF - VF context (interrupt thread) */
 static void
 roc_pf_vf_mbox_irq(void *param)
 {
@@ -657,6 +670,7 @@ roc_pf_vf_mbox_irq(void *param)
 	process_msgs_up(dev, &dev->mbox_up);
 }
 
+/* IRQ to PF from AF - PF context (interrupt thread) */
 static void
 roc_af_pf_mbox_irq(void *param)
 {
diff --git a/drivers/common/cnxk/roc_mbox.c b/drivers/common/cnxk/roc_mbox.c
index 5338a960d9..c91fa63e83 100644
--- a/drivers/common/cnxk/roc_mbox.c
+++ b/drivers/common/cnxk/roc_mbox.c
@@ -350,6 +350,11 @@ mbox_wait(struct mbox *mbox, int devid, uint32_t rst_timo)
 	uint32_t timeout = 0, sleep = 1;
 
 	rst_timo = rst_timo * 1000; /* Milli seconds to micro seconds */
+
+	/* Waiting for mdev->msgs_acked tp become equal to mdev->num_msgs,
+	 * mdev->msgs_acked are incremented at process_msgs() in interrupt
+	 * thread context.
+	 */
 	while (mdev->num_msgs > mdev->msgs_acked) {
 		plt_delay_us(sleep);
 		timeout += sleep;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 19/32] common/cnxk: add CN105xxN B0 model
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (16 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 18/32] common/cnxk: add more comments to mbox code Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 20/32] common/cnxk: access valid pass value Nithin Dabilpuram
                     ` (12 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add support for the CN105xxN B0 pass.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
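
A minimal sketch of how the new flag is consumed; roc_model_is_cnf10kb_b0()
simply tests the per-model bit populated at probe time from the MIDR
part/major/minor values:

    /* True only when the probed silicon matched the
     * {PART_105xxN, major 1, minor 0} entry in the model table.
     */
    if (roc_model_is_cnf10kb_b0())
            plt_base_dbg("Running on CN105xxN (cnf10kb) B0 pass");
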
 drivers/common/cnxk/roc_model.c | 1 +
 drivers/common/cnxk/roc_model.h | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index e4767ed91f..f4f2a38e70 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -66,6 +66,7 @@ static const struct model_db {
 	{VENDOR_ARM, PART_105xx, 0, 1, ROC_MODEL_CNF105xx_A1, "cnf10ka_a1"},
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
+	{VENDOR_ARM, PART_105xxN, 1, 0, ROC_MODEL_CNF105xxN_B0, "cnf10kb_b0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 0, ROC_MODEL_CN98xx_A0, "cn98xx_a0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 1, ROC_MODEL_CN98xx_A1, "cn98xx_a1"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 0, ROC_MODEL_CN96xx_A0, "cn96xx_a0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index 58046af193..b6dab4f64e 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -31,6 +31,7 @@ struct roc_model {
 #define ROC_MODEL_CN106xx_A1   BIT_ULL(24)
 #define ROC_MODEL_CNF105xx_A1  BIT_ULL(25)
 #define ROC_MODEL_CN106xx_B0   BIT_ULL(26)
+#define ROC_MODEL_CNF105xxN_B0 BIT_ULL(27)
 /* Following flags describe platform code is running on */
 #define ROC_ENV_HW   BIT_ULL(61)
 #define ROC_ENV_EMUL BIT_ULL(62)
@@ -57,7 +58,7 @@ struct roc_model {
 
 #define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0 | ROC_MODEL_CN106xx_A1 | ROC_MODEL_CN106xx_B0)
 #define ROC_MODEL_CNF105xx  (ROC_MODEL_CNF105xx_A0 | ROC_MODEL_CNF105xx_A1)
-#define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0)
+#define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0 | ROC_MODEL_CNF105xxN_B0)
 #define ROC_MODEL_CN103xx   (ROC_MODEL_CN103xx_A0)
 #define ROC_MODEL_CN10K                                                        \
 	(ROC_MODEL_CN106xx | ROC_MODEL_CNF105xx | ROC_MODEL_CNF105xxN |        \
@@ -252,6 +253,12 @@ roc_model_is_cnf10kb_a0(void)
 	return roc_model->flag & ROC_MODEL_CNF105xxN_A0;
 }
 
+static inline uint64_t
+roc_model_is_cnf10kb_b0(void)
+{
+	return roc_model->flag & ROC_MODEL_CNF105xxN_B0;
+}
+
 static inline uint64_t
 roc_model_is_cn10kb(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 20/32] common/cnxk: access valid pass value
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (17 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 19/32] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 21/32] net/cnxk: add receive error mask Nithin Dabilpuram
                     ` (11 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Hanumanth Pothula

From: Hanumanth Pothula <hpothula@marvell.com>

There is a possibility of accessing an invalid pass value on
RVU device lookup failure, as the return value is dropped.

Hence pass through the return value of the RVU device lookup to make
sure a valid pass value is accessed.

Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
---
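
A minimal sketch of the pattern, assuming a caller that previously
ignored the result of cn10k_part_pass_get(); propagating the lookup
status keeps an unwritten pass value from being consumed:

    uint32_t part = 0, pass = 0;
    bool found = false;

    /* cn10k_part_pass_get() now propagates the rvu_device_lookup()
     * status instead of always returning 0.
     */
    if (cn10k_part_pass_get(&part, &pass) == 0)
            found = true;   /* 'part'/'pass' are valid only here */
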
 drivers/common/cnxk/roc_model.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index f4f2a38e70..6dc2afe7f0 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -148,6 +148,7 @@ cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
 #define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
 	char dirname[PATH_MAX];
 	struct dirent *e;
+	int ret = -1;
 	DIR *dir;
 
 	dir = opendir(SYSFS_PCI_DEVICES);
@@ -165,18 +166,19 @@ cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
 			 e->d_name);
 
 		/* Lookup for rvu device and get part pass information */
-		if (!rvu_device_lookup(dirname, part, pass))
+		ret = rvu_device_lookup(dirname, part, pass);
+		if (!ret)
 			break;
 	}
 
 	closedir(dir);
-	return 0;
+	return ret;
 }
 
 static bool
 populate_model(struct roc_model *model, uint32_t midr)
 {
-	uint32_t impl, major, part, minor, pass;
+	uint32_t impl, major, part, minor, pass = 0;
 	bool found = false;
 	size_t i;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 21/32] net/cnxk: add receive error mask
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (18 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 20/32] common/cnxk: access valid pass value Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 22/32] common/cnxk: support of 1-N pool-aura per NIX LF Nithin Dabilpuram
                     ` (10 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Receive errors related to MACSEC and USXGMI are masked
for cn10kb_b0 and cn10kb.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
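
A minimal sketch of the rx_cfg handling, using only the flags referenced
by the change:

    /* Restrict rx_cfg to the receive-error mask, then mask MACSEC and
     * CRC8/PCH receive errors on silicon that supports doing so.
     */
    rx_cfg &= ROC_NIX_LF_RX_CFG_RX_ERROR_MASK;

    if (roc_feature_nix_has_drop_re_mask())
            rx_cfg |= (ROC_NIX_RE_CRC8_PCH | ROC_NIX_RE_MACSEC);
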
 drivers/net/cnxk/cnxk_ethdev.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index eeabf6edec..3ceda8c8f9 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1267,6 +1267,11 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
+	rx_cfg &= (ROC_NIX_LF_RX_CFG_RX_ERROR_MASK);
+
+	if (roc_feature_nix_has_drop_re_mask())
+		rx_cfg |= (ROC_NIX_RE_CRC8_PCH | ROC_NIX_RE_MACSEC);
+
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
 		/* Disable drop re if rx offload security is enabled and
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 22/32] common/cnxk: support of 1-N pool-aura per NIX LF
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (19 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 21/32] net/cnxk: add receive error mask Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 23/32] net/cnxk: support for inbound without inline dev mode Nithin Dabilpuram
                     ` (9 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev, Rahul Bhansali

From: Rahul Bhansali <rbhansali@marvell.com>

This adds support for 1:N pool:aura per NIX LF when the
inl_cpt_channel devarg is set on the inline device; otherwise
it creates a 1:1 pool:aura for CN103/CN106B0 SoCs.

With 1:N, a global pool is created with aura 0, and a per-NIX
individual aura is created and mapped to this global pool.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
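
A minimal sketch of the ethdev-side callback flow for the 1:N case,
using the calls shown in the diff (error handling elided; the local
names are illustrative):

    struct rte_mempool *hp;
    int rc;

    /* Registered once at ethdev init; ROC invokes the callback to create
     * or destroy a per-port meta aura on top of the global pool.
     */
    roc_nix_inl_custom_meta_pool_cb_register(cnxk_nix_inl_custom_meta_pool_cb);

    /* Create path inside the callback: an empty mempool is bound to the
     * cn10k hwpool ops with the global pool as context, so its pool_id
     * becomes the per-port meta aura mapped to that global pool.
     */
    hp = rte_mempool_create_empty(mempool_name, nb_bufs, buf_sz, 0,
                                  sizeof(struct rte_pktmbuf_pool_private),
                                  SOCKET_ID_ANY, 0);
    rc = rte_mempool_set_ops_byname(hp, "cn10k_hwpool_ops", (void *)pmpool);
    if (!rc)
            rc = rte_mempool_populate_default(hp);
    *aura_handle = hp->pool_id;
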
 drivers/common/cnxk/roc_idev_priv.h |   1 +
 drivers/common/cnxk/roc_nix.h       |   1 +
 drivers/common/cnxk/roc_nix_inl.c   | 178 ++++++++++++++++++++++++----
 drivers/common/cnxk/roc_nix_inl.h   |   4 +
 drivers/common/cnxk/version.map     |   1 +
 drivers/event/cnxk/cn10k_worker.h   |   9 +-
 drivers/net/cnxk/cn10k_rx_select.c  |   5 +-
 drivers/net/cnxk/cnxk_ethdev.c      |   3 +
 drivers/net/cnxk/cnxk_ethdev.h      |   3 +
 drivers/net/cnxk/cnxk_ethdev_sec.c  |  62 ++++++++++
 10 files changed, 240 insertions(+), 27 deletions(-)

diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index d83522799f..4983578fc6 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -13,6 +13,7 @@ struct nix_inl_dev;
 
 struct idev_nix_inl_cfg {
 	uint64_t meta_aura;
+	uintptr_t meta_mempool;
 	uint32_t nb_bufs;
 	uint32_t buf_sz;
 	uint32_t refs;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 37d0ed5ebe..548854952b 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -470,6 +470,7 @@ struct roc_nix {
 	bool local_meta_aura_ena;
 	uint32_t meta_buf_sz;
 	bool force_rx_aura_bp;
+	bool custom_meta_aura_ena;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 67f8ce9aa0..69f658ba87 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -7,6 +7,7 @@
 
 uint32_t soft_exp_consumer_cnt;
 roc_nix_inl_meta_pool_cb_t meta_pool_cb;
+roc_nix_inl_custom_meta_pool_cb_t custom_meta_pool_cb;
 
 PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2);
@@ -33,13 +34,14 @@ nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
 		return -EINVAL;
 
 	inl_cfg = &idev->inl_cfg;
-	if (roc_nix->local_meta_aura_ena) {
+
+	if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
+		meta_aura = &inl_cfg->meta_aura;
+	} else {
 		meta_aura = &roc_nix->meta_aura_handle;
 		snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
 			 roc_nix->port_id + 1);
 		mp_name = mempool_name;
-	} else {
-		meta_aura = &inl_cfg->meta_aura;
 	}
 
 	/* Destroy existing Meta aura */
@@ -72,7 +74,7 @@ nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
 
 static int
 nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
-			 uint64_t *meta_aura)
+			 uint64_t *meta_aura, bool is_local_metaaura)
 {
 	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
 	struct idev_nix_inl_cfg *inl_cfg;
@@ -89,7 +91,7 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	inl_cfg = &idev->inl_cfg;
 	nix_inl_dev = idev->nix_inl_dev;
 
-	if (roc_nix->local_meta_aura_ena) {
+	if (is_local_metaaura) {
 		/* Per LF Meta Aura */
 		inl_rq_id = nix_inl_dev->nb_rqs > 1 ? port_id : 0;
 		inl_rq = &nix_inl_dev->rqs[inl_rq_id];
@@ -134,15 +136,107 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	plt_nix_dbg("Created meta aura %p(%s)for port %d", (void *)*meta_aura, mp_name,
 		    roc_nix->port_id);
 
-	if (!roc_nix->local_meta_aura_ena) {
+	if (!is_local_metaaura) {
 		inl_cfg->buf_sz = buf_sz;
 		inl_cfg->nb_bufs = nb_bufs;
+		inl_cfg->meta_mempool = mp;
 	} else
 		roc_nix->buf_sz = buf_sz;
 
 	return 0;
 }
 
+static int
+nix_inl_custom_meta_aura_destroy(struct roc_nix *roc_nix)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	char mempool_name[24] = {'\0'};
+	char *mp_name = NULL;
+	uint64_t *meta_aura;
+	int rc;
+
+	if (!idev)
+		return -EINVAL;
+
+	inl_cfg = &idev->inl_cfg;
+	meta_aura = &roc_nix->meta_aura_handle;
+	snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
+		 roc_nix->port_id + 1);
+	mp_name = mempool_name;
+
+	/* Destroy existing Meta aura */
+	if (*meta_aura) {
+		uint64_t avail, limit;
+
+		/* Check if all buffers are back to pool */
+		avail = roc_npa_aura_op_available(*meta_aura);
+		limit = roc_npa_aura_op_limit_get(*meta_aura);
+		if (avail != limit)
+			plt_warn("Not all buffers are back to meta pool,"
+				 " %" PRIu64 " != %" PRIu64, avail, limit);
+
+		rc = custom_meta_pool_cb(inl_cfg->meta_mempool, &roc_nix->meta_mempool, mp_name,
+					 meta_aura, 0, 0, true);
+		if (rc) {
+			plt_err("Failed to destroy meta aura, rc=%d", rc);
+			return rc;
+		}
+
+		roc_nix->buf_sz = 0;
+	}
+
+	return 0;
+}
+
+static int
+nix_inl_custom_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
+				uint64_t *meta_aura)
+{
+	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	struct idev_nix_inl_cfg *inl_cfg;
+	struct nix_inl_dev *nix_inl_dev;
+	char mempool_name[24] = {'\0'};
+	uint32_t nb_bufs, buf_sz;
+	char *mp_name = NULL;
+	uintptr_t mp;
+	int rc;
+
+	inl_cfg = &idev->inl_cfg;
+	nix_inl_dev = idev->nix_inl_dev;
+
+	/* Override meta buf count from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
+		nb_bufs = nix_inl_dev->nb_meta_bufs;
+	else
+		nb_bufs = roc_npa_buf_type_limit_get(mask);
+
+	/* Override meta buf size from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
+		buf_sz = nix_inl_dev->meta_buf_sz;
+	else
+		buf_sz = first_skip + NIX_INL_META_SIZE;
+
+	/* Create Metapool name */
+	snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
+		 roc_nix->port_id + 1);
+	mp_name = mempool_name;
+
+	/* Allocate meta aura */
+	rc = custom_meta_pool_cb(inl_cfg->meta_mempool, &mp, mp_name, meta_aura,
+				 buf_sz, nb_bufs, false);
+	if (rc) {
+		plt_err("Failed to allocate meta aura, rc=%d", rc);
+		return rc;
+	}
+
+	/* Overwrite */
+	roc_nix->meta_mempool = mp;
+	roc_nix->buf_sz = buf_sz;
+
+	return 0;
+}
+
 static int
 nix_inl_global_meta_buffer_validate(struct idev_cfg *idev, struct roc_nix_rq *rq)
 {
@@ -228,6 +322,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	struct idev_nix_inl_cfg *inl_cfg;
+	bool is_local_metaaura;
 	bool aura_setup = false;
 	uint64_t *meta_aura;
 	int rc;
@@ -238,18 +333,39 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 	inl_cfg = &idev->inl_cfg;
 
 	/* Create meta aura if not present */
-	if (roc_nix->local_meta_aura_ena)
-		meta_aura = &roc_nix->meta_aura_handle;
-	else
+	if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
 		meta_aura = &inl_cfg->meta_aura;
+		is_local_metaaura = false;
+	} else {
+		meta_aura = &roc_nix->meta_aura_handle;
+		is_local_metaaura = true;
+	}
 
 	if (!(*meta_aura)) {
-		rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura);
+		rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura,
+					      is_local_metaaura);
 		if (rc)
 			return rc;
 
 		aura_setup = true;
 	}
+
+	if (roc_nix->custom_meta_aura_ena) {
+		/* Create metaura for 1:N pool:aura */
+		if (!custom_meta_pool_cb)
+			return -EFAULT;
+
+		meta_aura = &roc_nix->meta_aura_handle;
+		if (!(*meta_aura)) {
+			rc = nix_inl_custom_meta_aura_create(idev, roc_nix, rq->first_skip,
+							     meta_aura);
+			if (rc)
+				return rc;
+
+			aura_setup = true;
+		}
+	}
+
 	/* Update rq meta aura handle */
 	rq->meta_aura_handle = *meta_aura;
 
@@ -698,6 +814,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct roc_cpt_inline_ipsec_inb_cfg cfg;
 	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
 	uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
 	struct roc_cpt *roc_cpt;
 	int rc;
@@ -749,9 +866,13 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	if (rc)
 		return rc;
 
+	inl_dev = idev->nix_inl_dev;
+
+	roc_nix->custom_meta_aura_ena = (roc_nix->local_meta_aura_ena &&
+					 (inl_dev->is_multi_channel || roc_nix->custom_sa_action));
 	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
 		nix->need_meta_aura = true;
-		if (!roc_nix->local_meta_aura_ena)
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs++;
 	}
 
@@ -773,15 +894,17 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 		return -EFAULT;
 
 	nix->inl_inb_ena = false;
+
 	if (nix->need_meta_aura) {
 		nix->need_meta_aura = false;
-		if (roc_nix->local_meta_aura_ena) {
-			nix_inl_meta_aura_destroy(roc_nix);
-		} else {
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs--;
-			if (!idev->inl_cfg.refs)
-				nix_inl_meta_aura_destroy(roc_nix);
-		}
+
+		if (roc_nix->custom_meta_aura_ena)
+			nix_inl_custom_meta_aura_destroy(roc_nix);
+
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy(roc_nix);
 	}
 
 	if (roc_feature_nix_has_inl_rq_mask()) {
@@ -1309,17 +1432,18 @@ roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
 
 	if (ena) {
 		nix->need_meta_aura = true;
-		if (!roc_nix->local_meta_aura_ena)
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs++;
 	} else if (nix->need_meta_aura) {
 		nix->need_meta_aura = false;
-		if (roc_nix->local_meta_aura_ena) {
-			nix_inl_meta_aura_destroy(roc_nix);
-		} else {
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs--;
-			if (!idev->inl_cfg.refs)
-				nix_inl_meta_aura_destroy(roc_nix);
-		}
+
+		if (roc_nix->custom_meta_aura_ena)
+			nix_inl_custom_meta_aura_destroy(roc_nix);
+
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy(roc_nix);
 	}
 }
 
@@ -1672,3 +1796,9 @@ roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix)
 
 	return nix->cpt_eng_caps;
 }
+
+void
+roc_nix_inl_custom_meta_pool_cb_register(roc_nix_inl_custom_meta_pool_cb_t cb)
+{
+	custom_meta_pool_cb = cb;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index daa21a941a..885d95335e 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -121,6 +121,9 @@ typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle,  uintptr_t *mpool,
 					  uint32_t blk_sz, uint32_t nb_bufs, bool destroy,
 					  const char *mempool_name);
+typedef int (*roc_nix_inl_custom_meta_pool_cb_t)(uintptr_t pmpool, uintptr_t *mpool,
+						 const char *mempool_name, uint64_t *aura_handle,
+						 uint32_t blk_sz, uint32_t nb_bufs, bool destroy);
 
 struct roc_nix_inl_dev {
 	/* Input parameters */
@@ -199,6 +202,7 @@ int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
 uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
 void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
+void __roc_api roc_nix_inl_custom_meta_pool_cb_register(roc_nix_inl_custom_meta_pool_cb_t cb);
 
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 809fd81b20..c76564b46e 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -199,6 +199,7 @@ INTERNAL {
 	roc_nix_inb_is_with_inl_dev;
 	roc_nix_inl_meta_aura_check;
 	roc_nix_inl_meta_pool_cb_register;
+	roc_nix_inl_custom_meta_pool_cb_register;
 	roc_nix_inb_mode_set;
 	roc_nix_inl_outb_fini;
 	roc_nix_inl_outb_init;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 1e519d8156..f049b5c348 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -168,6 +168,10 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		RTE_MEMPOOL_CHECK_COOKIES(((struct rte_mbuf *)mbuf)->pool, (void **)&mbuf, 1, 1);
 
 		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			void *lookup_mem = ws->lookup_mem;
+			struct rte_mempool *mp = NULL;
+			uint64_t meta_aura;
+
 			const uint64_t mbuf_init =
 				0x100010000ULL | RTE_PKTMBUF_HEADROOM |
 				(flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
@@ -192,8 +196,11 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 				cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
 				(struct rte_mbuf *)mbuf, d_off, flags,
 				mbuf_init | ((uint64_t)port) << 48);
+			mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem);
+			meta_aura = mp ? mp->pool_id : m->pool->pool_id;
+
 			if (loff)
-				roc_npa_aura_op_free(m->pool->pool_id, 0, iova);
+				roc_npa_aura_op_free(meta_aura, 0, iova);
 		}
 
 		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
diff --git a/drivers/net/cnxk/cn10k_rx_select.c b/drivers/net/cnxk/cn10k_rx_select.c
index b906f6725a..1e0de1b7ac 100644
--- a/drivers/net/cnxk/cn10k_rx_select.c
+++ b/drivers/net/cnxk/cn10k_rx_select.c
@@ -79,9 +79,10 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 #undef R
 	};
 
-	/* Copy multi seg version with no offload for tear down sequence */
+	/* Copy multi seg version with security for tear down sequence */
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		dev->rx_pkt_burst_no_offload = nix_eth_rx_burst_mseg[0];
+		dev->rx_pkt_burst_no_offload =
+			nix_eth_rx_burst_mseg_reas[NIX_RX_OFFLOAD_SECURITY_F];
 
 	if (dev->scalar_ena) {
 		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 3ceda8c8f9..aaa1014479 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1885,6 +1885,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	/* Register callback for inline meta pool create */
 	roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
 
+	/* Register callback for inline meta pool create 1:N pool:aura */
+	roc_nix_inl_custom_meta_pool_cb_register(cnxk_nix_inl_custom_meta_pool_cb);
+
 	dev->eth_dev = eth_dev;
 	dev->configured = 0;
 	dev->ptype_disable = 0;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 44e37d6550..e280d6c05e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -610,6 +610,9 @@ cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
 			      struct rte_security_session *sess);
 int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
 			      uint32_t nb_bufs, bool destroy, const char *mempool_name);
+int cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
+				     uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+				     bool destroy);
 
 /* Congestion Management */
 int cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info);
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index cd64daacc0..a66d58ca61 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -6,6 +6,7 @@
 #include <cnxk_mempool.h>
 
 #define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
+#define CN10K_HW_POOL_OPS_NAME "cn10k_hwpool_ops"
 
 #define CNXK_NIX_INL_SELFTEST	      "selftest"
 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
@@ -114,6 +115,67 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
 	return rc;
 }
 
+/* Create Aura and link with Global mempool for 1:N Pool:Aura case */
+int
+cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
+				 uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+				 bool destroy)
+{
+	struct rte_mempool *hp;
+	int rc;
+
+	/* Destroy the mempool if requested */
+	if (destroy) {
+		hp = rte_mempool_lookup(mempool_name);
+		if (!hp)
+			return -ENOENT;
+
+		if (hp->pool_id != *aura_handle) {
+			plt_err("Meta pool aura mismatch");
+			return -EINVAL;
+		}
+
+		rte_mempool_free(hp);
+		plt_free(hp->pool_config);
+
+		*aura_handle = 0;
+		*mpool = 0;
+		return 0;
+	}
+
+	/* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
+	 * support.
+	 */
+	hp = rte_mempool_create_empty(mempool_name, nb_bufs, buf_sz, 0,
+				      sizeof(struct rte_pktmbuf_pool_private),
+				      SOCKET_ID_ANY, 0);
+	if (!hp) {
+		plt_err("Failed to create inline meta pool");
+		return -EIO;
+	}
+
+	rc = rte_mempool_set_ops_byname(hp, CN10K_HW_POOL_OPS_NAME, (void *)pmpool);
+
+	if (rc) {
+		plt_err("Failed to setup ops, rc=%d", rc);
+		goto free_hp;
+	}
+
+	/* Populate buffer */
+	rc = rte_mempool_populate_default(hp);
+	if (rc < 0) {
+		plt_err("Failed to populate pool, rc=%d", rc);
+		goto free_hp;
+	}
+
+	*aura_handle = hp->pool_id;
+	*mpool = (uintptr_t)hp;
+	return 0;
+free_hp:
+	rte_mempool_free(hp);
+	return rc;
+}
+
 static int
 parse_max_ipsec_rules(const char *key, const char *value, void *extra_args)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 23/32] net/cnxk: support for inbound without inline dev mode
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (20 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 22/32] common/cnxk: support of 1-N pool-aura per NIX LF Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:03   ` [PATCH v2 24/32] common/cnxk: fix inline device VF identification Nithin Dabilpuram
                     ` (8 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

Support inbound Inline IPsec without an Inline device RQ,
i.e. both first pass and second pass hitting the same
ethdev RQ in poll mode. Remove the switching from
inline dev to non-inline dev mode, as inline dev mode
is the default and can only be overridden by devargs.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
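
A worked example of the wqe_skip computation added below, assuming a
128-byte struct rte_mbuf and a 128-byte ROC_CACHE_LINE_SZ (both values
are assumptions here):

    /* Skip enough WQE cache lines to leave room for the mbuf that
     * precedes the packet data when inline IPsec traffic lands on the
     * ethdev CQ without an inline device.
     */
    wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
    wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
    /* With a 128B mbuf and 128B cache line: 128 / 128 = 1. */
    rq->wqe_skip = wqe_skip;
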
 drivers/common/cnxk/roc_nix_queue.c      |  3 +++
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 15 ---------------
 drivers/net/cnxk/cnxk_ethdev.c           | 15 ++++++++++-----
 3 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index d29fafa895..08e8bf7ea2 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -473,6 +473,9 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	if (rq->ipsech_ena) {
 		aq->rq.ipsech_ena = 1;
 		aq->rq.ipsecd_drop_en = 1;
+		aq->rq.ena_wqwd = 1;
+		aq->rq.wqe_skip = rq->wqe_skip;
+		aq->rq.wqe_caching = 1;
 	}
 
 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 8ad84198b9..92aea92389 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -273,15 +273,6 @@ cnxk_sso_rx_adapter_queue_add(
 	}
 
 	dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
-
-	/* Switch to use PF/VF's NIX LF instead of inline device for inbound
-	 * when all the RQ's are switched to event dev mode. We do this only
-	 * when dev arg no_inl_dev=1 is selected.
-	 */
-	if (cnxk_eth_dev->inb.no_inl_dev &&
-	    cnxk_eth_dev->nb_rxq_sso == cnxk_eth_dev->nb_rxq)
-		cnxk_nix_inb_mode_set(cnxk_eth_dev, false);
-
 	return 0;
 }
 
@@ -309,12 +300,6 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 	if (rc < 0)
 		plt_err("Failed to clear Rx adapter config port=%d, q=%d",
 			eth_dev->data->port_id, rx_queue_id);
-
-	/* Removing RQ from Rx adapter implies need to use
-	 * inline device for CQ/Poll mode.
-	 */
-	cnxk_nix_inb_mode_set(cnxk_eth_dev, true);
-
 	return rc;
 }
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index aaa1014479..916198d802 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -81,9 +81,6 @@ cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
 {
 	struct roc_nix *nix = &dev->nix;
 
-	if (dev->inb.inl_dev == use_inl_dev)
-		return 0;
-
 	plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
 		    dev->inb.nb_sess, !!dev->inb.inl_dev);
 
@@ -119,7 +116,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 		/* By default pick using inline device for poll mode.
 		 * Will be overridden when event mode rq's are setup.
 		 */
-		cnxk_nix_inb_mode_set(dev, true);
+		cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev);
 
 		/* Allocate memory to be used as dptr for CPT ucode
 		 * WRITE_SA op.
@@ -633,6 +630,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	struct roc_nix_rq *rq;
 	struct roc_nix_cq *cq;
 	uint16_t first_skip;
+	uint16_t wqe_skip;
 	int rc = -EINVAL;
 	size_t rxq_sz;
 	struct rte_mempool *lpb_pool = mp;
@@ -712,8 +710,15 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
 
 	/* Enable Inline IPSec on RQ, will not be used for Poll mode */
-	if (roc_nix_inl_inb_is_enabled(nix))
+	if (roc_nix_inl_inb_is_enabled(nix) && !dev->inb.inl_dev) {
 		rq->ipsech_ena = true;
+		/* WQE skip is needed when poll mode is enabled in CN10KA_B0 and above
+		 * for Inline IPsec traffic to CQ without inline device.
+		 */
+		wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
+		wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
+		rq->wqe_skip = wqe_skip;
+	}
 
 	if (spb_pool) {
 		rq->spb_ena = 1;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 24/32] common/cnxk: fix inline device VF identification
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (21 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 23/32] net/cnxk: support for inbound without inline dev mode Nithin Dabilpuram
@ 2023-05-24 10:03   ` Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 25/32] common/cnxk: avoid inline dev CPT lf detach multiple times Nithin Dabilpuram
                     ` (7 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:03 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, stable

Fix inline device VF identification so that the right
mbox region is used.

Fixes: ee48f711f3b0 ("common/cnxk: support NIX inline inbound and outbound setup")
cc: stable@dpdk.org

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_dev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 3125f9dda2..d87b00e7e8 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -1001,6 +1001,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
 	case PCI_DEVID_CNXK_RVU_AF_VF:
 	case PCI_DEVID_CNXK_RVU_VF:
 	case PCI_DEVID_CNXK_RVU_SDP_VF:
+	case PCI_DEVID_CNXK_RVU_NIX_INL_VF:
 		dev->hwcap |= DEV_HWCAP_F_VF;
 		break;
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 25/32] common/cnxk: avoid inline dev CPT lf detach multiple times
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (22 preceding siblings ...)
  2023-05-24 10:03   ` [PATCH v2 24/32] common/cnxk: fix inline device VF identification Nithin Dabilpuram
@ 2023-05-24 10:04   ` Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 26/32] common/cnxk: skip CGX promisc mode with NPC exact match Nithin Dabilpuram
                     ` (6 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:04 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

CPT LF detach is done along with all other LFs of the inline
device. Hence remove it from nix_inl_cpt_release().
Also provide ROC APIs to set up and release the CPT LF
separately.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
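
A minimal usage sketch of the new ROC calls, assuming a caller that
temporarily releases the inline device CPT LF and later restores it
(error handling elided):

    int rc;

    /* Release the CPT LF owned by the inline device; returns 0 if it is
     * not currently set up, -ENOENT if there is no inline device.
     */
    rc = roc_nix_inl_dev_cpt_release();

    /* ... the LF resources can be re-allocated elsewhere here ... */

    /* Re-attach the CPT LF to the inline device; false keeps the default
     * behaviour of not binding it to the inline device SSO.
     */
    rc = roc_nix_inl_dev_cpt_setup(false);
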
 drivers/common/cnxk/roc_nix_inl.h     |  2 +
 drivers/common/cnxk/roc_nix_inl_dev.c | 59 ++++++++++++++++++++-------
 drivers/common/cnxk/version.map       |  2 +
 3 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 885d95335e..ab1e9c0f98 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -158,6 +158,8 @@ void __roc_api roc_nix_inl_dev_unlock(void);
 int __roc_api roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle);
 int __roc_api roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats);
 uint16_t __roc_api roc_nix_inl_dev_pffunc_get(void);
+int __roc_api roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso);
+int __roc_api roc_nix_inl_dev_cpt_release(void);
 
 /* NIX Inline Inbound API */
 int __roc_api roc_nix_inl_inb_init(struct roc_nix *roc_nix);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index ca948d3bc7..d76158e30d 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -172,7 +172,7 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 }
 
 static int
-nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
+nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 {
 	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
 	struct dev *dev = &inl_dev->dev;
@@ -186,7 +186,7 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
 	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
 		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
 		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
-	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
+	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso);
 	if (rc) {
 		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
 		return rc;
@@ -218,7 +218,7 @@ nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 {
 	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
 	struct dev *dev = &inl_dev->dev;
-	int rc, ret = 0;
+	int rc;
 
 	if (!inl_dev->attach_cptlf)
 		return 0;
@@ -228,17 +228,11 @@ nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 
 	/* Free LF resources */
 	rc = cpt_lfs_free(dev);
-	if (rc)
+	if (!rc)
+		lf->dev = NULL;
+	else
 		plt_err("Failed to free CPT LF resources, rc=%d", rc);
-	ret |= rc;
-
-	/* Detach LF */
-	rc = cpt_lfs_detach(dev);
-	if (rc)
-		plt_err("Failed to detach CPT LF, rc=%d", rc);
-	ret |= rc;
-
-	return ret;
+	return rc;
 }
 
 static int
@@ -940,7 +934,7 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 		goto nix_release;
 
 	/* Setup CPT LF */
-	rc = nix_inl_cpt_setup(inl_dev);
+	rc = nix_inl_cpt_setup(inl_dev, false);
 	if (rc)
 		goto sso_release;
 
@@ -1035,8 +1029,11 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
 	/* Flush Inbound CTX cache entries */
 	nix_inl_cpt_ctx_cache_sync(inl_dev);
 
+	/* Release CPT */
+	rc = nix_inl_cpt_release(inl_dev);
+
 	/* Release SSO */
-	rc = nix_inl_sso_release(inl_dev);
+	rc |= nix_inl_sso_release(inl_dev);
 
 	/* Release NIX */
 	rc |= nix_inl_nix_release(inl_dev);
@@ -1052,3 +1049,35 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
 	idev->nix_inl_dev = NULL;
 	return 0;
 }
+
+int
+roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev = NULL;
+
+	if (!idev || !idev->nix_inl_dev)
+		return -ENOENT;
+	inl_dev = idev->nix_inl_dev;
+
+	if (inl_dev->cpt_lf.dev != NULL)
+		return -EBUSY;
+
+	return nix_inl_cpt_setup(inl_dev, use_inl_dev_sso);
+}
+
+int
+roc_nix_inl_dev_cpt_release(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev = NULL;
+
+	if (!idev || !idev->nix_inl_dev)
+		return -ENOENT;
+	inl_dev = idev->nix_inl_dev;
+
+	if (inl_dev->cpt_lf.dev == NULL)
+		return 0;
+
+	return nix_inl_cpt_release(inl_dev);
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index c76564b46e..e1335e9068 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -174,6 +174,8 @@ INTERNAL {
 	roc_nix_inl_cb_register;
 	roc_nix_inl_cb_unregister;
 	roc_nix_inl_ctx_write;
+	roc_nix_inl_dev_cpt_setup;
+	roc_nix_inl_dev_cpt_release;
 	roc_nix_inl_dev_dump;
 	roc_nix_inl_dev_fini;
 	roc_nix_inl_dev_init;
-- 
2.25.1
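
For context, a hedged usage sketch of the two APIs exported by this patch;
the calling site and ordering are illustrative assumptions, only the
prototypes and return codes come from the diff above.

/* Illustrative caller (not part of the patch): borrow the inline device's
 * CPT LF, binding it to the inline-dev SSO, and return it when done.
 * Per the implementation above, -ENOENT means no inline device exists and
 * -EBUSY means the LF is already set up elsewhere.
 */
static int
example_use_inl_dev_cpt(void)
{
	int rc;

	rc = roc_nix_inl_dev_cpt_setup(true /* use_inl_dev_sso */);
	if (rc)
		return rc;

	/* ... submit work through the borrowed CPT LF here ... */

	return roc_nix_inl_dev_cpt_release();
}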


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 26/32] common/cnxk: skip CGX promisc mode with NPC exact match
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (23 preceding siblings ...)
  2023-05-24 10:04   ` [PATCH v2 25/32] common/cnxk: avoid inline dev CPT lf detach multiple times Nithin Dabilpuram
@ 2023-05-24 10:04   ` Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 27/32] common/cnxk: configure PFC on SPB aura Nithin Dabilpuram
                     ` (5 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:04 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

When NPC exact match is enabled, we cannot use CGX promisc mode
as CGX filtering is permanently disabled. Hence skip calling
it to avoid mbox errors.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 drivers/common/cnxk/roc_nix_mac.c  | 8 ++++++++
 drivers/common/cnxk/roc_nix_priv.h | 1 +
 drivers/common/cnxk/roc_npc.c      | 1 +
 drivers/common/cnxk/roc_npc_mcam.c | 1 +
 drivers/common/cnxk/roc_npc_priv.h | 1 +
 5 files changed, 12 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix_mac.c b/drivers/common/cnxk/roc_nix_mac.c
index ac30fb52d1..754d75ac73 100644
--- a/drivers/common/cnxk/roc_nix_mac.c
+++ b/drivers/common/cnxk/roc_nix_mac.c
@@ -201,6 +201,14 @@ roc_nix_mac_promisc_mode_enable(struct roc_nix *roc_nix, int enable)
 		goto exit;
 	}
 
+	/* Skip CGX promisc toggling if NPC exact match is enabled as
+	 * CGX filtering is disabled permanently.
+	 */
+	if (nix->exact_match_ena) {
+		rc = 0;
+		goto exit;
+	}
+
 	if (enable)
 		mbox_alloc_msg_cgx_promisc_enable(mbox);
 	else
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 6872630dc8..ea4211dfed 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -130,6 +130,7 @@ struct nix {
 	struct nix_qint *cints_mem;
 	uint8_t configured_qints;
 	uint8_t configured_cints;
+	uint8_t exact_match_ena;
 	struct roc_nix_rq **rqs;
 	struct roc_nix_sq **sqs;
 	uint16_t vwqe_interval;
diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c
index d556b4c3a5..1d74ea5a1e 100644
--- a/drivers/common/cnxk/roc_npc.c
+++ b/drivers/common/cnxk/roc_npc.c
@@ -272,6 +272,7 @@ roc_npc_init(struct roc_npc *roc_npc)
 	roc_npc->rx_parse_nibble = npc->keyx_supp_nmask[NPC_MCAM_RX];
 
 	npc->mcam_entries = npc_mcam_tot_entries() >> npc->keyw[NPC_MCAM_RX];
+	nix->exact_match_ena = npc->exact_match_ena;
 
 	/* Free, free_rev, live and live_rev entries */
 	bmap_sz = plt_bitmap_get_memory_footprint(npc->mcam_entries);
diff --git a/drivers/common/cnxk/roc_npc_mcam.c b/drivers/common/cnxk/roc_npc_mcam.c
index 72892be300..46f0b9c33d 100644
--- a/drivers/common/cnxk/roc_npc_mcam.c
+++ b/drivers/common/cnxk/roc_npc_mcam.c
@@ -542,6 +542,7 @@ npc_mcam_fetch_kex_cfg(struct npc *npc)
 	mbox_memcpy((char *)npc->profile_name, kex_rsp->mkex_pfl_name,
 		    MKEX_NAME_LEN);
 
+	npc->exact_match_ena = (kex_rsp->rx_keyx_cfg >> 40) & 0xF;
 	npc_mcam_process_mkex_cfg(npc, kex_rsp);
 
 done:
diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h
index 714dcb09c9..82c2d0aa06 100644
--- a/drivers/common/cnxk/roc_npc_priv.h
+++ b/drivers/common/cnxk/roc_npc_priv.h
@@ -403,6 +403,7 @@ struct npc {
 	struct npc_prio_flow_list_head *prio_flow_list;
 	struct plt_bitmap *rss_grp_entries;
 	struct npc_flow_list ipsec_list;
+	uint8_t exact_match_ena;
 };
 
 #define NPC_HASH_FIELD_LEN 16
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 27/32] common/cnxk: configure PFC on SPB aura
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (24 preceding siblings ...)
  2023-05-24 10:04   ` [PATCH v2 26/32] common/cnxk: skip CGX promisc mode with NPC exact match Nithin Dabilpuram
@ 2023-05-24 10:04   ` Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 28/32] common/nix: check for null derefernce Nithin Dabilpuram
                     ` (4 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:04 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Sunil Kumar Kori <skori@marvell.com>

An RQ can be configured with lpb_aura and spb_aura at the same time,
and both can contribute to generating aura-based back pressure from
NIX to RPM.

Currently, however, the PFC configuration is applied only on lpb_aura,
so spb_aura does not contribute to back pressure.

This patch adds support for the same.
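
For illustration, a minimal caller-side sketch (not part of the patch) of
how both auras would now be passed. roc_nix_fc_config_set() and the
ROC_NIX_FC_RQ_CFG type are assumed from the existing driver API; the aura
handles and tc value are placeholders, and RQ selection and error handling
are omitted.

/* Back-pressure an RQ that uses both a large (lpb) and a small (spb)
 * packet buffer aura; spb_pool is the field added by this patch.
 */
struct roc_nix_fc_cfg fc_cfg;

memset(&fc_cfg, 0, sizeof(fc_cfg));
fc_cfg.type = ROC_NIX_FC_RQ_CFG;
fc_cfg.rq_cfg.enable = true;
fc_cfg.rq_cfg.tc = tc;
fc_cfg.rq_cfg.pool = lpb_aura_handle;
fc_cfg.rq_cfg.spb_pool = spb_aura_handle;
rc = roc_nix_fc_config_set(roc_nix, &fc_cfg);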

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/common/cnxk/roc_nix.h    | 1 +
 drivers/common/cnxk/roc_nix_fc.c | 6 ++++++
 2 files changed, 7 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 548854952b..f60e546c01 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -198,6 +198,7 @@ struct roc_nix_fc_cfg {
 			uint16_t cq_drop;
 			bool enable;
 			uint64_t pool;
+			uint64_t spb_pool;
 			uint64_t pool_drop_pct;
 		} rq_cfg;
 
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 0bbc233376..b6956cec39 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -303,6 +303,12 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 				      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
 				      fc_cfg->rq_cfg.tc, pool_drop_pct);
 
+		if (rq->spb_ena) {
+			roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.spb_pool,
+					      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
+					      fc_cfg->rq_cfg.tc, pool_drop_pct);
+		}
+
 		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
 					      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 28/32] common/nix: check for null derefernce
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (25 preceding siblings ...)
  2023-05-24 10:04   ` [PATCH v2 27/32] common/cnxk: configure PFC on SPB aura Nithin Dabilpuram
@ 2023-05-24 10:04   ` Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 29/32] common/cnxk: fix receive queue with multiple mask Nithin Dabilpuram
                     ` (3 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:04 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Gowrishankar Muthukrishnan

From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>

Check for a NULL return value before dereferencing it.

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/common/cnxk/roc_nix.c    | 8 +++++++-
 drivers/common/cnxk/roc_nix_fc.c | 7 ++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 39943e4ba7..152ef7269e 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -392,6 +392,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 {
 	enum roc_nix_rss_reta_sz reta_sz;
 	struct plt_pci_device *pci_dev;
+	struct roc_nix_list *nix_list;
 	uint16_t max_sqb_count;
 	uint64_t blkaddr;
 	struct dev *dev;
@@ -417,7 +418,12 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	nix = roc_nix_to_nix_priv(roc_nix);
 	pci_dev = roc_nix->pci_dev;
 	dev = &nix->dev;
-	TAILQ_INSERT_TAIL(roc_idev_nix_list_get(), roc_nix, next);
+
+	nix_list = roc_idev_nix_list_get();
+	if (nix_list == NULL)
+		return -EINVAL;
+
+	TAILQ_INSERT_TAIL(nix_list, roc_nix, next);
 
 	if (nix->dev.drv_inited)
 		return 0;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index b6956cec39..88439f8e6b 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -431,13 +431,18 @@ static int
 nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid, uint16_t *bpid_new)
 {
 	struct roc_nix *roc_nix_tmp, *roc_nix_pre = NULL;
+	struct roc_nix_list *nix_list;
 	uint8_t chan_pre;
 
 	if (!roc_feature_nix_has_rxchan_multi_bpid())
 		return -ENOTSUP;
 
+	nix_list = roc_idev_nix_list_get();
+	if (nix_list == NULL)
+		return -EINVAL;
+
 	/* Find associated NIX RX channel if Aura BPID is of that of a NIX. */
-	TAILQ_FOREACH (roc_nix_tmp, roc_idev_nix_list_get(), next) {
+	TAILQ_FOREACH (roc_nix_tmp, nix_list, next) {
 		struct nix *nix = roc_nix_to_nix_priv(roc_nix_tmp);
 		int i;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 29/32] common/cnxk: fix receive queue with multiple mask
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (26 preceding siblings ...)
  2023-05-24 10:04   ` [PATCH v2 28/32] common/nix: check for null derefernce Nithin Dabilpuram
@ 2023-05-24 10:04   ` Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 30/32] net/cnxk: handle extbuf completion on ethdev stop Nithin Dabilpuram
                     ` (2 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:04 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla, stable

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Enable or disable the RQ mask based on the function parameter.
This fix allocates a different profile for different RQ masks.

Fixes: ddf955d3917e ("common/cnxk: support CPT second pass")
cc: stable@dpdk.org

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/roc_nix_inl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 69f658ba87..16f858f561 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -1382,7 +1382,7 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
 		return -EFAULT;
 
 	if (roc_feature_nix_has_inl_rq_mask()) {
-		rc = nix_inl_rq_mask_cfg(roc_nix, true);
+		rc = nix_inl_rq_mask_cfg(roc_nix, enable);
 		if (rc) {
 			plt_err("Failed to get rq mask rc=%d", rc);
 			return rc;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 30/32] net/cnxk: handle extbuf completion on ethdev stop
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (27 preceding siblings ...)
  2023-05-24 10:04   ` [PATCH v2 29/32] common/cnxk: fix receive queue with multiple mask Nithin Dabilpuram
@ 2023-05-24 10:04   ` Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 31/32] net/cnxk: add aes-ccm to inline IPsec capabilities Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 32/32] common/cnxk: add check for null auth and anti-replay Nithin Dabilpuram
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:04 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Nithin Kumar Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

During transmission of packets, the CQ corresponding to the SQ is
polled for transmit completion packets in the transmit function.
When the last burst is transmitted, the corresponding transmit
completion packets are left in the CQ. This patch reads the leftover
packets in the CQ on ethdev stop. The transmit completion code is
moved to cn10k_rxtx.h and cn9k_ethdev.h to avoid code duplication.
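
The core of the change is the drain loop added in the tx queue stop path;
a condensed sketch with the reasoning spelled out in comments (types and
helpers exactly as in the diff below):

/* On tx_queue_stop: keep reaping completion CQEs until the SQ head and
 * tail pointers match, i.e. until hardware has posted a completion for
 * every submitted descriptor, so no external mbufs stay referenced after
 * the queue is stopped.
 */
if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
	do {
		handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
		roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
	} while (head != tail);
}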

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/event/cnxk/cn10k_tx_worker.h |  2 +-
 drivers/event/cnxk/cn9k_worker.h     |  2 +-
 drivers/net/cnxk/cn10k_ethdev.c      | 13 +++++
 drivers/net/cnxk/cn10k_rxtx.h        | 76 +++++++++++++++++++++++++
 drivers/net/cnxk/cn10k_tx.h          | 83 +---------------------------
 drivers/net/cnxk/cn9k_ethdev.c       | 14 +++++
 drivers/net/cnxk/cn9k_ethdev.h       | 77 ++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_tx.h           | 83 +---------------------------
 8 files changed, 188 insertions(+), 162 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_tx_worker.h b/drivers/event/cnxk/cn10k_tx_worker.h
index c18786a14c..7f170ac5f0 100644
--- a/drivers/event/cnxk/cn10k_tx_worker.h
+++ b/drivers/event/cnxk/cn10k_tx_worker.h
@@ -55,7 +55,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
 		return 0;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, 1, 1);
+		handle_tx_completion_pkts(txq, 1);
 
 	cn10k_nix_tx_skeleton(txq, cmd, flags, 0);
 	/* Perform header writes before barrier
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 1ce4b044e8..fcb82987e5 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -784,7 +784,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, 1, 1);
+		handle_tx_completion_pkts(txq, 1);
 
 	if (((txq->nb_sqb_bufs_adj -
 	      __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 2b4ab8b772..792c1b1970 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -367,6 +367,10 @@ static int
 cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 {
 	struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint16_t flags = dev->tx_offload_flags;
+	struct roc_nix *nix = &dev->nix;
+	uint32_t head = 0, tail = 0;
 	int rc;
 
 	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
@@ -375,6 +379,15 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 
 	/* Clear fc cache pkts to trigger worker stop */
 	txq->fc_cache_pkts = 0;
+
+	if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
+		struct roc_nix_sq *sq = &dev->sqs[qidx];
+		do {
+			handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
+			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+		} while (head != tail);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index c256d54307..65dd57494a 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -113,4 +113,80 @@ struct cn10k_sec_sess_priv {
 	(void *)((uintptr_t)(lmt_addr) +                                       \
 		 ((uint64_t)(lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))
 
+static inline uint16_t
+nix_tx_compl_nb_pkts(struct cn10k_eth_txq *txq, const uint64_t wdata,
+		const uint32_t qmask)
+{
+	uint16_t available = txq->tx_compl.available;
+
+	/* Update the available count if cached value is not enough */
+	if (!unlikely(available)) {
+		uint64_t reg, head, tail;
+
+		/* Use LDADDA version to avoid reorder */
+		reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
+		/* CQ_OP_STATUS operation error */
+		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+				reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+			return 0;
+
+		tail = reg & 0xFFFFF;
+		head = (reg >> 20) & 0xFFFFF;
+		if (tail < head)
+			available = tail - head + qmask + 1;
+		else
+			available = tail - head;
+
+		txq->tx_compl.available = available;
+	}
+	return available;
+}
+
+static inline void
+handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
+{
+#define CNXK_NIX_CQ_ENTRY_SZ 128
+#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)
+
+	uint16_t tx_pkts = 0, nb_pkts;
+	const uintptr_t desc = txq->tx_compl.desc_base;
+	const uint64_t wdata = txq->tx_compl.wdata;
+	const uint32_t qmask = txq->tx_compl.qmask;
+	uint32_t head = txq->tx_compl.head;
+	struct nix_cqe_hdr_s *tx_compl_cq;
+	struct nix_send_comp_s *tx_compl_s0;
+	struct rte_mbuf *m_next, *m;
+
+	if (mt_safe)
+		rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
+
+	nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, qmask);
+	while (tx_pkts < nb_pkts) {
+		rte_prefetch_non_temporal((void *)(desc +
+					(CQE_SZ((head + 2) & qmask))));
+		tx_compl_cq = (struct nix_cqe_hdr_s *)
+			(desc + CQE_SZ(head));
+		tx_compl_s0 = (struct nix_send_comp_s *)
+			((uint64_t *)tx_compl_cq + 1);
+		m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
+		while (m->next != NULL) {
+			m_next = m->next;
+			rte_pktmbuf_free_seg(m);
+			m = m_next;
+		}
+		rte_pktmbuf_free_seg(m);
+
+		head++;
+		head &= qmask;
+		tx_pkts++;
+	}
+	txq->tx_compl.head = head;
+	txq->tx_compl.available -= nb_pkts;
+
+	plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
+
+	if (mt_safe)
+		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
+}
+
 #endif /* __CN10K_RXTX_H__ */
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index c9ec01cd9d..4f23a8dfc3 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1151,83 +1151,6 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	return segdw;
 }
 
-static inline uint16_t
-nix_tx_compl_nb_pkts(struct cn10k_eth_txq *txq, const uint64_t wdata,
-		const uint16_t pkts, const uint32_t qmask)
-{
-	uint32_t available = txq->tx_compl.available;
-
-	/* Update the available count if cached value is not enough */
-	if (unlikely(available < pkts)) {
-		uint64_t reg, head, tail;
-
-		/* Use LDADDA version to avoid reorder */
-		reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
-		/* CQ_OP_STATUS operation error */
-		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
-				reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
-			return 0;
-
-		tail = reg & 0xFFFFF;
-		head = (reg >> 20) & 0xFFFFF;
-		if (tail < head)
-			available = tail - head + qmask + 1;
-		else
-			available = tail - head;
-
-		txq->tx_compl.available = available;
-	}
-	return RTE_MIN(pkts, available);
-}
-
-static inline void
-handle_tx_completion_pkts(struct cn10k_eth_txq *txq, const uint16_t pkts,
-			  uint8_t mt_safe)
-{
-#define CNXK_NIX_CQ_ENTRY_SZ 128
-#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)
-
-	uint16_t tx_pkts = 0, nb_pkts;
-	const uintptr_t desc = txq->tx_compl.desc_base;
-	const uint64_t wdata = txq->tx_compl.wdata;
-	const uint32_t qmask = txq->tx_compl.qmask;
-	uint32_t head = txq->tx_compl.head;
-	struct nix_cqe_hdr_s *tx_compl_cq;
-	struct nix_send_comp_s *tx_compl_s0;
-	struct rte_mbuf *m_next, *m;
-
-	if (mt_safe)
-		rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
-
-	nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, pkts, qmask);
-	while (tx_pkts < nb_pkts) {
-		rte_prefetch_non_temporal((void *)(desc +
-					(CQE_SZ((head + 2) & qmask))));
-		tx_compl_cq = (struct nix_cqe_hdr_s *)
-			(desc + CQE_SZ(head));
-		tx_compl_s0 = (struct nix_send_comp_s *)
-			((uint64_t *)tx_compl_cq + 1);
-		m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
-		while (m->next != NULL) {
-			m_next = m->next;
-			rte_pktmbuf_free_seg(m);
-			m = m_next;
-		}
-		rte_pktmbuf_free_seg(m);
-
-		head++;
-		head &= qmask;
-		tx_pkts++;
-	}
-	txq->tx_compl.head = head;
-	txq->tx_compl.available -= nb_pkts;
-
-	plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
-
-	if (mt_safe)
-		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
-}
-
 static __rte_always_inline uint16_t
 cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		    uint16_t pkts, uint64_t *cmd, const uint16_t flags)
@@ -1249,7 +1172,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 	bool sec;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+		handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
 
 	if (!(flags & NIX_TX_VWQE_F)) {
 		NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -1398,7 +1321,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 	bool sec;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+		handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
 
 	if (!(flags & NIX_TX_VWQE_F)) {
 		NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -1953,7 +1876,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 	} wd;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+		handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
 
 	if (!(flags & NIX_TX_VWQE_F)) {
 		NIX_XMIT_FC_OR_RETURN(txq, pkts);
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index e55a2aa133..bae4dda5e2 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -329,14 +329,28 @@ static int
 cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 {
 	struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint16_t flags = dev->tx_offload_flags;
+	struct roc_nix *nix = &dev->nix;
+	uint32_t head = 0, tail = 0;
 	int rc;
 
+
 	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
 	if (rc)
 		return rc;
 
 	/* Clear fc cache pkts to trigger worker stop */
 	txq->fc_cache_pkts = 0;
+
+	if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
+		struct roc_nix_sq *sq = &dev->sqs[qidx];
+		do {
+			handle_tx_completion_pkts(txq, 0);
+			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+		} while (head != tail);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index a82dcb3d19..9e0a3c5bb2 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -107,4 +107,81 @@ void cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev);
 /* Security context setup */
 void cn9k_eth_sec_ops_override(void);
 
+static inline uint16_t
+nix_tx_compl_nb_pkts(struct cn9k_eth_txq *txq, const uint64_t wdata,
+		const uint32_t qmask)
+{
+	uint16_t available = txq->tx_compl.available;
+
+	/* Update the available count if cached value is not enough */
+	if (!unlikely(available)) {
+		uint64_t reg, head, tail;
+
+		/* Use LDADDA version to avoid reorder */
+		reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
+		/* CQ_OP_STATUS operation error */
+		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+				reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+			return 0;
+
+		tail = reg & 0xFFFFF;
+		head = (reg >> 20) & 0xFFFFF;
+		if (tail < head)
+			available = tail - head + qmask + 1;
+		else
+			available = tail - head;
+
+		txq->tx_compl.available = available;
+	}
+	return available;
+}
+
+static inline void
+handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe)
+{
+#define CNXK_NIX_CQ_ENTRY_SZ 128
+#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)
+
+	uint16_t tx_pkts = 0, nb_pkts;
+	const uintptr_t desc = txq->tx_compl.desc_base;
+	const uint64_t wdata = txq->tx_compl.wdata;
+	const uint32_t qmask = txq->tx_compl.qmask;
+	uint32_t head = txq->tx_compl.head;
+	struct nix_cqe_hdr_s *tx_compl_cq;
+	struct nix_send_comp_s *tx_compl_s0;
+	struct rte_mbuf *m_next, *m;
+
+	if (mt_safe)
+		rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
+
+	nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, qmask);
+	while (tx_pkts < nb_pkts) {
+		rte_prefetch_non_temporal((void *)(desc +
+					(CQE_SZ((head + 2) & qmask))));
+		tx_compl_cq = (struct nix_cqe_hdr_s *)
+			(desc + CQE_SZ(head));
+		tx_compl_s0 = (struct nix_send_comp_s *)
+			((uint64_t *)tx_compl_cq + 1);
+		m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
+		while (m->next != NULL) {
+			m_next = m->next;
+			rte_pktmbuf_free_seg(m);
+			m = m_next;
+		}
+		rte_pktmbuf_free_seg(m);
+
+		head++;
+		head &= qmask;
+		tx_pkts++;
+	}
+	txq->tx_compl.head = head;
+	txq->tx_compl.available -= nb_pkts;
+
+	plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
+
+	if (mt_safe)
+		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
+}
+
+
 #endif /* __CN9K_ETHDEV_H__ */
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index e956c1ad2a..8f1e05a461 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -559,83 +559,6 @@ cn9k_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
 	} while (lmt_status == 0);
 }
 
-static inline uint16_t
-nix_tx_compl_nb_pkts(struct cn9k_eth_txq *txq, const uint64_t wdata,
-		const uint16_t pkts, const uint32_t qmask)
-{
-	uint32_t available = txq->tx_compl.available;
-
-	/* Update the available count if cached value is not enough */
-	if (unlikely(available < pkts)) {
-		uint64_t reg, head, tail;
-
-		/* Use LDADDA version to avoid reorder */
-		reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
-		/* CQ_OP_STATUS operation error */
-		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
-				reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
-			return 0;
-
-		tail = reg & 0xFFFFF;
-		head = (reg >> 20) & 0xFFFFF;
-		if (tail < head)
-			available = tail - head + qmask + 1;
-		else
-			available = tail - head;
-
-		txq->tx_compl.available = available;
-	}
-	return RTE_MIN(pkts, available);
-}
-
-static inline void
-handle_tx_completion_pkts(struct cn9k_eth_txq *txq, const uint16_t pkts,
-			  uint8_t mt_safe)
-{
-#define CNXK_NIX_CQ_ENTRY_SZ 128
-#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)
-
-	uint16_t tx_pkts = 0, nb_pkts;
-	const uintptr_t desc = txq->tx_compl.desc_base;
-	const uint64_t wdata = txq->tx_compl.wdata;
-	const uint32_t qmask = txq->tx_compl.qmask;
-	uint32_t head = txq->tx_compl.head;
-	struct nix_cqe_hdr_s *tx_compl_cq;
-	struct nix_send_comp_s *tx_compl_s0;
-	struct rte_mbuf *m_next, *m;
-
-	if (mt_safe)
-		rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
-
-	nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, pkts, qmask);
-	while (tx_pkts < nb_pkts) {
-		rte_prefetch_non_temporal((void *)(desc +
-					(CQE_SZ((head + 2) & qmask))));
-		tx_compl_cq = (struct nix_cqe_hdr_s *)
-			(desc + CQE_SZ(head));
-		tx_compl_s0 = (struct nix_send_comp_s *)
-			((uint64_t *)tx_compl_cq + 1);
-		m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
-		while (m->next != NULL) {
-			m_next = m->next;
-			rte_pktmbuf_free_seg(m);
-			m = m_next;
-		}
-		rte_pktmbuf_free_seg(m);
-
-		head++;
-		head &= qmask;
-		tx_pkts++;
-	}
-	txq->tx_compl.head = head;
-	txq->tx_compl.available -= nb_pkts;
-
-	plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
-
-	if (mt_safe)
-		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
-}
-
 static __rte_always_inline uint16_t
 cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
 		   uint64_t *cmd, const uint16_t flags)
@@ -648,7 +571,7 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
 	uint16_t i;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, 0);
+		handle_tx_completion_pkts(txq, 0);
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
 
@@ -700,7 +623,7 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t i;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, 0);
+		handle_tx_completion_pkts(txq, 0);
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
 
@@ -1049,7 +972,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t pkts_left;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, 0);
+		handle_tx_completion_pkts(txq, 0);
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 31/32] net/cnxk: add aes-ccm to inline IPsec capabilities
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (28 preceding siblings ...)
  2023-05-24 10:04   ` [PATCH v2 30/32] net/cnxk: handle extbuf completion on ethdev stop Nithin Dabilpuram
@ 2023-05-24 10:04   ` Nithin Dabilpuram
  2023-05-24 10:04   ` [PATCH v2 32/32] common/cnxk: add check for null auth and anti-replay Nithin Dabilpuram
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:04 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Srujana Challa

From: Srujana Challa <schalla@marvell.com>

Declares AES-CCM support in inline IPsec capabilities.
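
For reference, a hedged sketch of an AEAD transform that falls within the
capability ranges declared below (public rte_cryptodev structs; the key
data, IV offset and surrounding rte_security session setup are
placeholders or omitted):

/* AES-CCM transform matching the advertised limits: 16/24/32-byte key,
 * 16-byte digest, 8- or 12-byte AAD, 11..13-byte IV.
 */
struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_CCM,
		.key = { .data = key, .length = 32 },
		.iv = { .offset = iv_offset, .length = 11 },
		.digest_length = 16,
		.aad_length = 12,
	},
};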

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 drivers/net/cnxk/cn10k_ethdev_sec.c | 30 +++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 30 +++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 9625704ec1..5bc547051d 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -228,6 +228,36 @@ static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 8,
+					.max = 12,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 11,
+					.max = 13,
+					.increment = 1
+				}
+			}, }
+		}, }
+	},
 	{	/* NULL (AUTH) */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 327f221e38..688b13ae1e 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -100,6 +100,36 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 8,
+					.max = 12,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 11,
+					.max = 13,
+					.increment = 1
+				}
+			}, }
+		}, }
+	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 32/32] common/cnxk: add check for null auth and anti-replay
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (29 preceding siblings ...)
  2023-05-24 10:04   ` [PATCH v2 31/32] net/cnxk: add aes-ccm to inline IPsec capabilities Nithin Dabilpuram
@ 2023-05-24 10:04   ` Nithin Dabilpuram
  2023-05-25  9:28     ` Jerin Jacob
  30 siblings, 1 reply; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-24 10:04 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Srujana Challa

From: Srujana Challa <schalla@marvell.com>

As per IPsec RFC, the anti-replay service can be selected for
an SA only if the integrity service is selected for that SA.
This patch adds the validation check for the same.
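
For illustration, a configuration that the new check rejects (field names
are from the public rte_security/rte_cryptodev APIs; the surrounding
session creation is omitted):

/* Inbound SA requesting anti-replay while integrity is NULL: with this
 * patch the SA creation path returns -EINVAL instead of programming an
 * anti-replay window that could never be enforced.
 */
struct rte_security_ipsec_xform ipsec_xform = {
	.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
	.replay_win_sz = 1024,                    /* anti-replay requested */
};
struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = { .algo = RTE_CRYPTO_AUTH_NULL }, /* no integrity service */
};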

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 13ca2c7791..a8c3ba90cd 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -155,6 +155,10 @@ ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
 
 		switch (auth_xfrm->auth.algo) {
 		case RTE_CRYPTO_AUTH_NULL:
+			if (w2->s.dir == ROC_IE_SA_DIR_INBOUND && ipsec_xfrm->replay_win_sz) {
+				plt_err("anti-replay can't be supported with integrity service disabled");
+				return -EINVAL;
+			}
 			w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
 			break;
 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
@@ -1392,6 +1396,11 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
 	if (ret)
 		return ret;
 
+	if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD &&
+	    crypto_xform->auth.algo == RTE_CRYPTO_AUTH_NULL && ipsec->replay_win_sz) {
+		plt_err("anti-replay can't be supported with integrity service disabled");
+		return -EINVAL;
+	}
 	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
 	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
 	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* Re: [PATCH v2 32/32] common/cnxk: add check for null auth and anti-replay
  2023-05-24 10:04   ` [PATCH v2 32/32] common/cnxk: add check for null auth and anti-replay Nithin Dabilpuram
@ 2023-05-25  9:28     ` Jerin Jacob
  0 siblings, 0 replies; 89+ messages in thread
From: Jerin Jacob @ 2023-05-25  9:28 UTC (permalink / raw)
  To: Nithin Dabilpuram
  Cc: Kiran Kumar K, Sunil Kumar Kori, Satha Rao, jerinj, dev, Srujana Challa

On Wed, May 24, 2023 at 3:38 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> From: Srujana Challa <schalla@marvell.com>
>
> As per IPsec RFC, the anti-replay service can be selected for
> an SA only if the integrity service is selected for that SA.
> This patch adds the validation check for the same.


Please fix the following in this series:

[for-next-net]dell[dpdk-next-net-mrvl] $ ./devtools/checkpatches.sh -n 32

### [PATCH] common/cnxk: add pool BPID to RQ while using common pool

WARNING:SPACING: space prohibited between function name and open parenthesis '('
#130: FILE: drivers/common/cnxk/roc_nix_fc.c:441:
+       TAILQ_FOREACH (roc_nix_tmp, roc_idev_nix_list_get(), next) {

total: 0 errors, 1 warnings, 347 lines checked

### [PATCH] common/cnxk: fix CPT backpressure disable on LBK

WARNING:BAD_FIXES_TAG: Please use correct Fixes: style 'Fixes: <12
chars of sha1> ("<title line>")' - ie: 'Fixes: 0663a84524e5
("common/cnxk: enable backpressure on CPT with inline inbound")'
#12:
Fixes: 0663a84524e ("common/cnxk: enable backpressure on CPT with
inline inbound")

total: 0 errors, 1 warnings, 27 lines checked

### [PATCH] common/nix: check for null derefernce

WARNING:TYPO_SPELLING: 'derefernce' may be misspelled - perhaps 'dereference'?
#4:
Subject: [PATCH] common/nix: check for null derefernce
                                            ^^^^^^^^^^

WARNING:SPACING: space prohibited between function name and open parenthesis '('
#56: FILE: drivers/common/cnxk/roc_nix_fc.c:445:
+       TAILQ_FOREACH (roc_nix_tmp, nix_list, next) {

total: 0 errors, 2 warnings, 39 lines checked

29/32 valid patches
[for-next-net]dell[dpdk-next-net-mrvl] $

>
> Signed-off-by: Srujana Challa <schalla@marvell.com>
> ---
>  drivers/common/cnxk/cnxk_security.c | 9 +++++++++
>  1 file changed, 9 insertions(+)
>
> diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
> index 13ca2c7791..a8c3ba90cd 100644
> --- a/drivers/common/cnxk/cnxk_security.c
> +++ b/drivers/common/cnxk/cnxk_security.c
> @@ -155,6 +155,10 @@ ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
>
>                 switch (auth_xfrm->auth.algo) {
>                 case RTE_CRYPTO_AUTH_NULL:
> +                       if (w2->s.dir == ROC_IE_SA_DIR_INBOUND && ipsec_xfrm->replay_win_sz) {
> +                               plt_err("anti-replay can't be supported with integrity service disabled");
> +                               return -EINVAL;
> +                       }
>                         w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
>                         break;
>                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
> @@ -1392,6 +1396,11 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
>         if (ret)
>                 return ret;
>
> +       if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD &&
> +           crypto_xform->auth.algo == RTE_CRYPTO_AUTH_NULL && ipsec->replay_win_sz) {
> +               plt_err("anti-replay can't be supported with integrity service disabled");
> +               return -EINVAL;
> +       }
>         if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
>             auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
>             auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs
  2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                   ` (20 preceding siblings ...)
  2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
@ 2023-05-25  9:58 ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 02/32] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
                     ` (30 more replies)
  21 siblings, 31 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

New mail box to allocate/free dynamic BPIDs based on NIX type.
Added to new mail box APIs to get/set RX channel config with
new BPIDs.
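
For context, a hedged sketch of how the new APIs might be driven; only the
prototypes and return conventions come from this patch, while the channel
number and error handling are illustrative.

/* Allocate one dynamic BPID for an SSO user and attach it to RX channel 0.
 * roc_nix_bpids_alloc() returns the number of BPIDs allocated, 0 when none
 * are available, or a negative error code.
 */
uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
int rc;

rc = roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, bpids);
if (rc <= 0)
	return rc ? rc : -ENOSPC;

rc = roc_nix_chan_bpid_set(roc_nix, 0, bpids[0], 1, false);
if (rc)
	roc_nix_bpids_free(roc_nix, 1, bpids);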

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
v3:
- Fixed checkpatch issues missed in v2.

v2:
- Rebased on top of dpdk-next-net-mrvl
- Handled comments from Jerin w.r.t commit messages
- Added few more fixes to the series.

 drivers/common/cnxk/roc_cpt.c      |  10 +-
 drivers/common/cnxk/roc_cpt.h      |   3 +-
 drivers/common/cnxk/roc_features.h |   7 ++
 drivers/common/cnxk/roc_mbox.h     |  31 ++++-
 drivers/common/cnxk/roc_nix.h      |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c   | 182 +++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.c  |  24 ++--
 drivers/common/cnxk/roc_nix_priv.h |   1 +
 drivers/common/cnxk/version.map    |   5 +
 9 files changed, 266 insertions(+), 18 deletions(-)

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index dff2fbf2a4..d235ff51ca 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -311,8 +311,7 @@ roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 }
 
 int
-roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
-			     uint16_t param2, uint16_t opcode)
+roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_inline_ipsec_inb_cfg *cfg)
 {
 	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 	struct cpt_rx_inline_lf_cfg_msg *req;
@@ -328,9 +327,10 @@ roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 	}
 
 	req->sso_pf_func = idev_sso_pffunc_get();
-	req->param1 = param1;
-	req->param2 = param2;
-	req->opcode = opcode;
+	req->param1 = cfg->param1;
+	req->param2 = cfg->param2;
+	req->opcode = cfg->opcode;
+	req->bpid = cfg->bpid;
 
 	rc = mbox_process(mbox);
 exit:
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index d3a5683dc8..92a18711dc 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -178,8 +178,7 @@ int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 					struct roc_cpt_inline_ipsec_inb_cfg *cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
-					   uint16_t param1, uint16_t param2,
-					   uint16_t opcode);
+					   struct roc_cpt_inline_ipsec_inb_cfg *cfg);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
 int __roc_api roc_cpt_lfs_print(struct roc_cpt *roc_cpt);
 void __roc_api roc_cpt_iq_disable(struct roc_cpt_lf *lf);
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 252f306a86..c2893faa65 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -40,4 +40,11 @@ roc_feature_nix_has_reass(void)
 	return roc_model_is_cn10ka();
 }
 
+static inline bool
+roc_feature_nix_has_rxchan_multi_bpid(void)
+{
+	if (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0())
+		return true;
+	return false;
+}
 #endif
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index af3c10b0b0..3d5746b9b8 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -275,7 +275,12 @@ struct mbox_msghdr {
 	M(NIX_SPI_TO_SA_ADD, 0x8026, nix_spi_to_sa_add, nix_spi_to_sa_add_req, \
 	  nix_spi_to_sa_add_rsp)                                               \
 	M(NIX_SPI_TO_SA_DELETE, 0x8027, nix_spi_to_sa_delete,                  \
-	  nix_spi_to_sa_delete_req, msg_rsp)
+	  nix_spi_to_sa_delete_req, msg_rsp)                                   \
+	M(NIX_ALLOC_BPIDS, 0x8028, nix_alloc_bpids, nix_alloc_bpid_req,        \
+	  nix_bpids)                                                           \
+	M(NIX_FREE_BPIDS, 0x8029, nix_free_bpids, nix_bpids, msg_rsp)          \
+	M(NIX_RX_CHAN_CFG, 0x802a, nix_rx_chan_cfg, nix_rx_chan_cfg,           \
+	  nix_rx_chan_cfg)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1186,6 +1191,30 @@ struct nix_bp_cfg_rsp {
 	uint8_t __io chan_cnt;
 };
 
+struct nix_alloc_bpid_req {
+	struct mbox_msghdr hdr;
+	uint8_t __io bpid_cnt;
+	uint8_t __io type;
+	uint64_t __io rsvd;
+};
+
+struct nix_bpids {
+#define ROC_NIX_MAX_BPID_CNT	8
+	struct mbox_msghdr hdr;
+	uint8_t __io bpid_cnt;
+	uint16_t __io bpids[ROC_NIX_MAX_BPID_CNT];
+	uint64_t __io rsvd;
+};
+
+struct nix_rx_chan_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io type; /* Interface type(CGX/CPT/LBK) */
+	uint8_t __io read;
+	uint16_t __io chan; /* RX channel to be configured */
+	uint64_t __io val; /* NIX_AF_RX_CHAN_CFG value */
+	uint64_t __io rsvd;
+};
+
 /* Global NIX inline IPSec configuration */
 struct nix_inline_ipsec_cfg {
 	struct mbox_msghdr hdr;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 0ec98ad630..2737bb9517 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -16,6 +16,17 @@
 #define ROC_NIX_SQB_LOWER_THRESH      70U
 #define ROC_NIX_SQB_SLACK	      12U
 
+/* Reserved interface types for BPID allocation */
+#define ROC_NIX_INTF_TYPE_CGX  0
+#define ROC_NIX_INTF_TYPE_LBK  1
+#define ROC_NIX_INTF_TYPE_SDP  2
+#define ROC_NIX_INTF_TYPE_CPT  3
+#define ROC_NIX_INTF_TYPE_RSVD 4
+
+/* Application based types for BPID allocation, start from end (255 unused rsvd) */
+#define ROC_NIX_INTF_TYPE_CPT_NIX 254
+#define ROC_NIX_INTF_TYPE_SSO     253
+
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
 	ROC_NIX_RSS_RETA_SZ_128 = 128,
@@ -837,6 +848,16 @@ enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
 				     uint8_t ena, uint8_t force, uint8_t tc);
+int __roc_api roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type,
+				  uint8_t bp_cnt, uint16_t *bpids);
+int __roc_api roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt,
+				 uint16_t *bpids);
+int __roc_api roc_nix_rx_chan_cfg_get(struct roc_nix *roc_nix, uint16_t chan,
+				      bool is_cpt, uint64_t *cfg);
+int __roc_api roc_nix_rx_chan_cfg_set(struct roc_nix *roc_nix, uint16_t chan,
+				      bool is_cpt, uint64_t val);
+int __roc_api roc_nix_chan_bpid_set(struct roc_nix *roc_nix, uint16_t chan,
+				    uint64_t bpid, int ena, bool cpt_chan);
 
 /* NPC */
 int __roc_api roc_nix_npc_promisc_ena_dis(struct roc_nix *roc_nix, int enable);
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index cec83b31f3..3b726673a6 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -104,6 +104,17 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		nix->cpt_lbpid = 0;
 	}
 
+	/* CPT to NIX BP on all channels */
+	if (!roc_feature_nix_has_rxchan_multi_bpid() || !nix->cpt_nixbpid)
+		goto exit;
+
+	mbox_put(mbox);
+	for (i = 0; i < nix->rx_chan_cnt; i++) {
+		rc = roc_nix_chan_bpid_set(roc_nix, i, nix->cpt_nixbpid, enable, false);
+		if (rc)
+			break;
+	}
+	return rc;
 exit:
 	mbox_put(mbox);
 	return rc;
@@ -599,3 +610,174 @@ roc_nix_chan_count_get(struct roc_nix *roc_nix)
 
 	return nix->chan_cnt;
 }
+
+/* Allocate BPID for requested type
+ * Returns number of BPIDs allocated
+ *	0 if no BPIDs available
+ *	-ve value on error
+ */
+int
+roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type, uint8_t bp_cnt, uint16_t *bpids)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_alloc_bpid_req *req;
+	struct nix_bpids *rsp;
+	int rc = -EINVAL;
+
+	/* Use this api for unreserved interface types */
+	if ((type < ROC_NIX_INTF_TYPE_RSVD) || (bp_cnt > ROC_NIX_MAX_BPID_CNT) || !bpids)
+		goto exit;
+
+	rc = -ENOSPC;
+	req = mbox_alloc_msg_nix_alloc_bpids(mbox);
+	if (req == NULL)
+		goto exit;
+	req->type = type;
+	req->bpid_cnt = bp_cnt;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	for (rc = 0; rc < rsp->bpid_cnt; rc++)
+		bpids[rc] = rsp->bpids[rc];
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt, uint16_t *bpids)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_bpids *req;
+	int rc = -EINVAL;
+
+	/* Use this api for unreserved interface types */
+	if ((bp_cnt > ROC_NIX_MAX_BPID_CNT) || !bpids)
+		goto exit;
+
+	rc = -ENOSPC;
+	req = mbox_alloc_msg_nix_free_bpids(mbox);
+	if (req == NULL)
+		goto exit;
+	for (rc = 0; rc < bp_cnt; rc++)
+		req->bpids[rc] = bpids[rc];
+	req->bpid_cnt = rc;
+
+	rc = mbox_process(mbox);
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_rx_chan_cfg_get(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t *cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_rx_chan_cfg *req;
+	struct nix_rx_chan_cfg *rsp;
+	int rc = -EINVAL;
+
+	req = mbox_alloc_msg_nix_rx_chan_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+	if (is_cpt)
+		req->type = ROC_NIX_INTF_TYPE_CPT;
+	req->chan = chan;
+	req->read = 1;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+	*cfg = rsp->val;
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_nix_rx_chan_cfg_set(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t val)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = mbox_get(nix->dev.mbox);
+	struct nix_rx_chan_cfg *req;
+	int rc = -EINVAL;
+
+	req = mbox_alloc_msg_nix_rx_chan_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+	if (is_cpt)
+		req->type = ROC_NIX_INTF_TYPE_CPT;
+	req->chan = chan;
+	req->val = val;
+	req->read = 0;
+
+	rc = mbox_process(mbox);
+	if (rc)
+		goto exit;
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+#define NIX_BPID1_ENA 15
+#define NIX_BPID2_ENA 14
+#define NIX_BPID3_ENA 13
+
+#define NIX_BPID1_OFF 20
+#define NIX_BPID2_OFF 32
+#define NIX_BPID3_OFF 44
+
+int
+roc_nix_chan_bpid_set(struct roc_nix *roc_nix, uint16_t chan, uint64_t bpid, int ena, bool cpt_chan)
+{
+	uint64_t cfg;
+	int rc;
+
+	if (!roc_feature_nix_has_rxchan_multi_bpid())
+		return -ENOTSUP;
+
+	rc = roc_nix_rx_chan_cfg_get(roc_nix, chan, cpt_chan, &cfg);
+	if (rc)
+		return rc;
+
+	if (ena) {
+		if ((((cfg >> NIX_BPID1_OFF) & GENMASK_ULL(8, 0)) == bpid) ||
+		    (((cfg >> NIX_BPID2_OFF) & GENMASK_ULL(8, 0)) == bpid) ||
+		    (((cfg >> NIX_BPID3_OFF) & GENMASK_ULL(8, 0)) == bpid))
+			return 0;
+
+		if (!(cfg & BIT_ULL(NIX_BPID1_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID1_OFF + 8, NIX_BPID1_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID1_OFF) | BIT_ULL(NIX_BPID1_ENA));
+		} else if (!(cfg & BIT_ULL(NIX_BPID2_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID2_OFF + 8, NIX_BPID2_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID2_OFF) | BIT_ULL(NIX_BPID2_ENA));
+		} else if (!(cfg & BIT_ULL(NIX_BPID3_ENA))) {
+			cfg &= ~GENMASK_ULL(NIX_BPID3_OFF + 8, NIX_BPID3_OFF);
+			cfg |= (((uint64_t)bpid << NIX_BPID3_OFF) | BIT_ULL(NIX_BPID3_ENA));
+		} else {
+			plt_nix_dbg("Exceed maximum BPIDs");
+			return -ENOSPC;
+		}
+	} else {
+		if (((cfg >> NIX_BPID1_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID1_OFF + 8, NIX_BPID1_OFF) |
+				 BIT_ULL(NIX_BPID1_ENA));
+		} else if (((cfg >> NIX_BPID2_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID2_OFF + 8, NIX_BPID2_OFF) |
+				 BIT_ULL(NIX_BPID2_ENA));
+		} else if (((cfg >> NIX_BPID3_OFF) & GENMASK_ULL(8, 0)) == bpid) {
+			cfg &= ~(GENMASK_ULL(NIX_BPID3_OFF + 8, NIX_BPID3_OFF) |
+				 BIT_ULL(NIX_BPID3_ENA));
+		} else {
+			plt_nix_dbg("BPID not found");
+			return -EINVAL;
+		}
+	}
+	return roc_nix_rx_chan_cfg_set(roc_nix, chan, cpt_chan, cfg);
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 076d83e8d5..9485bba099 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -603,11 +603,10 @@ int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_cpt_inline_ipsec_inb_cfg cfg;
 	struct idev_cfg *idev = idev_get_cfg();
+	uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
 	struct roc_cpt *roc_cpt;
-	uint16_t opcode;
-	uint16_t param1;
-	uint16_t param2;
 	int rc;
 
 	if (idev == NULL)
@@ -624,9 +623,9 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	}
 
 	if (roc_model_is_cn9k()) {
-		param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
-		param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
-		opcode =
+		cfg.param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
+		cfg.param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
+		cfg.opcode =
 			((ROC_IE_ON_INB_MAX_CTX_LEN << 8) |
 			 (ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6)));
 	} else {
@@ -634,13 +633,18 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 
 		u.u16 = 0;
 		u.s.esp_trailer_disable = 1;
-		param1 = u.u16;
-		param2 = 0;
-		opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
+		cfg.param1 = u.u16;
+		cfg.param2 = 0;
+		cfg.opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
+		rc = roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_CPT_NIX, 1, bpids);
+		if (rc > 0) {
+			nix->cpt_nixbpid = bpids[0];
+			cfg.bpid = nix->cpt_nixbpid;
+		}
 	}
 
 	/* Do onetime Inbound Inline config in CPTPF */
-	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, param2, opcode);
+	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, &cfg);
 	if (rc && rc != -EEXIST) {
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 2fe9093324..99e27cdc56 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -208,6 +208,7 @@ struct nix {
 	uint16_t outb_se_ring_cnt;
 	uint16_t outb_se_ring_base;
 	uint16_t cpt_lbpid;
+	uint16_t cpt_nixbpid;
 	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5281c71550..e7c6f6bce5 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -147,6 +147,9 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_bpids_alloc;
+	roc_nix_bpids_free;
+	roc_nix_chan_bpid_set;
 	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
@@ -277,6 +280,8 @@ INTERNAL {
 	roc_nix_rss_key_set;
 	roc_nix_rss_reta_get;
 	roc_nix_rss_reta_set;
+	roc_nix_rx_chan_cfg_get;
+	roc_nix_rx_chan_cfg_set;
 	roc_nix_rx_drop_re_set;
 	roc_nix_rx_queue_intr_disable;
 	roc_nix_rx_queue_intr_enable;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 02/32] common/cnxk: add pool BPID to RQ while using common pool
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 03/32] common/cnxk: fix CPT backpressure disable on LBK Nithin Dabilpuram
                     ` (29 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Shijith Thotton

From: Shijith Thotton <sthotton@marvell.com>

When RQs of two different traffic classes use the same mempool, the
BPIDs can differ between the RQs, while the BPID of only one RQ can be
configured per pool. In such cases, a new BPID is configured on both
RQs and the pool, or pool back-pressure is disabled.

CN103xx and CN106xx B0 support configuring multiple BPIDs per RQ.
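
The decision the patch implements, condensed into a sketch; the roc_nix_*
calls are taken from nix_rx_chan_multi_bpid_cfg() in the diff below, while
aura_bpid_in_use_by_other_chan() is a hypothetical stand-in for the TAILQ
walk over roc_idev_nix_list_get(), and error handling is simplified.

/* If the pool's BPID is already owned by another NIX RX channel, allocate
 * a fresh BPID and program it on both channels (possible only on hardware
 * that supports multiple BPIDs per channel); otherwise attach the existing
 * BPID to this channel.
 */
if (!roc_feature_nix_has_rxchan_multi_bpid())
	return -ENOTSUP;

if (aura_bpid_in_use_by_other_chan(bpid, &other_nix, &other_chan)) {
	roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, &bpid_new);
	roc_nix_chan_bpid_set(other_nix, other_chan, bpid_new, 1, false);
	roc_nix_chan_bpid_set(roc_nix, chan, bpid_new, 1, false);
} else {
	roc_nix_chan_bpid_set(roc_nix, chan, bpid, 1, false);
}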

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
 drivers/common/cnxk/roc_idev.c      |  12 +++
 drivers/common/cnxk/roc_idev.h      |   1 +
 drivers/common/cnxk/roc_idev_priv.h |   1 +
 drivers/common/cnxk/roc_nix.c       |   5 +
 drivers/common/cnxk/roc_nix.h       |   3 +
 drivers/common/cnxk/roc_nix_fc.c    | 156 ++++++++++++++++------------
 drivers/common/cnxk/roc_npa.c       |  48 +++++++++
 drivers/common/cnxk/roc_npa.h       |   2 +
 drivers/common/cnxk/version.map     |   2 +
 9 files changed, 166 insertions(+), 64 deletions(-)

diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 62a4fd8880..f420f0158d 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -39,6 +39,7 @@ idev_set_defaults(struct idev_cfg *idev)
 	idev->bphy = NULL;
 	idev->cpt = NULL;
 	idev->nix_inl_dev = NULL;
+	TAILQ_INIT(&idev->roc_nix_list);
 	plt_spinlock_init(&idev->nix_inl_dev_lock);
 	plt_spinlock_init(&idev->npa_dev_lock);
 	__atomic_store_n(&idev->npa_refcnt, 0, __ATOMIC_RELEASE);
@@ -201,6 +202,17 @@ roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix)
 	return (uint64_t *)&inl_dev->sa_soft_exp_ring[nix->outb_se_ring_base];
 }
 
+struct roc_nix_list *
+roc_idev_nix_list_get(void)
+{
+	struct idev_cfg *idev;
+
+	idev = idev_get_cfg();
+	if (idev != NULL)
+		return &idev->roc_nix_list;
+	return NULL;
+}
+
 void
 roc_idev_cpt_set(struct roc_cpt *cpt)
 {
diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 926aac0634..640ca97708 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -17,5 +17,6 @@ void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
 uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
+struct roc_nix_list *__roc_api roc_idev_nix_list_get(void);
 
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index b97d2936a2..d83522799f 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -32,6 +32,7 @@ struct idev_cfg {
 	struct roc_sso *sso;
 	struct nix_inl_dev *nix_inl_dev;
 	struct idev_nix_inl_cfg inl_cfg;
+	struct roc_nix_list roc_nix_list;
 	plt_spinlock_t nix_inl_dev_lock;
 	plt_spinlock_t npa_dev_lock;
 };
diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 97ef1c7133..39943e4ba7 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -417,6 +417,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	nix = roc_nix_to_nix_priv(roc_nix);
 	pci_dev = roc_nix->pci_dev;
 	dev = &nix->dev;
+	TAILQ_INSERT_TAIL(roc_idev_nix_list_get(), roc_nix, next);
 
 	if (nix->dev.drv_inited)
 		return 0;
@@ -425,6 +426,10 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 		goto skip_dev_init;
 
 	memset(nix, 0, sizeof(*nix));
+
+	/* Since 0 is a valid BPID, use -1 to represent invalid value. */
+	memset(nix->bpid, -1, sizeof(nix->bpid));
+
 	/* Initialize device  */
 	rc = dev_init(dev, pci_dev);
 	if (rc) {
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 2737bb9517..188b8800d3 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -425,6 +425,8 @@ typedef void (*q_err_get_t)(struct roc_nix *roc_nix, void *data);
 typedef void (*link_info_get_t)(struct roc_nix *roc_nix,
 				struct roc_nix_link_info *link);
 
+TAILQ_HEAD(roc_nix_list, roc_nix);
+
 struct roc_nix {
 	/* Input parameters */
 	struct plt_pci_device *pci_dev;
@@ -456,6 +458,7 @@ struct roc_nix {
 	uint32_t buf_sz;
 	uint64_t meta_aura_handle;
 	uintptr_t meta_mempool;
+	TAILQ_ENTRY(roc_nix) next;
 
 #define ROC_NIX_MEM_SZ (6 * 1056)
 	uint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 3b726673a6..8feb773e1d 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -428,17 +428,64 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 	return rc;
 }
 
+static int
+nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid, uint16_t *bpid_new)
+{
+	struct roc_nix *roc_nix_tmp, *roc_nix_pre = NULL;
+	uint8_t chan_pre;
+
+	if (!roc_feature_nix_has_rxchan_multi_bpid())
+		return -ENOTSUP;
+
+	/* Find associated NIX RX channel if Aura BPID is of that of a NIX. */
+	TAILQ_FOREACH(roc_nix_tmp, roc_idev_nix_list_get(), next) {
+		struct nix *nix = roc_nix_to_nix_priv(roc_nix_tmp);
+		int i;
+
+		for (i = 0; i < NIX_MAX_CHAN; i++) {
+			if (nix->bpid[i] == bpid)
+				break;
+		}
+
+		if (i < NIX_MAX_CHAN) {
+			roc_nix_pre = roc_nix_tmp;
+			chan_pre = i;
+			break;
+		}
+	}
+
+	/* Alloc and configure a new BPID if Aura BPID is that of a NIX. */
+	if (roc_nix_pre) {
+		if (roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, bpid_new) <= 0)
+			return -ENOSPC;
+
+		if (roc_nix_chan_bpid_set(roc_nix_pre, chan_pre, *bpid_new, 1, false) < 0)
+			return -ENOSPC;
+
+		if (roc_nix_chan_bpid_set(roc_nix, chan, *bpid_new, 1, false) < 0)
+			return -ENOSPC;
+
+		return 0;
+	} else {
+		return roc_nix_chan_bpid_set(roc_nix, chan, bpid, 1, false);
+	}
+
+	return 0;
+}
+
+#define NIX_BPID_INVALID 0xFFFF
+
 void
 roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 		      uint8_t force, uint8_t tc)
 {
+	uint32_t aura_id = roc_npa_aura_handle_to_aura(pool_id);
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct npa_lf *lf = idev_npa_obj_get();
 	struct npa_aq_enq_req *req;
 	struct npa_aq_enq_rsp *rsp;
+	uint8_t bp_thresh, bp_intf;
 	struct mbox *mbox;
-	uint32_t limit;
-	uint64_t shift;
 	int rc;
 
 	if (roc_nix_is_sdp(roc_nix))
@@ -446,93 +493,74 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	if (!lf)
 		return;
-	mbox = mbox_get(lf->mbox);
 
-	req = mbox_alloc_msg_npa_aq_enq(mbox);
-	if (req == NULL)
-		goto exit;
+	mbox = lf->mbox;
+	req = mbox_alloc_msg_npa_aq_enq(mbox_get(mbox));
+	if (req == NULL) {
+		mbox_put(mbox);
+		return;
+	}
 
-	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
+	req->aura_id = aura_id;
 	req->ctype = NPA_AQ_CTYPE_AURA;
 	req->op = NPA_AQ_INSTOP_READ;
 
 	rc = mbox_process_msg(mbox, (void *)&rsp);
-	if (rc)
-		goto exit;
+	mbox_put(mbox);
+	if (rc) {
+		plt_nix_dbg("Failed to read context of aura 0x%" PRIx64, pool_id);
+		return;
+	}
 
-	limit = rsp->aura.limit;
-	shift = rsp->aura.shift;
+	bp_intf = 1 << nix->is_nix1;
+	bp_thresh = NIX_RQ_AURA_THRESH(rsp->aura.limit >> rsp->aura.shift);
 
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
-		uint16_t bpid;
-		bool nix1;
+		uint16_t bpid =
+			(rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
 
-		nix1 = !!(rsp->aura.bp_ena & 0x2);
-		if (nix1)
-			bpid = rsp->aura.nix1_bpid;
-		else
-			bpid = rsp->aura.nix0_bpid;
+		/* Disable BP if BPIDs don't match and couldn't add new BPID. */
+		if (bpid != nix->bpid[tc]) {
+			uint16_t bpid_new = NIX_BPID_INVALID;
 
-		/* If BP ids don't match disable BP. */
-		if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc])) &&
-		    !force) {
-			req = mbox_alloc_msg_npa_aq_enq(mbox);
-			if (req == NULL)
-				goto exit;
+			if ((nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new) < 0) &&
+			    !force) {
+				plt_info("Disabling BP/FC on aura 0x%" PRIx64
+					 " as it shared across ports or tc",
+					 pool_id);
 
-			plt_info("Disabling BP/FC on aura 0x%" PRIx64
-				 " as it shared across ports or tc",
-				 pool_id);
-			req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
-			req->ctype = NPA_AQ_CTYPE_AURA;
-			req->op = NPA_AQ_INSTOP_WRITE;
+				if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
+					plt_nix_dbg(
+						"Disabling backpressue failed on aura 0x%" PRIx64,
+						pool_id);
+			}
 
-			req->aura.bp_ena = 0;
-			req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
-
-			mbox_process(mbox);
+			/* Configure Aura with new BPID if it is allocated. */
+			if (bpid_new != NIX_BPID_INVALID) {
+				if (roc_npa_aura_bp_configure(pool_id, bpid_new, bp_intf, bp_thresh,
+							      true))
+					plt_nix_dbg(
+						"Enabling backpressue failed on aura 0x%" PRIx64,
+						pool_id);
+			}
 		}
 
-		if ((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc]))
-			plt_info("Ignoring aura 0x%" PRIx64 "->%u bpid mapping",
-				 pool_id, nix->bpid[tc]);
-		goto exit;
+		return;
 	}
 
 	/* BP was previously enabled but now disabled skip. */
 	if (rsp->aura.bp && ena)
-		goto exit;
-
-	req = mbox_alloc_msg_npa_aq_enq(mbox);
-	if (req == NULL)
-		goto exit;
-
-	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
-	req->ctype = NPA_AQ_CTYPE_AURA;
-	req->op = NPA_AQ_INSTOP_WRITE;
+		return;
 
 	if (ena) {
-		if (nix->is_nix1) {
-			req->aura.nix1_bpid = nix->bpid[tc];
-			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
-		} else {
-			req->aura.nix0_bpid = nix->bpid[tc];
-			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
-		}
-		req->aura.bp = NIX_RQ_AURA_THRESH(limit >> shift);
-		req->aura_mask.bp = ~(req->aura_mask.bp);
+		if (roc_npa_aura_bp_configure(pool_id, nix->bpid[tc], bp_intf, bp_thresh, true))
+			plt_nix_dbg("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	} else {
-		req->aura.bp = 0;
-		req->aura_mask.bp = ~(req->aura_mask.bp);
+		if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
+			plt_nix_dbg("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	}
 
-	req->aura.bp_ena = (!!ena << nix->is_nix1);
-	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
-
-	mbox_process(mbox);
-exit:
-	mbox_put(mbox);
 	return;
 }
 
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 3b0f95a304..dd649812b4 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -882,6 +882,54 @@ roc_npa_zero_aura_handle(void)
 	return 0;
 }
 
+int
+roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf, uint8_t bp_thresh,
+			  bool enable)
+{
+	uint32_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf = idev_npa_obj_get();
+	struct npa_aq_enq_req *req;
+	struct mbox *mbox;
+	int rc = 0;
+
+	if (lf == NULL)
+		return NPA_ERR_PARAM;
+
+	mbox = mbox_get(lf->mbox);
+	req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (req == NULL) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	req->aura_id = aura_id;
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_WRITE;
+
+	if (enable) {
+		if (bp_intf & 0x1) {
+			req->aura.nix0_bpid = bpid;
+			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
+		} else {
+			req->aura.nix1_bpid = bpid;
+			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
+		}
+		req->aura.bp = bp_thresh;
+		req->aura_mask.bp = ~(req->aura_mask.bp);
+	} else {
+		req->aura.bp = 0;
+		req->aura_mask.bp = ~(req->aura_mask.bp);
+	}
+
+	req->aura.bp_ena = bp_intf;
+	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+	mbox_process(mbox);
+fail:
+	mbox_put(mbox);
+	return rc;
+}
+
 static inline int
 npa_attach(struct mbox *m_box)
 {
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index d3caa71586..e1e164499e 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -749,6 +749,8 @@ uint64_t __roc_api roc_npa_zero_aura_handle(void);
 int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
 uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
 uint64_t __roc_api roc_npa_buf_type_limit_get(uint64_t type_mask);
+int __roc_api roc_npa_aura_bp_configure(uint64_t aura_id, uint16_t bpid, uint8_t bp_intf,
+					uint8_t bp_thresh, bool enable);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index e7c6f6bce5..d740d9df81 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -99,6 +99,7 @@ INTERNAL {
 	roc_idev_npa_nix_get;
 	roc_idev_num_lmtlines_get;
 	roc_idev_nix_inl_meta_aura_get;
+	roc_idev_nix_list_get;
 	roc_ml_reg_read64;
 	roc_ml_reg_write64;
 	roc_ml_reg_read32;
@@ -361,6 +362,7 @@ INTERNAL {
 	roc_npa_aura_limit_modify;
 	roc_npa_aura_op_range_get;
 	roc_npa_aura_op_range_set;
+	roc_npa_aura_bp_configure;
 	roc_npa_ctx_dump;
 	roc_npa_dev_fini;
 	roc_npa_dev_init;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 03/32] common/cnxk: fix CPT backpressure disable on LBK
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 02/32] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 04/32] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
                     ` (28 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rahul Bhansali, stable

From: Rahul Bhansali <rbhansali@marvell.com>

For LBK interfaces, roc_nix_inl_inb_is_enabled() is false, so
backpressure on CPT gets disabled and causes a CQ full interrupt.
NIXX_AF_RX_CHAN[0x800]_CFG is a global config for all PFs/VFs of
RPM/LBK, hence disabling backpressure on CPT is not required.

Fixes: 0663a84524e5 ("common/cnxk: enable backpressure on CPT with inline inbound")
cc: stable@dpdk.org

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/common/cnxk/roc_nix_fc.c | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 8feb773e1d..6d6997ed82 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -87,25 +87,11 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		if (rc)
 			goto exit;
 		nix->cpt_lbpid = rsp->chan_bpid[0] & 0x1FF;
-	} else {
-		req = mbox_alloc_msg_nix_cpt_bp_disable(mbox);
-		if (req == NULL)
-			goto exit;
-		req->chan_base = 0;
-		if (roc_nix_is_lbk(roc_nix) || roc_nix_is_sdp(roc_nix))
-			req->chan_cnt = NIX_LBK_MAX_CHAN;
-		else
-			req->chan_cnt = NIX_CGX_MAX_CHAN;
-		req->bpid_per_chan = 0;
-
-		rc = mbox_process_msg(mbox, (void *)&rsp);
-		if (rc)
-			goto exit;
-		nix->cpt_lbpid = 0;
 	}
 
 	/* CPT to NIX BP on all channels */
-	if (!roc_feature_nix_has_rxchan_multi_bpid() || !nix->cpt_nixbpid)
+	if (!roc_feature_nix_has_rxchan_multi_bpid() || !nix->cpt_nixbpid ||
+	    !roc_nix_inl_inb_is_enabled(roc_nix))
 		goto exit;
 
 	mbox_put(mbox);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 04/32] common/cnxk: skip flow ctrl set on non-existent meta aura
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 02/32] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 03/32] common/cnxk: fix CPT backpressure disable on LBK Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 05/32] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
                     ` (27 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Thomas Monjalon, Nithin Kumar Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Skip setting flow control on the local meta aura if it is not yet
created. Also, in flow control mode set, do a get first to confirm
whether the device is already in the requested state, to avoid an
unnecessary set and mbox failures.
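
The mode-set side of this can be pictured with a small get-before-set
sketch (it assumes only the public roc_nix_fc_mode_get()/set() pair; the
requested mode and error handling are illustrative, not the exact driver
flow):

    enum roc_nix_fc_mode cur, req = ROC_NIX_FC_FULL;
    int rc;

    cur = roc_nix_fc_mode_get(roc_nix);
    if (cur != req) {
            /* Issue the mbox only when the mode actually changes */
            rc = roc_nix_fc_mode_set(roc_nix, req);
            if (rc)
                    plt_err("Failed to set flow control mode, rc=%d", rc);
    }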

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 .mailmap                           | 1 +
 drivers/common/cnxk/roc_dev.c      | 1 +
 drivers/common/cnxk/roc_nix_fc.c   | 4 ++--
 drivers/common/cnxk/roc_nix_inl.c  | 3 +++
 drivers/net/cnxk/cnxk_ethdev.c     | 5 +++--
 drivers/net/cnxk/cnxk_ethdev_ops.c | 4 ++++
 6 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/.mailmap b/.mailmap
index 57d02a87aa..edf2f5e639 100644
--- a/.mailmap
+++ b/.mailmap
@@ -991,6 +991,7 @@ Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
+Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
 Noa Ezra <noae@mellanox.com>
diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 2388237186..5e4e564ebe 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -421,6 +421,7 @@ process_msgs(struct dev *dev, struct mbox *mbox)
 			dev->pf_func = msg->pcifunc;
 			break;
 		case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
+		case MBOX_MSG_CGX_CFG_PAUSE_FRM:
 			/* Handling the case where one VF tries to disable PFC
 			 * while PFC already configured on other VFs. This is
 			 * not an error but a warning which can be ignored.
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 6d6997ed82..69e331d67d 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -297,7 +297,7 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 				      fc_cfg->rq_cfg.enable, true,
 				      fc_cfg->rq_cfg.tc);
 
-		if (roc_nix->local_meta_aura_ena)
+		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
 					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc);
 	}
@@ -395,6 +395,7 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 		goto exit;
 	}
 
+	/* Set new config */
 	req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
 	if (req == NULL)
 		goto exit;
@@ -408,7 +409,6 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
 
 	nix->rx_pause = rx_pause;
 	nix->tx_pause = tx_pause;
-
 exit:
 	mbox_put(mbox);
 	return rc;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 9485bba099..b16756d642 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -131,6 +131,9 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	}
 	roc_nix->meta_mempool = mp;
 
+	plt_nix_dbg("Created meta aura %p(%s)for port %d", (void *)*meta_aura, mp_name,
+		    roc_nix->port_id);
+
 	if (!roc_nix->local_meta_aura_ena) {
 		inl_cfg->buf_sz = buf_sz;
 		inl_cfg->nb_bufs = nb_bufs;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 42a52ed0ca..7cc41e0c31 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -378,8 +378,9 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	if (rc)
 		return rc;
 
-	fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL :
-						  RTE_ETH_FC_TX_PAUSE;
+	fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL : RTE_ETH_FC_TX_PAUSE;
+	fc->rx_pause = (fc->mode == RTE_ETH_FC_FULL) || (fc->mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc->mode == RTE_ETH_FC_FULL) || (fc->mode == RTE_ETH_FC_TX_PAUSE);
 	return rc;
 }
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 068b7c3502..bce6d59bbc 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -342,6 +342,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			return rc;
 	}
 
+	/* Skip mode set if we are already in the same state */
+	if (fc->rx_pause == rx_pause && fc->tx_pause == tx_pause)
+		return 0;
+
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
 	if (rc)
 		return rc;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 05/32] common/cnxk: reduce sqes per sqb by one
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (2 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 04/32] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 06/32] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
                     ` (26 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Each SQB reserves the last SQE to store a pointer to the next SQB, so
each SQB holds either 31 or 63 SQEs depending on the send descriptor
size selected.

This patch also considers sqb_slack to maintain threshold buffers for
syncing between HW and SW. The threshold is the maximum of 30% of the
queue size and sqb_slack.
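
To make the new sizing concrete, here is the arithmetic for an assumed
4 kB SQB and a 1024-descriptor SQ (mirroring sqb_pool_populate(); the
macros involved are internal to the ROC layer):

    uint16_t sqes_per_sqb, nb_sqb_bufs, thr;

    sqes_per_sqb = (4096 / 8) / 8 - 1;               /* 64 slots, last one reserved -> 63 */
    nb_sqb_bufs  = PLT_DIV_CEIL(1024, sqes_per_sqb); /* 1024 descriptors -> 17 SQBs */
    thr          = PLT_DIV_CEIL(nb_sqb_bufs * ROC_NIX_SQB_THRESH, 100); /* 30% -> 6 */
    nb_sqb_bufs += 1;                                /* NIX_SQB_PREFETCH */
    /* pool size = nb_sqb_bufs + PLT_MAX(thr, ROC_NIX_SQB_SLACK) = 18 + 12 = 30 SQBs */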

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       |  2 +-
 drivers/common/cnxk/roc_nix_priv.h  |  2 +-
 drivers/common/cnxk/roc_nix_queue.c | 21 ++++++++++-----------
 drivers/event/cnxk/cn10k_eventdev.c |  2 +-
 drivers/event/cnxk/cn9k_eventdev.c  |  2 +-
 5 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 188b8800d3..50aef4fe85 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -13,7 +13,7 @@
 #define ROC_NIX_BPF_STATS_MAX	      12
 #define ROC_NIX_MTR_ID_INVALID	      UINT32_MAX
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
-#define ROC_NIX_SQB_LOWER_THRESH      70U
+#define ROC_NIX_SQB_THRESH	      30U
 #define ROC_NIX_SQB_SLACK	      12U
 
 /* Reserved interface types for BPID allocation */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 99e27cdc56..7144d1ee10 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -12,7 +12,7 @@
 #define NIX_MAX_SQB	     ((uint16_t)512)
 #define NIX_DEF_SQB	     ((uint16_t)16)
 #define NIX_MIN_SQB	     ((uint16_t)8)
-#define NIX_SQB_LIST_SPACE   ((uint16_t)2)
+#define NIX_SQB_PREFETCH     ((uint16_t)1)
 
 /* Apply BP/DROP when CQ is 95% full */
 #define NIX_CQ_THRESH_LEVEL	(5 * 256 / 100)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index ac4d9856c1..d29fafa895 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -982,7 +982,7 @@ static int
 sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
-	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
+	uint16_t sqes_per_sqb, count, nb_sqb_bufs, thr;
 	struct npa_pool_s pool;
 	struct npa_aura_s aura;
 	uint64_t blk_sz;
@@ -995,22 +995,21 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	else
 		sqes_per_sqb = (blk_sz / 8) / 8;
 
+	/* Reserve One SQE in each SQB to hold pointer for next SQB */
+	sqes_per_sqb -= 1;
+
 	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
-	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
-	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
+	nb_sqb_bufs = PLT_DIV_CEIL(sq->nb_desc, sqes_per_sqb);
+	thr = PLT_DIV_CEIL((nb_sqb_bufs * ROC_NIX_SQB_THRESH), 100);
+	nb_sqb_bufs += NIX_SQB_PREFETCH;
 	/* Clamp up the SQB count */
-	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
-			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
+	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count, (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
 
 	sq->nb_sqb_bufs = nb_sqb_bufs;
 	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
-	sq->nb_sqb_bufs_adj =
-		nb_sqb_bufs -
-		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
-	sq->nb_sqb_bufs_adj =
-		(sq->nb_sqb_bufs_adj * ROC_NIX_SQB_LOWER_THRESH) / 100;
+	sq->nb_sqb_bufs_adj = nb_sqb_bufs;
 
-	nb_sqb_bufs += roc_nix->sqb_slack;
+	nb_sqb_bufs += PLT_MAX(thr, roc_nix->sqb_slack);
 	/* Explicitly set nat_align alone as by default pool is with both
 	 * nat_align and buf_offset = 1 which we don't want for SQB.
 	 */
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 49d205af39..fd71ff15ca 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -995,7 +995,7 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 		txq->nb_sqb_bufs_adj =
-			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+			((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 2d2985f175..b104d19b9b 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1037,7 +1037,7 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 		txq->nb_sqb_bufs_adj =
-			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+			((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 06/32] common/cnxk: dump SW SSO work count as xstat
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (3 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 05/32] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 07/32] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
                     ` (25 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Dump SW SSO work count as xstat.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_inl_dev_irq.c |  1 +
 drivers/common/cnxk/roc_nix_inl_priv.h    |  1 +
 drivers/common/cnxk/roc_nix_stats.c       | 17 +++++++++++------
 drivers/common/cnxk/roc_nix_xstats.h      |  4 ++++
 4 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 445b440447..becd7907f2 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -41,6 +41,7 @@ nix_inl_sso_work_cb(struct nix_inl_dev *inl_dev)
 			goto again;
 	}
 
+	inl_dev->sso_work_cnt += cnt;
 	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
 }
 
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index 528d2db365..b0a8976c6b 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -57,6 +57,7 @@ struct nix_inl_dev {
 	bool is_nix1;
 	uint8_t spb_drop_pc;
 	uint8_t lpb_drop_pc;
+	uint64_t sso_work_cnt;
 
 	/* NIX/CPT data */
 	void *inb_sa_base;
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index 6b5803af84..ca0e8ccb4f 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -24,12 +24,7 @@
 int
 roc_nix_num_xstats_get(struct roc_nix *roc_nix)
 {
-	if (roc_nix_is_vf_or_sdp(roc_nix))
-		return CNXK_NIX_NUM_XSTATS_REG;
-	else if (roc_model_is_cn9k())
-		return CNXK_NIX_NUM_XSTATS_CGX;
-
-	return CNXK_NIX_NUM_XSTATS_RPM;
+	return roc_nix_xstats_names_get(roc_nix, NULL, 0);
 }
 
 int
@@ -360,6 +355,12 @@ roc_nix_xstats_get(struct roc_nix *roc_nix, struct roc_nix_xstat *xstats,
 				xstats[count].id = count;
 				count++;
 			}
+			for (i = 0; i < PLT_DIM(inl_sw_xstats); i++) {
+				if (!inl_sw_xstats[i].offset)
+					xstats[count].value = inl_dev->sso_work_cnt;
+				xstats[count].id = count;
+				count++;
+			}
 		}
 	}
 
@@ -475,6 +476,10 @@ roc_nix_xstats_names_get(struct roc_nix *roc_nix,
 						      inl_nix_rq_xstats, i);
 				count++;
 			}
+			for (i = 0; i < PLT_DIM(inl_sw_xstats); i++) {
+				NIX_XSTATS_NAME_PRINT(xstats_names, count, inl_sw_xstats, i);
+				count++;
+			}
 		}
 	}
 
diff --git a/drivers/common/cnxk/roc_nix_xstats.h b/drivers/common/cnxk/roc_nix_xstats.h
index 813fb7f578..11b8e1c0ff 100644
--- a/drivers/common/cnxk/roc_nix_xstats.h
+++ b/drivers/common/cnxk/roc_nix_xstats.h
@@ -206,6 +206,10 @@ static const struct cnxk_nix_xstats_name nix_tx_xstats_cgx[] = {
 	{"cgx_tx_pause_packets", CGX_TX_PAUSE_PKTS},
 };
 
+static const struct cnxk_nix_xstats_name inl_sw_xstats[] = {
+	{"inl_sso_work_cnt", 0},
+};
+
 #define CNXK_NIX_NUM_RX_XSTATS	   PLT_DIM(nix_rx_xstats)
 #define CNXK_NIX_NUM_TX_XSTATS	   PLT_DIM(nix_tx_xstats)
 #define CNXK_NIX_NUM_QUEUE_XSTATS  PLT_DIM(nix_q_xstats)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 07/32] common/cnxk: add percent drop threshold to pool
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (4 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 06/32] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 08/32] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
                     ` (24 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Sunil Kumar Kori <skori@marvell.com>

Currently a hard-coded drop threshold (95%) is configured on the
aura/pool as the drop limit.

This patch adds an input parameter to the RoC API so that a user-passed
percentage value can be configured.
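
A minimal usage sketch of the extended API (the pool handle, traffic
class and the 70% figure are illustrative):

    uint64_t pool_id = rxq_pool_id; /* aura handle of the Rx mempool (assumed) */
    uint8_t tc = 0;

    /* Enable aura back-pressure with a 70% drop threshold instead of the
     * previous hard-coded 95% (ROC_NIX_AURA_THRESH).
     */
    roc_nix_fc_npa_bp_cfg(roc_nix, pool_id, true, true, tc, 70);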

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/common/cnxk/roc_nix.h            |  6 ++++--
 drivers/common/cnxk/roc_nix_fc.c         | 17 ++++++++++++-----
 drivers/common/cnxk/roc_nix_inl.c        |  2 +-
 drivers/common/cnxk/roc_nix_priv.h       |  2 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  4 ++--
 5 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 50aef4fe85..fde8fe4ecc 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -15,6 +15,7 @@
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
 #define ROC_NIX_SQB_THRESH	      30U
 #define ROC_NIX_SQB_SLACK	      12U
+#define ROC_NIX_AURA_THRESH	      95U
 
 /* Reserved interface types for BPID allocation */
 #define ROC_NIX_INTF_TYPE_CGX  0
@@ -197,6 +198,7 @@ struct roc_nix_fc_cfg {
 			uint16_t cq_drop;
 			bool enable;
 			uint64_t pool;
+			uint64_t pool_drop_pct;
 		} rq_cfg;
 
 		struct {
@@ -849,8 +851,8 @@ uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
 
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
-void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
-				     uint8_t ena, uint8_t force, uint8_t tc);
+void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
+				     uint8_t force, uint8_t tc, uint64_t drop_percent);
 int __roc_api roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type,
 				  uint8_t bp_cnt, uint16_t *bpids);
 int __roc_api roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 69e331d67d..78f482ea52 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -283,6 +283,7 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct roc_nix_fc_cfg tmp;
+	uint64_t pool_drop_pct;
 	struct roc_nix_rq *rq;
 	int sso_ena = 0, rc;
 
@@ -293,13 +294,19 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return -EINVAL;
 
 	if (sso_ena) {
+		pool_drop_pct = fc_cfg->rq_cfg.pool_drop_pct;
+		/* Use default value for zero pct */
+		if (fc_cfg->rq_cfg.enable && !pool_drop_pct)
+			pool_drop_pct = ROC_NIX_AURA_THRESH;
+
 		roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
 				      fc_cfg->rq_cfg.enable, true,
-				      fc_cfg->rq_cfg.tc);
+				      fc_cfg->rq_cfg.tc, fc_cfg->rq_cfg.pool_drop_pct);
 
 		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc);
+					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc,
+					      fc_cfg->rq_cfg.pool_drop_pct);
 	}
 
 	/* Copy RQ config to CQ config as they are occupying same area */
@@ -462,8 +469,8 @@ nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid,
 #define NIX_BPID_INVALID 0xFFFF
 
 void
-roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
-		      uint8_t force, uint8_t tc)
+roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, uint8_t force,
+		      uint8_t tc, uint64_t drop_percent)
 {
 	uint32_t aura_id = roc_npa_aura_handle_to_aura(pool_id);
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
@@ -499,7 +506,7 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 	}
 
 	bp_intf = 1 << nix->is_nix1;
-	bp_thresh = NIX_RQ_AURA_THRESH(rsp->aura.limit >> rsp->aura.shift);
+	bp_thresh = NIX_RQ_AURA_THRESH(drop_percent, rsp->aura.limit >> rsp->aura.shift);
 
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index b16756d642..329ebf9405 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -263,7 +263,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 		 */
 		if (aura_setup && nix->rqs[0] && nix->rqs[0]->tc != ROC_NIX_PFC_CLASS_INVALID)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      true, true, nix->rqs[0]->tc);
+					      true, true, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
 	} else {
 		rc = nix_inl_global_meta_buffer_validate(idev, rq);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 7144d1ee10..f900a81d8a 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -20,7 +20,7 @@
 /* Apply LBP at 75% of actual BP */
 #define NIX_CQ_LPB_THRESH_FRAC	(75 * 16 / 100)
 #define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
-#define NIX_RQ_AURA_THRESH(x)	(((x)*95) / 100)
+#define NIX_RQ_AURA_THRESH(percent, val) (((val) * (percent)) / 100)
 
 /* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */
 #define CQ_CQE_THRESH_DEFAULT	0x1ULL
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 9a02026ea6..d39bed6e84 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -263,7 +263,7 @@ cnxk_sso_rx_adapter_queue_add(
 		if (rxq_sp->tx_pause)
 			roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 					      rxq_sp->qconf.mp->pool_id, true,
-					      dev->force_ena_bp, rxq_sp->tc);
+					      dev->force_ena_bp, rxq_sp->tc, ROC_NIX_AURA_THRESH);
 		cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
 		cnxk_eth_dev->nb_rxq_sso++;
 	}
@@ -307,7 +307,7 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 		rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
 		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 				      rxq_sp->qconf.mp->pool_id, false,
-				      dev->force_ena_bp, 0);
+				      dev->force_ena_bp, 0, ROC_NIX_AURA_THRESH);
 		cnxk_eth_dev->nb_rxq_sso--;
 
 		/* Enable drop_re if it was disabled earlier */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 08/32] common/cnxk: make aura flow control config more predictable
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (5 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 07/32] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 09/32] common/cnxk: update age drop statistics Nithin Dabilpuram
                     ` (23 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

Restrict shared BPID config to the case where force BP is enabled, and
make aura flow control config more predictable: on a collision, do not
disable it, but ignore the new config and log it.

Also remove BPID setup from the Rx adapter, as it is now evaluated and
configured every time the ethdev is stopped/started.
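
As a rough sketch, the intended flow after this patch looks like the
following (field and variable names taken from the diff, the surrounding
ethdev context is assumed):

    /* The Rx adapter now only propagates the force_ena_bp devarg; the
     * shared-aura BPID is resolved in roc_nix_fc_npa_bp_cfg() whenever
     * the ethdev is started.
     */
    cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;

    roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix, pool_id, true,
                          cnxk_eth_dev->nix.force_rx_aura_bp, tc,
                          ROC_NIX_AURA_THRESH);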

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix.h            |  1 +
 drivers/common/cnxk/roc_nix_fc.c         | 49 ++++++++++++------------
 drivers/common/cnxk/roc_nix_inl.c        |  2 +-
 drivers/common/cnxk/roc_npa.c            |  3 ++
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 13 +------
 5 files changed, 32 insertions(+), 36 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index fde8fe4ecc..2b576f0891 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -451,6 +451,7 @@ struct roc_nix {
 	bool custom_sa_action;
 	bool local_meta_aura_ena;
 	uint32_t meta_buf_sz;
+	bool force_rx_aura_bp;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 78f482ea52..21e3b7d5bd 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -300,13 +300,13 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 			pool_drop_pct = ROC_NIX_AURA_THRESH;
 
 		roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
-				      fc_cfg->rq_cfg.enable, true,
-				      fc_cfg->rq_cfg.tc, fc_cfg->rq_cfg.pool_drop_pct);
+				      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
+				      fc_cfg->rq_cfg.tc, pool_drop_pct);
 
 		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc,
-					      fc_cfg->rq_cfg.pool_drop_pct);
+					      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
+					      fc_cfg->rq_cfg.tc, pool_drop_pct);
 	}
 
 	/* Copy RQ config to CQ config as they are occupying same area */
@@ -479,7 +479,8 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 	struct npa_aq_enq_rsp *rsp;
 	uint8_t bp_thresh, bp_intf;
 	struct mbox *mbox;
-	int rc;
+	uint16_t bpid;
+	int rc, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return;
@@ -508,34 +509,25 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 	bp_intf = 1 << nix->is_nix1;
 	bp_thresh = NIX_RQ_AURA_THRESH(drop_percent, rsp->aura.limit >> rsp->aura.shift);
 
+	bpid = (rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
 	/* BP is already enabled. */
 	if (rsp->aura.bp_ena && ena) {
-		uint16_t bpid =
-			(rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid;
-
 		/* Disable BP if BPIDs don't match and couldn't add new BPID. */
 		if (bpid != nix->bpid[tc]) {
 			uint16_t bpid_new = NIX_BPID_INVALID;
 
-			if ((nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new) < 0) &&
-			    !force) {
-				plt_info("Disabling BP/FC on aura 0x%" PRIx64
-					 " as it shared across ports or tc",
+			if (force && !nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new)) {
+				plt_info("Setting up shared BPID on shared aura 0x%" PRIx64,
 					 pool_id);
 
-				if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
-					plt_nix_dbg(
-						"Disabling backpressue failed on aura 0x%" PRIx64,
-						pool_id);
-			}
-
-			/* Configure Aura with new BPID if it is allocated. */
-			if (bpid_new != NIX_BPID_INVALID) {
+				/* Configure Aura with new BPID if it is allocated. */
 				if (roc_npa_aura_bp_configure(pool_id, bpid_new, bp_intf, bp_thresh,
 							      true))
-					plt_nix_dbg(
-						"Enabling backpressue failed on aura 0x%" PRIx64,
+					plt_err("Enabling backpressue failed on aura 0x%" PRIx64,
 						pool_id);
+			} else {
+				plt_info("Ignoring port=%u tc=%u config on shared aura 0x%" PRIx64,
+					 roc_nix->port_id, tc, pool_id);
 			}
 		}
 
@@ -548,10 +540,19 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui
 
 	if (ena) {
 		if (roc_npa_aura_bp_configure(pool_id, nix->bpid[tc], bp_intf, bp_thresh, true))
-			plt_nix_dbg("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
+			plt_err("Enabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	} else {
+		bool found = !!force;
+
+		/* Don't disable if existing BPID is not within this port's list */
+		for (i = 0; i < nix->chan_cnt; i++)
+			if (bpid == nix->bpid[i])
+				found = true;
+		if (!found)
+			return;
+
 		if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false))
-			plt_nix_dbg("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
+			plt_err("Disabling backpressue failed on aura 0x%" PRIx64, pool_id);
 	}
 
 	return;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 329ebf9405..8592e1cb0b 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -263,7 +263,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 		 */
 		if (aura_setup && nix->rqs[0] && nix->rqs[0]->tc != ROC_NIX_PFC_CLASS_INVALID)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
-					      true, true, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
+					      true, false, nix->rqs[0]->tc, ROC_NIX_AURA_THRESH);
 	} else {
 		rc = nix_inl_global_meta_buffer_validate(idev, rq);
 		if (rc)
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index dd649812b4..377439c2ba 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -892,6 +892,9 @@ roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf,
 	struct mbox *mbox;
 	int rc = 0;
 
+	plt_npa_dbg("Setting BPID %u BP_INTF 0x%x BP_THRESH %u enable %u on aura %" PRIx64,
+		    bpid, bp_intf, bp_thresh, enable, aura_handle);
+
 	if (lf == NULL)
 		return NPA_ERR_PARAM;
 
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index d39bed6e84..8ad84198b9 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -260,10 +260,8 @@ cnxk_sso_rx_adapter_queue_add(
 							     false);
 		}
 
-		if (rxq_sp->tx_pause)
-			roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
-					      rxq_sp->qconf.mp->pool_id, true,
-					      dev->force_ena_bp, rxq_sp->tc, ROC_NIX_AURA_THRESH);
+		/* Propagate force bp devarg */
+		cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
 		cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
 		cnxk_eth_dev->nb_rxq_sso++;
 	}
@@ -293,8 +291,6 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 			      int32_t rx_queue_id)
 {
 	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
-	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-	struct cnxk_eth_rxq_sp *rxq_sp;
 	int i, rc = 0;
 
 	RTE_SET_USED(event_dev);
@@ -302,12 +298,7 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
 			cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
 	} else {
-		rxq_sp = cnxk_eth_rxq_to_sp(
-			eth_dev->data->rx_queues[rx_queue_id]);
 		rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
-		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
-				      rxq_sp->qconf.mp->pool_id, false,
-				      dev->force_ena_bp, 0, ROC_NIX_AURA_THRESH);
 		cnxk_eth_dev->nb_rxq_sso--;
 
 		/* Enable drop_re if it was disabled earlier */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 09/32] common/cnxk: update age drop statistics
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (6 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 08/32] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 10/32] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
                     ` (22 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Update age drop statistics and add telemetry statistics for age drops.
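
A short sketch of consuming the new counters (the per-queue Tx stats
getter is assumed to follow the existing ROC naming and is not part of
this patch; only the feature check and field names come from the diff):

    struct roc_nix_stats_queue qstats;

    /* Assumed Tx-queue stats fetch following the existing ROC pattern */
    if (roc_feature_nix_has_age_drop_stats() &&
        roc_nix_stats_queue_get(roc_nix, qid, 0 /* Tx */, &qstats) == 0)
            plt_info("sq%u: aged-out drops %" PRIu64 " pkts, %" PRIu64 " octets",
                     qid, qstats.tx_age_drop_pkts, qstats.tx_age_drop_octs);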

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/cnxk_telemetry_nix.c | 4 ++++
 drivers/common/cnxk/hw/nix.h             | 2 ++
 drivers/common/cnxk/roc_features.h       | 6 ++++++
 drivers/common/cnxk/roc_nix.h            | 2 ++
 drivers/common/cnxk/roc_nix_stats.c      | 4 ++++
 5 files changed, 18 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_telemetry_nix.c b/drivers/common/cnxk/cnxk_telemetry_nix.c
index b7285cf137..ccae5d7853 100644
--- a/drivers/common/cnxk/cnxk_telemetry_nix.c
+++ b/drivers/common/cnxk/cnxk_telemetry_nix.c
@@ -680,6 +680,10 @@ nix_sq_ctx(volatile void *qctx, struct plt_tel_data *d)
 	/* W12 */
 	CNXK_TEL_DICT_BF_PTR(d, ctx, pkts, w12_);
 
+	/* W13 */
+	CNXK_TEL_DICT_INT(d, ctx, aged_drop_octs, w13_);
+	CNXK_TEL_DICT_INT(d, ctx, aged_drop_pkts, w13_);
+
 	/* W14 */
 	CNXK_TEL_DICT_BF_PTR(d, ctx, drop_octs, w14_);
 
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 0d8f2a5e9b..fbdf1b64f6 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -363,6 +363,8 @@
 #define NIX_LF_SQ_OP_STATUS	 (0xa30ull)
 #define NIX_LF_SQ_OP_DROP_OCTS	 (0xa40ull)
 #define NIX_LF_SQ_OP_DROP_PKTS	 (0xa50ull)
+#define NIX_LF_SQ_OP_AGE_DROP_OCTS (0xa60ull) /* [CN10K, .) */
+#define NIX_LF_SQ_OP_AGE_DROP_PKTS (0xa70ull) /* [CN10K, .) */
 #define NIX_LF_CQ_OP_INT	 (0xb00ull)
 #define NIX_LF_CQ_OP_DOOR	 (0xb30ull)
 #define NIX_LF_CQ_OP_STATUS	 (0xb40ull)
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index c2893faa65..6fe01015d8 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -47,4 +47,10 @@ roc_feature_nix_has_rxchan_multi_bpid(void)
 		return true;
 	return false;
 }
+
+static inline bool
+roc_feature_nix_has_age_drop_stats(void)
+{
+	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+}
 #endif
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 2b576f0891..f84e473db6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -293,6 +293,8 @@ struct roc_nix_stats_queue {
 			uint64_t tx_octs;
 			uint64_t tx_drop_pkts;
 			uint64_t tx_drop_octs;
+			uint64_t tx_age_drop_pkts;
+			uint64_t tx_age_drop_octs;
 		};
 	};
 };
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index ca0e8ccb4f..1e93191a07 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -137,6 +137,10 @@ nix_stat_tx_queue_get(struct nix *nix, uint16_t qid,
 	qstats->tx_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_OCTS);
 	qstats->tx_drop_pkts = qstat_read(nix, qid, NIX_LF_SQ_OP_DROP_PKTS);
 	qstats->tx_drop_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_DROP_OCTS);
+	if (roc_feature_nix_has_age_drop_stats()) {
+		qstats->tx_age_drop_pkts = qstat_read(nix, qid, NIX_LF_SQ_OP_AGE_DROP_PKTS);
+		qstats->tx_age_drop_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_AGE_DROP_OCTS);
+	}
 }
 
 static int
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 10/32] common/cnxk: fetch eng caps for inl outb inst format
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (7 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 09/32] common/cnxk: update age drop statistics Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 11/32] common/cnxk: add receive error mask Nithin Dabilpuram
                     ` (21 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Fetch engine capabilities and use them along with a model check to
determine whether the inline outbound instruction format uses a NIX Tx
offset or an address.
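
The resulting check on the outbound path can be sketched as below
(BIT_ULL(35) is the capability bit this patch keys off; the session
variable and surrounding context are illustrative):

    /* Use the NIX Tx offset based instruction format only when both the
     * SoC and the SE engine microcode support it.
     */
    dev->outb.cpt_eng_caps = roc_nix_inl_eng_caps_get(nix);
    if (roc_feature_nix_has_inl_ipsec_mseg() &&
        (dev->outb.cpt_eng_caps & BIT_ULL(35)))
            sess_priv.nixtx_off = 1;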

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_cpt.h       |   3 +
 drivers/common/cnxk/roc_nix_inl.c   | 101 ++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.h   |   1 +
 drivers/common/cnxk/roc_nix_priv.h  |   1 +
 drivers/common/cnxk/version.map     |   1 +
 drivers/net/cnxk/cn10k_ethdev_sec.c |   3 +-
 drivers/net/cnxk/cnxk_ethdev.c      |   2 +
 drivers/net/cnxk/cnxk_ethdev.h      |   3 +
 8 files changed, 114 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 92a18711dc..910bd37a0c 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -12,6 +12,9 @@
 #define ROC_AE_CPT_BLOCK_TYPE1 0
 #define ROC_AE_CPT_BLOCK_TYPE2 1
 
+#define ROC_LOADFVC_MAJOR_OP 0x01UL
+#define ROC_LOADFVC_MINOR_OP 0x08UL
+
 /* Default engine groups */
 #define ROC_CPT_DFLT_ENG_GRP_SE	   0UL
 #define ROC_CPT_DFLT_ENG_GRP_SE_IE 1UL
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 8592e1cb0b..67f8ce9aa0 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -602,6 +602,96 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
 	return rc;
 }
 
+static void
+nix_inl_eng_caps_get(struct nix *nix)
+{
+	struct roc_cpt_lf *lf = nix->cpt_lf_base;
+	uintptr_t lmt_base = lf->lmt_base;
+	union cpt_res_s res, *hw_res;
+	struct cpt_inst_s inst;
+	uint64_t *rptr;
+
+	hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
+	if (hw_res == NULL) {
+		plt_err("Couldn't allocate memory for result address");
+		return;
+	}
+
+	rptr = plt_zmalloc(ROC_ALIGN, 0);
+	if (rptr == NULL) {
+		plt_err("Couldn't allocate memory for rptr");
+		plt_free(hw_res);
+		return;
+	}
+
+	/* Fill CPT_INST_S for LOAD_FVC/HW_CRYPTO_SUPPORT microcode op */
+	memset(&inst, 0, sizeof(struct cpt_inst_s));
+	inst.res_addr = (uint64_t)hw_res;
+	inst.rptr = (uint64_t)rptr;
+	inst.w4.s.opcode_major = ROC_LOADFVC_MAJOR_OP;
+	inst.w4.s.opcode_minor = ROC_LOADFVC_MINOR_OP;
+	inst.w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE;
+
+	/* Use 1 min timeout for the poll */
+	const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();
+
+	if (roc_model_is_cn9k()) {
+		uint64_t lmt_status;
+
+		hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
+		plt_io_wmb();
+
+		do {
+			roc_lmt_mov_seg((void *)lmt_base, &inst, 4);
+			lmt_status = roc_lmt_submit_ldeor(lf->io_addr);
+		} while (lmt_status != 0);
+
+		/* Wait until CPT instruction completes */
+		do {
+			res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+			if (unlikely(plt_tsc_cycles() > timeout))
+				break;
+		} while (res.cn9k.compcode == CPT_COMP_NOT_DONE);
+
+		if (res.cn9k.compcode != CPT_COMP_GOOD) {
+			plt_err("LOAD FVC operation timed out");
+			return;
+		}
+	} else {
+		uint64_t lmt_arg, io_addr;
+		uint16_t lmt_id;
+
+		hw_res->cn10k.compcode = CPT_COMP_NOT_DONE;
+
+		/* Use this lcore's LMT line as no one else is using it */
+		ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+		memcpy((void *)lmt_base, &inst, sizeof(inst));
+
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
+		io_addr = lf->io_addr | ROC_CN10K_CPT_INST_DW_M1 << 4;
+
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		plt_io_wmb();
+
+		/* Wait until CPT instruction completes */
+		do {
+			res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+			if (unlikely(plt_tsc_cycles() > timeout))
+				break;
+		} while (res.cn10k.compcode == CPT_COMP_NOT_DONE);
+
+		if (res.cn10k.compcode != CPT_COMP_GOOD || res.cn10k.uc_compcode) {
+			plt_err("LOAD FVC operation timed out");
+			goto exit;
+		}
+	}
+
+	nix->cpt_eng_caps = plt_be_to_cpu_64(*rptr);
+exit:
+	plt_free(rptr);
+	plt_free(hw_res);
+}
+
 int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
@@ -652,6 +742,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
 	}
+	nix->cpt_eng_caps = roc_cpt->hw_caps[CPT_ENG_TYPE_SE].u;
 
 	/* Setup Inbound SA table */
 	rc = nix_inl_inb_sa_tbl_setup(roc_nix);
@@ -871,6 +962,8 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 		}
 	}
 
+	/* Fetch engine capabilities */
+	nix_inl_eng_caps_get(nix);
 	return 0;
 
 lf_fini:
@@ -1571,3 +1664,11 @@ roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb)
 {
 	meta_pool_cb = cb;
 }
+
+uint64_t
+roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->cpt_eng_caps;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 6220ba6773..daa21a941a 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -212,5 +212,6 @@ int __roc_api roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
 int __roc_api roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr,
 				    void *sa_cptr, bool inb, uint16_t sa_len);
 void __roc_api roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file);
+uint64_t __roc_api roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix);
 
 #endif /* _ROC_NIX_INL_H_ */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index f900a81d8a..6872630dc8 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -209,6 +209,7 @@ struct nix {
 	uint16_t outb_se_ring_base;
 	uint16_t cpt_lbpid;
 	uint16_t cpt_nixbpid;
+	uint64_t cpt_eng_caps;
 	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index d740d9df81..809fd81b20 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -186,6 +186,7 @@ INTERNAL {
 	roc_nix_inl_dev_rq_put;
 	roc_nix_inl_dev_unlock;
 	roc_nix_inl_dev_xaq_realloc;
+	roc_nix_inl_eng_caps_get;
 	roc_nix_inl_inb_is_enabled;
 	roc_nix_inl_inb_init;
 	roc_nix_inl_inb_sa_base_get;
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 3c32de0f94..9625704ec1 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -809,7 +809,8 @@ cn10k_eth_sec_session_create(void *device,
 		sess_priv.chksum = (!ipsec->options.ip_csum_enable << 1 |
 				    !ipsec->options.l4_csum_enable);
 		sess_priv.dec_ttl = ipsec->options.dec_ttl;
-		if (roc_feature_nix_has_inl_ipsec_mseg())
+		if (roc_feature_nix_has_inl_ipsec_mseg() &&
+		    dev->outb.cpt_eng_caps & BIT_ULL(35))
 			sess_priv.nixtx_off = 1;
 
 		/* Pointer from eth_sec -> outb_sa */
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 7cc41e0c31..eeabf6edec 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -203,6 +203,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 			plt_err("Outbound fc sw mem alloc failed");
 			goto sa_bmap_free;
 		}
+
+		dev->outb.cpt_eng_caps = roc_nix_inl_eng_caps_get(nix);
 	}
 	return 0;
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 97537de17a..44e37d6550 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -287,6 +287,9 @@ struct cnxk_eth_dev_sec_outb {
 
 	/* Lock to synchronize sa setup/release */
 	rte_spinlock_t lock;
+
+	/* Engine caps */
+	uint64_t cpt_eng_caps;
 };
 
 struct cnxk_eth_dev {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 11/32] common/cnxk: add receive error mask
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (8 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 10/32] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 12/32] common/cnxk: fix null pointer dereference Nithin Dabilpuram
                     ` (20 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add support to configure the receive error mask for 106B0.
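
A minimal sketch of composing a mask with the new definitions (the
particular error bits chosen here are only an example; the starting RX
config flags come from the existing ROC_NIX_LF_RX_CFG_* set):

    uint64_t rx_cfg = ROC_NIX_LF_RX_CFG_LEN_OL3 | ROC_NIX_LF_RX_CFG_LEN_OL4;

    if (roc_feature_nix_has_drop_re_mask()) {
            /* Clear the per-error bits and re-set only the classes of
             * interest for this port (illustrative choice).
             */
            rx_cfg &= ROC_NIX_LF_RX_CFG_RX_ERROR_MASK;
            rx_cfg |= ROC_NIX_RE_FCS | ROC_NIX_RE_JABBER | ROC_NIX_RE_OL2_LENMISM;
    }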

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/roc_features.h |  6 ++++++
 drivers/common/cnxk/roc_nix.h      | 16 ++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 6fe01015d8..ce12a1dca4 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -16,6 +16,12 @@ roc_feature_nix_has_inl_ipsec_mseg(void)
 	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
 }
 
+static inline bool
+roc_feature_nix_has_drop_re_mask(void)
+{
+	return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+}
+
 static inline bool
 roc_feature_nix_has_inl_rq_mask(void)
 {
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index f84e473db6..37d0ed5ebe 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -242,6 +242,22 @@ struct roc_nix_eeprom_info {
 #define ROC_NIX_LF_RX_CFG_LEN_OL4     BIT_ULL(40)
 #define ROC_NIX_LF_RX_CFG_LEN_OL3     BIT_ULL(41)
 
+#define ROC_NIX_LF_RX_CFG_RX_ERROR_MASK 0xFFFFFFFFFFF80000
+#define ROC_NIX_RE_PARTIAL		BIT_ULL(1)
+#define ROC_NIX_RE_JABBER		BIT_ULL(2)
+#define ROC_NIX_RE_CRC8_PCH		BIT_ULL(5)
+#define ROC_NIX_RE_CNC_INV		BIT_ULL(6)
+#define ROC_NIX_RE_FCS			BIT_ULL(7)
+#define ROC_NIX_RE_FCS_RCV		BIT_ULL(8)
+#define ROC_NIX_RE_TERMINATE		BIT_ULL(9)
+#define ROC_NIX_RE_MACSEC		BIT_ULL(10)
+#define ROC_NIX_RE_RX_CTL		BIT_ULL(11)
+#define ROC_NIX_RE_SKIP			BIT_ULL(12)
+#define ROC_NIX_RE_DMAPKT		BIT_ULL(15)
+#define ROC_NIX_RE_UNDERSIZE		BIT_ULL(16)
+#define ROC_NIX_RE_OVERSIZE		BIT_ULL(17)
+#define ROC_NIX_RE_OL2_LENMISM		BIT_ULL(18)
+
 /* Group 0 will be used for RSS, 1 -7 will be used for npc_flow RSS action*/
 #define ROC_NIX_RSS_GROUP_DEFAULT    0
 #define ROC_NIX_RSS_GRPS	     8
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 12/32] common/cnxk: fix null pointer dereference
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (9 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 11/32] common/cnxk: add receive error mask Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 13/32] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
                     ` (19 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Gowrishankar Muthukrishnan, stable

From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>

Fix null pointer dereferences reported by Klocwork.

Fixes: 4398c4092f3d ("common/cnxk: dump inline device RQ context")
Fixes: 79dc6f324e82 ("common/cnxk: add inline function for statistics")
cc: stable@dpdk.org

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/common/cnxk/roc_nix_debug.c   | 8 +++++++-
 drivers/common/cnxk/roc_nix_inl_dev.c | 2 +-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 399d0d7eae..a1c3db284b 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -733,7 +733,13 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file)
 	inl_rq = roc_nix_inl_dev_rq(roc_nix);
 	if (inl_rq) {
 		struct idev_cfg *idev = idev_get_cfg();
-		struct nix_inl_dev *inl_dev = idev->nix_inl_dev;
+		struct nix_inl_dev *inl_dev = NULL;
+
+		if (idev && idev->nix_inl_dev)
+			inl_dev = idev->nix_inl_dev;
+
+		if (!inl_dev)
+			return -EINVAL;
 
 		rc = nix_q_ctx_get(&inl_dev->dev, NIX_AQ_CTYPE_RQ, inl_rq->qid, &ctx);
 		if (rc) {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 0df148c3ed..ca948d3bc7 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -843,7 +843,7 @@ roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats)
 	if (stats == NULL)
 		return NIX_ERR_PARAM;
 
-	if (!idev && idev->nix_inl_dev)
+	if (idev && idev->nix_inl_dev)
 		inl_dev = idev->nix_inl_dev;
 
 	if (!inl_dev)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 13/32] common/cnxk: fix parameter in NIX dump
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (10 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 12/32] common/cnxk: fix null pointer dereference Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 14/32] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
                     ` (18 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Gowrishankar Muthukrishnan, stable

From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>

Fix the parameter passed to nix_dump() to match what the format
specifier expects: the shifted bitfield values promote to int, while
the %x conversion expects unsigned int, so the combined lbpid value is
cast explicitly.

Fixes: d2f168dfa5de ("common/cnxk: support 10K B0 for inline IPsec")
cc: stable@dpdk.org

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/common/cnxk/roc_nix_debug.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index a1c3db284b..8c7d902f1e 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -664,8 +664,8 @@ nix_lf_cq_dump(__io struct nix_cq_ctx_s *ctx, FILE *file)
 	nix_dump(file,
 		 "W1: lbpid_high \t\t\t0x%03x\nW1: lbpid_med \t\t\t0x%03x\n"
 		 "W1: lbpid_low \t\t\t0x%03x\n(W1: lbpid) \t\t\t0x%03x\n",
-		 ctx->lbpid_high, ctx->lbpid_med, ctx->lbpid_low,
-		 ctx->lbpid_high << 6 | ctx->lbpid_med << 3 | ctx->lbpid_low);
+		 ctx->lbpid_high, ctx->lbpid_med, ctx->lbpid_low, (unsigned int)
+		 (ctx->lbpid_high << 6 | ctx->lbpid_med << 3 | ctx->lbpid_low));
 	nix_dump(file, "W1: lbp_ena \t\t\t\t%d\n", ctx->lbp_ena);
 
 	nix_dump(file, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d",
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 14/32] common/cnxk: set relchan in TL4 config for each SDP queue
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (11 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 13/32] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 15/32] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
                     ` (17 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Veerasenareddy Burru

From: Veerasenareddy Burru <vburru@marvell.com>

Set a distinct relchan in each TL4 queue connected to SDP.

Currently relchan in the TL4 SDP config is set to 0 for all
SDP-NIX queues. Each TL4 queue for SDP needs to be configured
with a distinct channel so that SDP can provide per-channel
backpressure to NIX.

Signed-off-by: Veerasenareddy Burru <vburru@marvell.com>
---
 drivers/common/cnxk/roc_nix_tm_utils.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 5864833109..9ede1bebe7 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -582,6 +582,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
 
 		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
 		if (nix->sdp_link) {
+			relchan = nix->tx_chan_base & 0xff;
 			plt_tm_dbg("relchan=%u schq=%u tx_chan_cnt=%u\n", relchan, schq,
 				   nix->tx_chan_cnt);
 			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 15/32] common/cnxk: avoid STALL with dual rate on CNF95N
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (12 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 14/32] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 16/32] common/cnxk: update errata info Nithin Dabilpuram
                     ` (16 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Due to an errata, RED_ALGO STALL with dual shaper rate hangs the
CNF95N and CNF95O platforms. Set RED_ALGO to DISCARD when dual
shaper rate is used on CNF95N and CNF95O.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix_tm_utils.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 9ede1bebe7..3840d6d457 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -1267,7 +1267,8 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 	tm_node->red_algo = roc_prof->red_algo;
 
 	/* C0 doesn't support STALL when both PIR & CIR are enabled */
-	if (roc_model_is_cn96_cx()) {
+	if (roc_model_is_cn96_cx() || roc_model_is_cnf95xxn_a0() || roc_model_is_cnf95xxo_a0() ||
+	    roc_model_is_cnf95xxn_a1() || roc_model_is_cnf95xxn_b0()) {
 		nix_tm_shaper_conf_get(profile, &cir, &pir);
 
 		if (pir.rate && cir.rate)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 16/32] common/cnxk: update errata info
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (13 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 15/32] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 17/32] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
                     ` (15 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Update errata info based on CN10KA B0 and CN10KB A0.
Also remove the duplicate model check roc_model_is_cn103xx().

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_errata.h   | 20 ++++++++------------
 drivers/common/cnxk/roc_features.h |  2 +-
 drivers/common/cnxk/roc_model.h    |  6 ------
 3 files changed, 9 insertions(+), 19 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index 1333bde629..22d2406e94 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -6,7 +6,7 @@
 
 #include "roc_model.h"
 
-/* Errata IPBUNIXRX-40129 */
+/* Errata IPBUNIXRX-40129, IPBUNIXRX-40179 */
 static inline bool
 roc_errata_nix_has_no_drop_re(void)
 {
@@ -41,7 +41,8 @@ static inline bool
 roc_errata_nix_has_no_vwqe_flush_op(void)
 {
 	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
-		roc_model_is_cnf10kb_a0());
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1() || roc_model_is_cn10ka_b0() ||
+		roc_model_is_cn10kb_a0());
 }
 
 /* Errata IPBURVUM-38481 */
@@ -51,13 +52,6 @@ roc_errata_ruvm_has_no_interrupt_with_msixen(void)
 	return true;
 }
 
-/* Errata IPBUNIXTX-39300 */
-static inline bool
-roc_errata_nix_has_assign_incorrect_qintidx(void)
-{
-	return true;
-}
-
 /* Errata IPBUCPT-38551 */
 static inline bool
 roc_errata_cpt_has_use_incorrect_ldwb(void)
@@ -69,17 +63,19 @@ roc_errata_cpt_has_use_incorrect_ldwb(void)
 static inline bool
 roc_errata_nix_has_overwrite_incorrect_sq_intr(void)
 {
-	return true;
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1());
 }
 
 /* Errata IPBUNIXTX-39248 */
 static inline bool
 roc_errata_nix_has_perf_issue_on_stats_update(void)
 {
-	return true;
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
+		roc_model_is_cnf10kb_a0() || roc_model_is_cn10ka_a1());
 }
 
-/* Errata IPBUCPT-38726, IPBUCPT-38727 */
+/* Errata IPBUCPT-38736, IPBUCPT-38737 */
 static inline bool
 roc_errata_cpt_hang_on_x2p_bp(void)
 {
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index ce12a1dca4..36ef315f5a 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -7,7 +7,7 @@
 static inline bool
 roc_feature_sso_has_stash(void)
 {
-	return (roc_model_is_cn103xx() | roc_model_is_cn10ka_b0()) ? true : false;
+	return (roc_model_is_cn10kb() | roc_model_is_cn10ka_b0()) ? true : false;
 }
 
 static inline bool
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index f010cc4a44..58046af193 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -258,12 +258,6 @@ roc_model_is_cn10kb(void)
 	return roc_model->flag & ROC_MODEL_CN103xx;
 }
 
-static inline uint64_t
-roc_model_is_cn103xx(void)
-{
-	return roc_model->flag & ROC_MODEL_CN103xx;
-}
-
 static inline bool
 roc_env_is_hw(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 17/32] common/cnxk: sync between mbox up and down messages
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (14 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 16/32] common/cnxk: update errata info Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 18/32] common/cnxk: add more comments to mbox code Nithin Dabilpuram
                     ` (14 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

An issue is observed where, if the PF is bound to DPDK and the VF is
a kernel netdev, the VF does not respond to link events. This is due
to a recent design change in the kernel where the sender checks
whether the previous interrupt has been handled before triggering the
current one, by waiting for the mailbox data register to become zero.
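
A minimal sketch of the resulting send pattern (taken from the
af_pf_wait_msg() hunk below; the patch itself does not act on the
return value, the debug log here is illustrative only):

    /* Trigger the UP message towards the VF, then poll the VF mailbox
     * data register so the next message is not sent before the previous
     * interrupt has been consumed (register reads back as zero).
     */
    mbox_msg_send_up(&dev->mbox_vfpf_up, vf);
    if (!mbox_wait_for_zero(&dev->mbox_vfpf_up, vf))
        plt_base_dbg("vf:%d did not ack previous mbox message in time", vf);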

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_dev.c       | 20 ++++++++-
 drivers/common/cnxk/roc_mbox.c      | 64 +++++++++++++++++++++--------
 drivers/common/cnxk/roc_mbox.h      | 15 +++++++
 drivers/common/cnxk/roc_mbox_priv.h |  6 ++-
 4 files changed, 84 insertions(+), 21 deletions(-)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 5e4e564ebe..e5a5cd7c10 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -195,7 +195,8 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 				vf_msg->rc = msg->rc;
 				vf_msg->pcifunc = msg->pcifunc;
 				/* Send to VF */
-				mbox_msg_send(&dev->mbox_vfpf_up, vf);
+				mbox_msg_send_up(&dev->mbox_vfpf_up, vf);
+				mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
 			}
 		}
 
@@ -498,6 +499,7 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
 
 		/* Send to VF */
 		mbox_msg_send(vf_mbox, vf);
+		mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
 	}
 }
 
@@ -631,6 +633,7 @@ static void
 roc_pf_vf_mbox_irq(void *param)
 {
 	struct dev *dev = param;
+	uint64_t mbox_data;
 	uint64_t intr;
 
 	intr = plt_read64(dev->bar2 + RVU_VF_INT);
@@ -640,6 +643,13 @@ roc_pf_vf_mbox_irq(void *param)
 	plt_write64(intr, dev->bar2 + RVU_VF_INT);
 	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
 
+	/* Reading for UP/DOWN message, next message sending will be delayed
+	 * by 1ms until this region is zeroed mbox_wait_for_zero()
+	 */
+	mbox_data = plt_read64(dev->bar2 + RVU_VF_VFPF_MBOX0);
+	if (mbox_data)
+		plt_write64(!mbox_data, dev->bar2 + RVU_VF_VFPF_MBOX0);
+
 	/* First process all configuration messages */
 	process_msgs(dev, dev->mbox);
 
@@ -651,6 +661,7 @@ static void
 roc_af_pf_mbox_irq(void *param)
 {
 	struct dev *dev = param;
+	uint64_t mbox_data;
 	uint64_t intr;
 
 	intr = plt_read64(dev->bar2 + RVU_PF_INT);
@@ -660,6 +671,13 @@ roc_af_pf_mbox_irq(void *param)
 	plt_write64(intr, dev->bar2 + RVU_PF_INT);
 	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
 
+	/* Reading for UP/DOWN message, next message sending will be delayed
+	 * by 1ms until this region is zeroed mbox_wait_for_zero()
+	 */
+	mbox_data = plt_read64(dev->bar2 + RVU_PF_PFAF_MBOX0);
+	if (mbox_data)
+		plt_write64(!mbox_data, dev->bar2 + RVU_PF_PFAF_MBOX0);
+
 	/* First process all configuration messages */
 	process_msgs(dev, dev->mbox);
 
diff --git a/drivers/common/cnxk/roc_mbox.c b/drivers/common/cnxk/roc_mbox.c
index 7dcd188ca7..5338a960d9 100644
--- a/drivers/common/cnxk/roc_mbox.c
+++ b/drivers/common/cnxk/roc_mbox.c
@@ -10,18 +10,6 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
-#define RVU_AF_AFPF_MBOX0 (0x02000)
-#define RVU_AF_AFPF_MBOX1 (0x02008)
-
-#define RVU_PF_PFAF_MBOX0 (0xC00)
-#define RVU_PF_PFAF_MBOX1 (0xC08)
-
-#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
-#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
-
-#define RVU_VF_VFPF_MBOX0 (0x0000)
-#define RVU_VF_VFPF_MBOX1 (0x0008)
-
 /* RCLK, SCLK in MHz */
 uint16_t dev_rclk_freq;
 uint16_t dev_sclk_freq;
@@ -194,10 +182,31 @@ mbox_alloc_msg_rsp(struct mbox *mbox, int devid, int size, int size_rsp)
 
 /**
  * @internal
- * Send a mailbox message
+ * Synchronization between UP and DOWN messages
  */
-void
-mbox_msg_send(struct mbox *mbox, int devid)
+bool
+mbox_wait_for_zero(struct mbox *mbox, int devid)
+{
+	uint64_t data;
+
+	data = plt_read64((volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+
+	/* If data is non-zero wait for ~1ms and return to caller
+	 * whether data has changed to zero or not after the wait.
+	 */
+	if (data)
+		usleep(1000);
+	else
+		return true;
+
+	data = plt_read64((volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+	return data == 0;
+}
+
+static void
+mbox_msg_send_data(struct mbox *mbox, int devid, uint8_t data)
 {
 	struct mbox_dev *mdev = &mbox->dev[devid];
 	struct mbox_hdr *tx_hdr =
@@ -223,9 +232,28 @@ mbox_msg_send(struct mbox *mbox, int devid)
 	/* The interrupt should be fired after num_msgs is written
 	 * to the shared memory
 	 */
-	plt_write64(1, (volatile void *)(mbox->reg_base +
-					 (mbox->trigger |
-					  (devid << mbox->tr_shift))));
+	plt_write64(data, (volatile void *)(mbox->reg_base +
+				(mbox->trigger | (devid << mbox->tr_shift))));
+}
+
+/**
+ * @internal
+ * Send a mailbox message
+ */
+void
+mbox_msg_send(struct mbox *mbox, int devid)
+{
+	mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
+}
+
+/**
+ * @internal
+ * Send an UP mailbox message
+ */
+void
+mbox_msg_send_up(struct mbox *mbox, int devid)
+{
+	mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
 }
 
 /**
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 3d5746b9b8..93c5451c0f 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -35,6 +35,21 @@ struct mbox_msghdr {
 	int __io rc; /* Msg processed response code */
 };
 
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+
+#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
+
+#define RVU_VF_VFPF_MBOX0 (0x0000)
+#define RVU_VF_VFPF_MBOX1 (0x0008)
+
+#define MBOX_DOWN_MSG 1
+#define MBOX_UP_MSG   2
+
 /* Mailbox message types */
 #define MBOX_MSG_MASK	 0xFFFF
 #define MBOX_MSG_INVALID 0xFFFE
diff --git a/drivers/common/cnxk/roc_mbox_priv.h b/drivers/common/cnxk/roc_mbox_priv.h
index 4fafca6f72..354c8fa52a 100644
--- a/drivers/common/cnxk/roc_mbox_priv.h
+++ b/drivers/common/cnxk/roc_mbox_priv.h
@@ -71,10 +71,12 @@ struct mbox {
 const char *mbox_id2name(uint16_t id);
 int mbox_id2size(uint16_t id);
 void mbox_reset(struct mbox *mbox, int devid);
-int mbox_init(struct mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
-	      int direction, int ndevsi, uint64_t intr_offset);
+int mbox_init(struct mbox *mbox, uintptr_t hwbase, uintptr_t reg_base, int direction, int ndevsi,
+	      uint64_t intr_offset);
 void mbox_fini(struct mbox *mbox);
 void mbox_msg_send(struct mbox *mbox, int devid);
+void mbox_msg_send_up(struct mbox *mbox, int devid);
+bool mbox_wait_for_zero(struct mbox *mbox, int devid);
 int mbox_wait_for_rsp(struct mbox *mbox, int devid);
 int mbox_wait_for_rsp_tmo(struct mbox *mbox, int devid, uint32_t tmo);
 int mbox_get_rsp(struct mbox *mbox, int devid, void **msg);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 18/32] common/cnxk: add more comments to mbox code
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (15 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 17/32] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 19/32] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
                     ` (13 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add more comments to the mbox routines to make the message flow
easier to follow.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_dev.c  | 20 +++++++++++++++++---
 drivers/common/cnxk/roc_mbox.c |  5 +++++
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index e5a5cd7c10..3125f9dda2 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -98,6 +98,9 @@ pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
 	return rc;
 }
 
+/* PF will send the messages to AF and wait for responses and forward the
+ * responses to VF.
+ */
 static int
 af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 {
@@ -115,9 +118,10 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 	/* We need to disable PF interrupts. We are in timer interrupt */
 	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
 
-	/* Send message */
+	/* Send message to AF */
 	mbox_msg_send(mbox, 0);
 
+	/* Wait for AF response */
 	do {
 		plt_delay_ms(sleep);
 		timeout++;
@@ -206,6 +210,7 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 	return req_hdr->num_msgs;
 }
 
+/* PF receives mbox DOWN messages from VF and forwards to AF */
 static int
 vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 {
@@ -274,6 +279,7 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 	if (routed > 0) {
 		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
 			     dev->pf, routed, vf);
+		/* PF will send the messages to AF and wait for responses */
 		af_pf_wait_msg(dev, vf, routed);
 		mbox_reset(dev->mbox, 0);
 	}
@@ -289,6 +295,7 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 	return i;
 }
 
+/* VF sends Ack to PF's UP messages */
 static int
 vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
 {
@@ -339,6 +346,7 @@ vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
 	return i;
 }
 
+/* PF handling messages from VF */
 static void
 roc_vf_pf_mbox_handle_msg(void *param)
 {
@@ -352,8 +360,9 @@ roc_vf_pf_mbox_handle_msg(void *param)
 		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
 			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
 				     dev->pf, dev->vf);
+			/* VF initiated down messages */
 			vf_pf_process_msgs(dev, vf);
-			/* UP messages */
+			/* VF replies to PF's UP messages */
 			vf_pf_process_up_msgs(dev, vf);
 			dev->intr.bits[vf / max_bits] &=
 				~(BIT_ULL(vf % max_bits));
@@ -362,6 +371,7 @@ roc_vf_pf_mbox_handle_msg(void *param)
 	dev->timer_set = 0;
 }
 
+/* IRQ to PF from VF - PF context (interrupt thread) */
 static void
 roc_vf_pf_mbox_irq(void *param)
 {
@@ -392,6 +402,7 @@ roc_vf_pf_mbox_irq(void *param)
 	}
 }
 
+/* Received response from AF (PF context) / PF (VF context) */
 static void
 process_msgs(struct dev *dev, struct mbox *mbox)
 {
@@ -451,7 +462,7 @@ process_msgs(struct dev *dev, struct mbox *mbox)
 	}
 
 	mbox_reset(mbox, 0);
-	/* Update acked if someone is waiting a message */
+	/* Update acked if someone is waiting a message - mbox_wait is waiting */
 	mdev->msgs_acked = msgs_acked;
 	plt_wmb();
 }
@@ -597,6 +608,7 @@ mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
 	return -ENODEV;
 }
 
+/* Received up messages from AF (PF context) / PF (in context) */
 static void
 process_msgs_up(struct dev *dev, struct mbox *mbox)
 {
@@ -629,6 +641,7 @@ process_msgs_up(struct dev *dev, struct mbox *mbox)
 	}
 }
 
+/* IRQ to VF from PF - VF context (interrupt thread) */
 static void
 roc_pf_vf_mbox_irq(void *param)
 {
@@ -657,6 +670,7 @@ roc_pf_vf_mbox_irq(void *param)
 	process_msgs_up(dev, &dev->mbox_up);
 }
 
+/* IRQ to PF from AF - PF context (interrupt thread) */
 static void
 roc_af_pf_mbox_irq(void *param)
 {
diff --git a/drivers/common/cnxk/roc_mbox.c b/drivers/common/cnxk/roc_mbox.c
index 5338a960d9..c91fa63e83 100644
--- a/drivers/common/cnxk/roc_mbox.c
+++ b/drivers/common/cnxk/roc_mbox.c
@@ -350,6 +350,11 @@ mbox_wait(struct mbox *mbox, int devid, uint32_t rst_timo)
 	uint32_t timeout = 0, sleep = 1;
 
 	rst_timo = rst_timo * 1000; /* Milli seconds to micro seconds */
+
+	/* Waiting for mdev->msgs_acked tp become equal to mdev->num_msgs,
+	 * mdev->msgs_acked are incremented at process_msgs() in interrupt
+	 * thread context.
+	 */
 	while (mdev->num_msgs > mdev->msgs_acked) {
 		plt_delay_us(sleep);
 		timeout += sleep;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 19/32] common/cnxk: add CN105xxN B0 model
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (16 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 18/32] common/cnxk: add more comments to mbox code Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 20/32] common/cnxk: access valid pass value Nithin Dabilpuram
                     ` (12 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add support for the CN105xxN B0 pass.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c | 1 +
 drivers/common/cnxk/roc_model.h | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index e4767ed91f..f4f2a38e70 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -66,6 +66,7 @@ static const struct model_db {
 	{VENDOR_ARM, PART_105xx, 0, 1, ROC_MODEL_CNF105xx_A1, "cnf10ka_a1"},
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
+	{VENDOR_ARM, PART_105xxN, 1, 0, ROC_MODEL_CNF105xxN_B0, "cnf10kb_b0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 0, ROC_MODEL_CN98xx_A0, "cn98xx_a0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 1, ROC_MODEL_CN98xx_A1, "cn98xx_a1"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 0, ROC_MODEL_CN96xx_A0, "cn96xx_a0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index 58046af193..b6dab4f64e 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -31,6 +31,7 @@ struct roc_model {
 #define ROC_MODEL_CN106xx_A1   BIT_ULL(24)
 #define ROC_MODEL_CNF105xx_A1  BIT_ULL(25)
 #define ROC_MODEL_CN106xx_B0   BIT_ULL(26)
+#define ROC_MODEL_CNF105xxN_B0 BIT_ULL(27)
 /* Following flags describe platform code is running on */
 #define ROC_ENV_HW   BIT_ULL(61)
 #define ROC_ENV_EMUL BIT_ULL(62)
@@ -57,7 +58,7 @@ struct roc_model {
 
 #define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0 | ROC_MODEL_CN106xx_A1 | ROC_MODEL_CN106xx_B0)
 #define ROC_MODEL_CNF105xx  (ROC_MODEL_CNF105xx_A0 | ROC_MODEL_CNF105xx_A1)
-#define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0)
+#define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0 | ROC_MODEL_CNF105xxN_B0)
 #define ROC_MODEL_CN103xx   (ROC_MODEL_CN103xx_A0)
 #define ROC_MODEL_CN10K                                                        \
 	(ROC_MODEL_CN106xx | ROC_MODEL_CNF105xx | ROC_MODEL_CNF105xxN |        \
@@ -252,6 +253,12 @@ roc_model_is_cnf10kb_a0(void)
 	return roc_model->flag & ROC_MODEL_CNF105xxN_A0;
 }
 
+static inline uint64_t
+roc_model_is_cnf10kb_b0(void)
+{
+	return roc_model->flag & ROC_MODEL_CNF105xxN_B0;
+}
+
 static inline uint64_t
 roc_model_is_cn10kb(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 20/32] common/cnxk: access valid pass value
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (17 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 19/32] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 21/32] net/cnxk: add receive error mask Nithin Dabilpuram
                     ` (11 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Hanumanth Pothula

From: Hanumanth Pothula <hpothula@marvell.com>

There is a possibility of accessing an invalid pass value on
RVU device lookup failure, as the return value is dropped.

Hence propagate the return value of the RVU device lookup to make
sure a valid pass value is accessed.
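
A hypothetical caller sketch (not part of this patch) showing what the
propagated return value enables: a lookup failure can now be detected
instead of consuming an uninitialized pass value.

    uint32_t part = 0, pass = 0;

    /* cn10k_part_pass_get() now returns the rvu_device_lookup() status */
    if (cn10k_part_pass_get(&part, &pass) < 0)
        plt_err("RVU device lookup failed, part/pass unavailable");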

Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
---
 drivers/common/cnxk/roc_model.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index f4f2a38e70..6dc2afe7f0 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -148,6 +148,7 @@ cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
 #define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
 	char dirname[PATH_MAX];
 	struct dirent *e;
+	int ret = -1;
 	DIR *dir;
 
 	dir = opendir(SYSFS_PCI_DEVICES);
@@ -165,18 +166,19 @@ cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
 			 e->d_name);
 
 		/* Lookup for rvu device and get part pass information */
-		if (!rvu_device_lookup(dirname, part, pass))
+		ret = rvu_device_lookup(dirname, part, pass);
+		if (!ret)
 			break;
 	}
 
 	closedir(dir);
-	return 0;
+	return ret;
 }
 
 static bool
 populate_model(struct roc_model *model, uint32_t midr)
 {
-	uint32_t impl, major, part, minor, pass;
+	uint32_t impl, major, part, minor, pass = 0;
 	bool found = false;
 	size_t i;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 21/32] net/cnxk: add receive error mask
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (18 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 20/32] common/cnxk: access valid pass value Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 22/32] common/cnxk: support of 1-N pool-aura per NIX LF Nithin Dabilpuram
                     ` (10 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Receive errors related to MACsec and USXGMII are masked for
CN10KA B0 and CN10KB.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index eeabf6edec..3ceda8c8f9 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1267,6 +1267,11 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
+	rx_cfg &= (ROC_NIX_LF_RX_CFG_RX_ERROR_MASK);
+
+	if (roc_feature_nix_has_drop_re_mask())
+		rx_cfg |= (ROC_NIX_RE_CRC8_PCH | ROC_NIX_RE_MACSEC);
+
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
 		/* Disable drop re if rx offload security is enabled and
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 22/32] common/cnxk: support of 1-N pool-aura per NIX LF
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (19 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 21/32] net/cnxk: add receive error mask Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 23/32] net/cnxk: support for inbound without inline dev mode Nithin Dabilpuram
                     ` (9 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev, Rahul Bhansali

From: Rahul Bhansali <rbhansali@marvell.com>

Add support for 1:N pool:aura per NIX LF when the inl_cpt_channel
devargs is set for the inline device; otherwise it creates a 1:1
pool:aura for CN103xx/CN106xx B0 SoCs.

With 1:N, a global pool is created with Aura 0, and an individual
aura is created per NIX and mapped to this global pool.
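
A rough sketch of the resulting callback flow (function names are from
the diffs below; global_mp, buf_sz and nb_bufs are placeholders, and the
pool name is shown for port 0):

    /* Ethdev init registers the callback once (cnxk_ethdev.c hunk below) */
    roc_nix_inl_custom_meta_pool_cb_register(cnxk_nix_inl_custom_meta_pool_cb);

    /* Per NIX LF, ROC calls back to carve a per-port aura that is linked
     * to the shared Aura-0 meta pool (destroy == false on create).
     */
    uint64_t aura_handle = 0;
    uintptr_t per_port_mp = 0;
    int rc = cnxk_nix_inl_custom_meta_pool_cb(global_mp, &per_port_mp,
                                              "NIX_INL_META_POOL_1",
                                              &aura_handle, buf_sz,
                                              nb_bufs, false);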

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/common/cnxk/roc_idev_priv.h |   1 +
 drivers/common/cnxk/roc_nix.h       |   1 +
 drivers/common/cnxk/roc_nix_inl.c   | 178 ++++++++++++++++++++++++----
 drivers/common/cnxk/roc_nix_inl.h   |   4 +
 drivers/common/cnxk/version.map     |   1 +
 drivers/event/cnxk/cn10k_worker.h   |   9 +-
 drivers/net/cnxk/cn10k_rx_select.c  |   5 +-
 drivers/net/cnxk/cnxk_ethdev.c      |   3 +
 drivers/net/cnxk/cnxk_ethdev.h      |   3 +
 drivers/net/cnxk/cnxk_ethdev_sec.c  |  62 ++++++++++
 10 files changed, 240 insertions(+), 27 deletions(-)

diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index d83522799f..4983578fc6 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -13,6 +13,7 @@ struct nix_inl_dev;
 
 struct idev_nix_inl_cfg {
 	uint64_t meta_aura;
+	uintptr_t meta_mempool;
 	uint32_t nb_bufs;
 	uint32_t buf_sz;
 	uint32_t refs;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 37d0ed5ebe..548854952b 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -470,6 +470,7 @@ struct roc_nix {
 	bool local_meta_aura_ena;
 	uint32_t meta_buf_sz;
 	bool force_rx_aura_bp;
+	bool custom_meta_aura_ena;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 67f8ce9aa0..69f658ba87 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -7,6 +7,7 @@
 
 uint32_t soft_exp_consumer_cnt;
 roc_nix_inl_meta_pool_cb_t meta_pool_cb;
+roc_nix_inl_custom_meta_pool_cb_t custom_meta_pool_cb;
 
 PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2);
@@ -33,13 +34,14 @@ nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
 		return -EINVAL;
 
 	inl_cfg = &idev->inl_cfg;
-	if (roc_nix->local_meta_aura_ena) {
+
+	if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
+		meta_aura = &inl_cfg->meta_aura;
+	} else {
 		meta_aura = &roc_nix->meta_aura_handle;
 		snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
 			 roc_nix->port_id + 1);
 		mp_name = mempool_name;
-	} else {
-		meta_aura = &inl_cfg->meta_aura;
 	}
 
 	/* Destroy existing Meta aura */
@@ -72,7 +74,7 @@ nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
 
 static int
 nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
-			 uint64_t *meta_aura)
+			 uint64_t *meta_aura, bool is_local_metaaura)
 {
 	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
 	struct idev_nix_inl_cfg *inl_cfg;
@@ -89,7 +91,7 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	inl_cfg = &idev->inl_cfg;
 	nix_inl_dev = idev->nix_inl_dev;
 
-	if (roc_nix->local_meta_aura_ena) {
+	if (is_local_metaaura) {
 		/* Per LF Meta Aura */
 		inl_rq_id = nix_inl_dev->nb_rqs > 1 ? port_id : 0;
 		inl_rq = &nix_inl_dev->rqs[inl_rq_id];
@@ -134,15 +136,107 @@ nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_
 	plt_nix_dbg("Created meta aura %p(%s)for port %d", (void *)*meta_aura, mp_name,
 		    roc_nix->port_id);
 
-	if (!roc_nix->local_meta_aura_ena) {
+	if (!is_local_metaaura) {
 		inl_cfg->buf_sz = buf_sz;
 		inl_cfg->nb_bufs = nb_bufs;
+		inl_cfg->meta_mempool = mp;
 	} else
 		roc_nix->buf_sz = buf_sz;
 
 	return 0;
 }
 
+static int
+nix_inl_custom_meta_aura_destroy(struct roc_nix *roc_nix)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	char mempool_name[24] = {'\0'};
+	char *mp_name = NULL;
+	uint64_t *meta_aura;
+	int rc;
+
+	if (!idev)
+		return -EINVAL;
+
+	inl_cfg = &idev->inl_cfg;
+	meta_aura = &roc_nix->meta_aura_handle;
+	snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
+		 roc_nix->port_id + 1);
+	mp_name = mempool_name;
+
+	/* Destroy existing Meta aura */
+	if (*meta_aura) {
+		uint64_t avail, limit;
+
+		/* Check if all buffers are back to pool */
+		avail = roc_npa_aura_op_available(*meta_aura);
+		limit = roc_npa_aura_op_limit_get(*meta_aura);
+		if (avail != limit)
+			plt_warn("Not all buffers are back to meta pool,"
+				 " %" PRIu64 " != %" PRIu64, avail, limit);
+
+		rc = custom_meta_pool_cb(inl_cfg->meta_mempool, &roc_nix->meta_mempool, mp_name,
+					 meta_aura, 0, 0, true);
+		if (rc) {
+			plt_err("Failed to destroy meta aura, rc=%d", rc);
+			return rc;
+		}
+
+		roc_nix->buf_sz = 0;
+	}
+
+	return 0;
+}
+
+static int
+nix_inl_custom_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
+				uint64_t *meta_aura)
+{
+	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	struct idev_nix_inl_cfg *inl_cfg;
+	struct nix_inl_dev *nix_inl_dev;
+	char mempool_name[24] = {'\0'};
+	uint32_t nb_bufs, buf_sz;
+	char *mp_name = NULL;
+	uintptr_t mp;
+	int rc;
+
+	inl_cfg = &idev->inl_cfg;
+	nix_inl_dev = idev->nix_inl_dev;
+
+	/* Override meta buf count from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
+		nb_bufs = nix_inl_dev->nb_meta_bufs;
+	else
+		nb_bufs = roc_npa_buf_type_limit_get(mask);
+
+	/* Override meta buf size from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
+		buf_sz = nix_inl_dev->meta_buf_sz;
+	else
+		buf_sz = first_skip + NIX_INL_META_SIZE;
+
+	/* Create Metapool name */
+	snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
+		 roc_nix->port_id + 1);
+	mp_name = mempool_name;
+
+	/* Allocate meta aura */
+	rc = custom_meta_pool_cb(inl_cfg->meta_mempool, &mp, mp_name, meta_aura,
+				 buf_sz, nb_bufs, false);
+	if (rc) {
+		plt_err("Failed to allocate meta aura, rc=%d", rc);
+		return rc;
+	}
+
+	/* Overwrite */
+	roc_nix->meta_mempool = mp;
+	roc_nix->buf_sz = buf_sz;
+
+	return 0;
+}
+
 static int
 nix_inl_global_meta_buffer_validate(struct idev_cfg *idev, struct roc_nix_rq *rq)
 {
@@ -228,6 +322,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	struct idev_nix_inl_cfg *inl_cfg;
+	bool is_local_metaaura;
 	bool aura_setup = false;
 	uint64_t *meta_aura;
 	int rc;
@@ -238,18 +333,39 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
 	inl_cfg = &idev->inl_cfg;
 
 	/* Create meta aura if not present */
-	if (roc_nix->local_meta_aura_ena)
-		meta_aura = &roc_nix->meta_aura_handle;
-	else
+	if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
 		meta_aura = &inl_cfg->meta_aura;
+		is_local_metaaura = false;
+	} else {
+		meta_aura = &roc_nix->meta_aura_handle;
+		is_local_metaaura = true;
+	}
 
 	if (!(*meta_aura)) {
-		rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura);
+		rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura,
+					      is_local_metaaura);
 		if (rc)
 			return rc;
 
 		aura_setup = true;
 	}
+
+	if (roc_nix->custom_meta_aura_ena) {
+		/* Create metaura for 1:N pool:aura */
+		if (!custom_meta_pool_cb)
+			return -EFAULT;
+
+		meta_aura = &roc_nix->meta_aura_handle;
+		if (!(*meta_aura)) {
+			rc = nix_inl_custom_meta_aura_create(idev, roc_nix, rq->first_skip,
+							     meta_aura);
+			if (rc)
+				return rc;
+
+			aura_setup = true;
+		}
+	}
+
 	/* Update rq meta aura handle */
 	rq->meta_aura_handle = *meta_aura;
 
@@ -698,6 +814,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct roc_cpt_inline_ipsec_inb_cfg cfg;
 	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
 	uint16_t bpids[ROC_NIX_MAX_BPID_CNT];
 	struct roc_cpt *roc_cpt;
 	int rc;
@@ -749,9 +866,13 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	if (rc)
 		return rc;
 
+	inl_dev = idev->nix_inl_dev;
+
+	roc_nix->custom_meta_aura_ena = (roc_nix->local_meta_aura_ena &&
+					 (inl_dev->is_multi_channel || roc_nix->custom_sa_action));
 	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
 		nix->need_meta_aura = true;
-		if (!roc_nix->local_meta_aura_ena)
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs++;
 	}
 
@@ -773,15 +894,17 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 		return -EFAULT;
 
 	nix->inl_inb_ena = false;
+
 	if (nix->need_meta_aura) {
 		nix->need_meta_aura = false;
-		if (roc_nix->local_meta_aura_ena) {
-			nix_inl_meta_aura_destroy(roc_nix);
-		} else {
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs--;
-			if (!idev->inl_cfg.refs)
-				nix_inl_meta_aura_destroy(roc_nix);
-		}
+
+		if (roc_nix->custom_meta_aura_ena)
+			nix_inl_custom_meta_aura_destroy(roc_nix);
+
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy(roc_nix);
 	}
 
 	if (roc_feature_nix_has_inl_rq_mask()) {
@@ -1309,17 +1432,18 @@ roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
 
 	if (ena) {
 		nix->need_meta_aura = true;
-		if (!roc_nix->local_meta_aura_ena)
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs++;
 	} else if (nix->need_meta_aura) {
 		nix->need_meta_aura = false;
-		if (roc_nix->local_meta_aura_ena) {
-			nix_inl_meta_aura_destroy(roc_nix);
-		} else {
+		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
 			idev->inl_cfg.refs--;
-			if (!idev->inl_cfg.refs)
-				nix_inl_meta_aura_destroy(roc_nix);
-		}
+
+		if (roc_nix->custom_meta_aura_ena)
+			nix_inl_custom_meta_aura_destroy(roc_nix);
+
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy(roc_nix);
 	}
 }
 
@@ -1672,3 +1796,9 @@ roc_nix_inl_eng_caps_get(struct roc_nix *roc_nix)
 
 	return nix->cpt_eng_caps;
 }
+
+void
+roc_nix_inl_custom_meta_pool_cb_register(roc_nix_inl_custom_meta_pool_cb_t cb)
+{
+	custom_meta_pool_cb = cb;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index daa21a941a..885d95335e 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -121,6 +121,9 @@ typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle,  uintptr_t *mpool,
 					  uint32_t blk_sz, uint32_t nb_bufs, bool destroy,
 					  const char *mempool_name);
+typedef int (*roc_nix_inl_custom_meta_pool_cb_t)(uintptr_t pmpool, uintptr_t *mpool,
+						 const char *mempool_name, uint64_t *aura_handle,
+						 uint32_t blk_sz, uint32_t nb_bufs, bool destroy);
 
 struct roc_nix_inl_dev {
 	/* Input parameters */
@@ -199,6 +202,7 @@ int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
 uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
 void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
+void __roc_api roc_nix_inl_custom_meta_pool_cb_register(roc_nix_inl_custom_meta_pool_cb_t cb);
 
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 809fd81b20..c76564b46e 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -199,6 +199,7 @@ INTERNAL {
 	roc_nix_inb_is_with_inl_dev;
 	roc_nix_inl_meta_aura_check;
 	roc_nix_inl_meta_pool_cb_register;
+	roc_nix_inl_custom_meta_pool_cb_register;
 	roc_nix_inb_mode_set;
 	roc_nix_inl_outb_fini;
 	roc_nix_inl_outb_init;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 1e519d8156..f049b5c348 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -168,6 +168,10 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		RTE_MEMPOOL_CHECK_COOKIES(((struct rte_mbuf *)mbuf)->pool, (void **)&mbuf, 1, 1);
 
 		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			void *lookup_mem = ws->lookup_mem;
+			struct rte_mempool *mp = NULL;
+			uint64_t meta_aura;
+
 			const uint64_t mbuf_init =
 				0x100010000ULL | RTE_PKTMBUF_HEADROOM |
 				(flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
@@ -192,8 +196,11 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 				cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
 				(struct rte_mbuf *)mbuf, d_off, flags,
 				mbuf_init | ((uint64_t)port) << 48);
+			mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem);
+			meta_aura = mp ? mp->pool_id : m->pool->pool_id;
+
 			if (loff)
-				roc_npa_aura_op_free(m->pool->pool_id, 0, iova);
+				roc_npa_aura_op_free(meta_aura, 0, iova);
 		}
 
 		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
diff --git a/drivers/net/cnxk/cn10k_rx_select.c b/drivers/net/cnxk/cn10k_rx_select.c
index b906f6725a..1e0de1b7ac 100644
--- a/drivers/net/cnxk/cn10k_rx_select.c
+++ b/drivers/net/cnxk/cn10k_rx_select.c
@@ -79,9 +79,10 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 #undef R
 	};
 
-	/* Copy multi seg version with no offload for tear down sequence */
+	/* Copy multi seg version with security for tear down sequence */
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		dev->rx_pkt_burst_no_offload = nix_eth_rx_burst_mseg[0];
+		dev->rx_pkt_burst_no_offload =
+			nix_eth_rx_burst_mseg_reas[NIX_RX_OFFLOAD_SECURITY_F];
 
 	if (dev->scalar_ena) {
 		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 3ceda8c8f9..aaa1014479 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1885,6 +1885,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	/* Register callback for inline meta pool create */
 	roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
 
+	/* Register callback for inline meta pool create 1:N pool:aura */
+	roc_nix_inl_custom_meta_pool_cb_register(cnxk_nix_inl_custom_meta_pool_cb);
+
 	dev->eth_dev = eth_dev;
 	dev->configured = 0;
 	dev->ptype_disable = 0;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 44e37d6550..e280d6c05e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -610,6 +610,9 @@ cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
 			      struct rte_security_session *sess);
 int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
 			      uint32_t nb_bufs, bool destroy, const char *mempool_name);
+int cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
+				     uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+				     bool destroy);
 
 /* Congestion Management */
 int cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info);
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index cd64daacc0..a66d58ca61 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -6,6 +6,7 @@
 #include <cnxk_mempool.h>
 
 #define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
+#define CN10K_HW_POOL_OPS_NAME "cn10k_hwpool_ops"
 
 #define CNXK_NIX_INL_SELFTEST	      "selftest"
 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
@@ -114,6 +115,67 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
 	return rc;
 }
 
+/* Create Aura and link with Global mempool for 1:N Pool:Aura case */
+int
+cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
+				 uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+				 bool destroy)
+{
+	struct rte_mempool *hp;
+	int rc;
+
+	/* Destroy the mempool if requested */
+	if (destroy) {
+		hp = rte_mempool_lookup(mempool_name);
+		if (!hp)
+			return -ENOENT;
+
+		if (hp->pool_id != *aura_handle) {
+			plt_err("Meta pool aura mismatch");
+			return -EINVAL;
+		}
+
+		rte_mempool_free(hp);
+		plt_free(hp->pool_config);
+
+		*aura_handle = 0;
+		*mpool = 0;
+		return 0;
+	}
+
+	/* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
+	 * support.
+	 */
+	hp = rte_mempool_create_empty(mempool_name, nb_bufs, buf_sz, 0,
+				      sizeof(struct rte_pktmbuf_pool_private),
+				      SOCKET_ID_ANY, 0);
+	if (!hp) {
+		plt_err("Failed to create inline meta pool");
+		return -EIO;
+	}
+
+	rc = rte_mempool_set_ops_byname(hp, CN10K_HW_POOL_OPS_NAME, (void *)pmpool);
+
+	if (rc) {
+		plt_err("Failed to setup ops, rc=%d", rc);
+		goto free_hp;
+	}
+
+	/* Populate buffer */
+	rc = rte_mempool_populate_default(hp);
+	if (rc < 0) {
+		plt_err("Failed to populate pool, rc=%d", rc);
+		goto free_hp;
+	}
+
+	*aura_handle = hp->pool_id;
+	*mpool = (uintptr_t)hp;
+	return 0;
+free_hp:
+	rte_mempool_free(hp);
+	return rc;
+}
+
 static int
 parse_max_ipsec_rules(const char *key, const char *value, void *extra_args)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 23/32] net/cnxk: support for inbound without inline dev mode
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (20 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 22/32] common/cnxk: support of 1-N pool-aura per NIX LF Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 24/32] common/cnxk: fix inline device VF identification Nithin Dabilpuram
                     ` (8 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

Add support for inbound inline IPsec without an inline device RQ,
i.e. both first-pass and second-pass traffic hitting the same ethdev
RQ in poll mode. Remove the switching from inline dev to non-inline
dev mode, as inline dev mode is the default and can only be
overridden by devargs.
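
For reference, a small sketch of the WQE-skip setup applied below in
cnxk_nix_rx_queue_setup() when inline inbound is enabled without an
inline device (values mirror the hunk; rq is the RQ being configured):

    /* Skip enough cache lines to cover the rte_mbuf header before the
     * WQE when second-pass IPsec packets land on the ethdev CQ.
     */
    uint16_t wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf),
                                       ROC_CACHE_LINE_SZ) / ROC_CACHE_LINE_SZ;

    rq->ipsech_ena = true;
    rq->wqe_skip = wqe_skip;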

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_queue.c      |  3 +++
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 15 ---------------
 drivers/net/cnxk/cnxk_ethdev.c           | 15 ++++++++++-----
 3 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index d29fafa895..08e8bf7ea2 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -473,6 +473,9 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	if (rq->ipsech_ena) {
 		aq->rq.ipsech_ena = 1;
 		aq->rq.ipsecd_drop_en = 1;
+		aq->rq.ena_wqwd = 1;
+		aq->rq.wqe_skip = rq->wqe_skip;
+		aq->rq.wqe_caching = 1;
 	}
 
 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 8ad84198b9..92aea92389 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -273,15 +273,6 @@ cnxk_sso_rx_adapter_queue_add(
 	}
 
 	dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
-
-	/* Switch to use PF/VF's NIX LF instead of inline device for inbound
-	 * when all the RQ's are switched to event dev mode. We do this only
-	 * when dev arg no_inl_dev=1 is selected.
-	 */
-	if (cnxk_eth_dev->inb.no_inl_dev &&
-	    cnxk_eth_dev->nb_rxq_sso == cnxk_eth_dev->nb_rxq)
-		cnxk_nix_inb_mode_set(cnxk_eth_dev, false);
-
 	return 0;
 }
 
@@ -309,12 +300,6 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 	if (rc < 0)
 		plt_err("Failed to clear Rx adapter config port=%d, q=%d",
 			eth_dev->data->port_id, rx_queue_id);
-
-	/* Removing RQ from Rx adapter implies need to use
-	 * inline device for CQ/Poll mode.
-	 */
-	cnxk_nix_inb_mode_set(cnxk_eth_dev, true);
-
 	return rc;
 }
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index aaa1014479..916198d802 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -81,9 +81,6 @@ cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
 {
 	struct roc_nix *nix = &dev->nix;
 
-	if (dev->inb.inl_dev == use_inl_dev)
-		return 0;
-
 	plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
 		    dev->inb.nb_sess, !!dev->inb.inl_dev);
 
@@ -119,7 +116,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 		/* By default pick using inline device for poll mode.
 		 * Will be overridden when event mode rq's are setup.
 		 */
-		cnxk_nix_inb_mode_set(dev, true);
+		cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev);
 
 		/* Allocate memory to be used as dptr for CPT ucode
 		 * WRITE_SA op.
@@ -633,6 +630,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	struct roc_nix_rq *rq;
 	struct roc_nix_cq *cq;
 	uint16_t first_skip;
+	uint16_t wqe_skip;
 	int rc = -EINVAL;
 	size_t rxq_sz;
 	struct rte_mempool *lpb_pool = mp;
@@ -712,8 +710,15 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
 
 	/* Enable Inline IPSec on RQ, will not be used for Poll mode */
-	if (roc_nix_inl_inb_is_enabled(nix))
+	if (roc_nix_inl_inb_is_enabled(nix) && !dev->inb.inl_dev) {
 		rq->ipsech_ena = true;
+		/* WQE skip is needed when poll mode is enabled in CN10KA_B0 and above
+		 * for Inline IPsec traffic to CQ without inline device.
+		 */
+		wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
+		wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
+		rq->wqe_skip = wqe_skip;
+	}
 
 	if (spb_pool) {
 		rq->spb_ena = 1;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 24/32] common/cnxk: fix inline device VF identification
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (21 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 23/32] net/cnxk: support for inbound without inline dev mode Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 25/32] common/cnxk: avoid inline dev CPT lf detach multiple times Nithin Dabilpuram
                     ` (7 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, stable

Fix inline device VF identification so that the right
mbox region is used.

Fixes: ee48f711f3b0 ("common/cnxk: support NIX inline inbound and outbound setup")
cc: stable@dpdk.org

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_dev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 3125f9dda2..d87b00e7e8 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -1001,6 +1001,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
 	case PCI_DEVID_CNXK_RVU_AF_VF:
 	case PCI_DEVID_CNXK_RVU_VF:
 	case PCI_DEVID_CNXK_RVU_SDP_VF:
+	case PCI_DEVID_CNXK_RVU_NIX_INL_VF:
 		dev->hwcap |= DEV_HWCAP_F_VF;
 		break;
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 25/32] common/cnxk: avoid inline dev CPT lf detach multiple times
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (22 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 24/32] common/cnxk: fix inline device VF identification Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 26/32] common/cnxk: skip CGX promisc mode with NPC exact match Nithin Dabilpuram
                     ` (6 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

CPT LF detach is done along with all other LFs of the inline
device, so remove it from nix_inl_cpt_release().
Also provide ROC APIs to set up and release the CPT LF
separately.
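
A hedged usage sketch of the two new ROC APIs (return codes follow the
implementation below; the calling context is illustrative only):

    /* Re-attach the inline device CPT LF; the flag selects whether the LF
     * is allocated against the inline device SSO (see cpt_lfs_alloc()).
     */
    int rc = roc_nix_inl_dev_cpt_setup(false);
    if (rc) /* -ENOENT: no inline device, -EBUSY: LF already set up */
        plt_err("Inline dev CPT LF setup failed, rc=%d", rc);

    /* ... use the LF ... */

    rc = roc_nix_inl_dev_cpt_release();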

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_inl.h     |  2 +
 drivers/common/cnxk/roc_nix_inl_dev.c | 59 ++++++++++++++++++++-------
 drivers/common/cnxk/version.map       |  2 +
 3 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 885d95335e..ab1e9c0f98 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -158,6 +158,8 @@ void __roc_api roc_nix_inl_dev_unlock(void);
 int __roc_api roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle);
 int __roc_api roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats);
 uint16_t __roc_api roc_nix_inl_dev_pffunc_get(void);
+int __roc_api roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso);
+int __roc_api roc_nix_inl_dev_cpt_release(void);
 
 /* NIX Inline Inbound API */
 int __roc_api roc_nix_inl_inb_init(struct roc_nix *roc_nix);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index ca948d3bc7..d76158e30d 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -172,7 +172,7 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 }
 
 static int
-nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
+nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 {
 	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
 	struct dev *dev = &inl_dev->dev;
@@ -186,7 +186,7 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
 	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
 		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
 		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
-	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
+	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso);
 	if (rc) {
 		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
 		return rc;
@@ -218,7 +218,7 @@ nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 {
 	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
 	struct dev *dev = &inl_dev->dev;
-	int rc, ret = 0;
+	int rc;
 
 	if (!inl_dev->attach_cptlf)
 		return 0;
@@ -228,17 +228,11 @@ nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 
 	/* Free LF resources */
 	rc = cpt_lfs_free(dev);
-	if (rc)
+	if (!rc)
+		lf->dev = NULL;
+	else
 		plt_err("Failed to free CPT LF resources, rc=%d", rc);
-	ret |= rc;
-
-	/* Detach LF */
-	rc = cpt_lfs_detach(dev);
-	if (rc)
-		plt_err("Failed to detach CPT LF, rc=%d", rc);
-	ret |= rc;
-
-	return ret;
+	return rc;
 }
 
 static int
@@ -940,7 +934,7 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 		goto nix_release;
 
 	/* Setup CPT LF */
-	rc = nix_inl_cpt_setup(inl_dev);
+	rc = nix_inl_cpt_setup(inl_dev, false);
 	if (rc)
 		goto sso_release;
 
@@ -1035,8 +1029,11 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
 	/* Flush Inbound CTX cache entries */
 	nix_inl_cpt_ctx_cache_sync(inl_dev);
 
+	/* Release CPT */
+	rc = nix_inl_cpt_release(inl_dev);
+
 	/* Release SSO */
-	rc = nix_inl_sso_release(inl_dev);
+	rc |= nix_inl_sso_release(inl_dev);
 
 	/* Release NIX */
 	rc |= nix_inl_nix_release(inl_dev);
@@ -1052,3 +1049,35 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
 	idev->nix_inl_dev = NULL;
 	return 0;
 }
+
+int
+roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev = NULL;
+
+	if (!idev || !idev->nix_inl_dev)
+		return -ENOENT;
+	inl_dev = idev->nix_inl_dev;
+
+	if (inl_dev->cpt_lf.dev != NULL)
+		return -EBUSY;
+
+	return nix_inl_cpt_setup(inl_dev, use_inl_dev_sso);
+}
+
+int
+roc_nix_inl_dev_cpt_release(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev = NULL;
+
+	if (!idev || !idev->nix_inl_dev)
+		return -ENOENT;
+	inl_dev = idev->nix_inl_dev;
+
+	if (inl_dev->cpt_lf.dev == NULL)
+		return 0;
+
+	return nix_inl_cpt_release(inl_dev);
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index c76564b46e..e1335e9068 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -174,6 +174,8 @@ INTERNAL {
 	roc_nix_inl_cb_register;
 	roc_nix_inl_cb_unregister;
 	roc_nix_inl_ctx_write;
+	roc_nix_inl_dev_cpt_setup;
+	roc_nix_inl_dev_cpt_release;
 	roc_nix_inl_dev_dump;
 	roc_nix_inl_dev_fini;
 	roc_nix_inl_dev_init;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 26/32] common/cnxk: skip CGX promisc mode with NPC exact match
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (23 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 25/32] common/cnxk: avoid inline dev CPT lf detach multiple times Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:58   ` [PATCH v3 27/32] common/cnxk: configure PFC on SPB aura Nithin Dabilpuram
                     ` (5 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

When NPC exact match is enabled, CGX promisc mode cannot be used
because CGX filtering is permanently disabled. Hence skip the CGX
promisc mbox calls to avoid mbox errors.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 drivers/common/cnxk/roc_nix_mac.c  | 8 ++++++++
 drivers/common/cnxk/roc_nix_priv.h | 1 +
 drivers/common/cnxk/roc_npc.c      | 1 +
 drivers/common/cnxk/roc_npc_mcam.c | 1 +
 drivers/common/cnxk/roc_npc_priv.h | 1 +
 5 files changed, 12 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix_mac.c b/drivers/common/cnxk/roc_nix_mac.c
index ac30fb52d1..754d75ac73 100644
--- a/drivers/common/cnxk/roc_nix_mac.c
+++ b/drivers/common/cnxk/roc_nix_mac.c
@@ -201,6 +201,14 @@ roc_nix_mac_promisc_mode_enable(struct roc_nix *roc_nix, int enable)
 		goto exit;
 	}
 
+	/* Skip CGX promisc toggling if NPC exact match is enabled as
+	 * CGX filtering is disabled permanently.
+	 */
+	if (nix->exact_match_ena) {
+		rc = 0;
+		goto exit;
+	}
+
 	if (enable)
 		mbox_alloc_msg_cgx_promisc_enable(mbox);
 	else
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 6872630dc8..ea4211dfed 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -130,6 +130,7 @@ struct nix {
 	struct nix_qint *cints_mem;
 	uint8_t configured_qints;
 	uint8_t configured_cints;
+	uint8_t exact_match_ena;
 	struct roc_nix_rq **rqs;
 	struct roc_nix_sq **sqs;
 	uint16_t vwqe_interval;
diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c
index 1baa71fc45..d88c4d3bd6 100644
--- a/drivers/common/cnxk/roc_npc.c
+++ b/drivers/common/cnxk/roc_npc.c
@@ -272,6 +272,7 @@ roc_npc_init(struct roc_npc *roc_npc)
 	roc_npc->rx_parse_nibble = npc->keyx_supp_nmask[NPC_MCAM_RX];
 
 	npc->mcam_entries = npc_mcam_tot_entries() >> npc->keyw[NPC_MCAM_RX];
+	nix->exact_match_ena = npc->exact_match_ena;
 
 	/* Free, free_rev, live and live_rev entries */
 	bmap_sz = plt_bitmap_get_memory_footprint(npc->mcam_entries);
diff --git a/drivers/common/cnxk/roc_npc_mcam.c b/drivers/common/cnxk/roc_npc_mcam.c
index e0019818c7..62e0ce21b2 100644
--- a/drivers/common/cnxk/roc_npc_mcam.c
+++ b/drivers/common/cnxk/roc_npc_mcam.c
@@ -542,6 +542,7 @@ npc_mcam_fetch_kex_cfg(struct npc *npc)
 	mbox_memcpy((char *)npc->profile_name, kex_rsp->mkex_pfl_name,
 		    MKEX_NAME_LEN);
 
+	npc->exact_match_ena = (kex_rsp->rx_keyx_cfg >> 40) & 0xF;
 	npc_mcam_process_mkex_cfg(npc, kex_rsp);
 
 done:
diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h
index 6f41df18bb..30274e837b 100644
--- a/drivers/common/cnxk/roc_npc_priv.h
+++ b/drivers/common/cnxk/roc_npc_priv.h
@@ -404,6 +404,7 @@ struct npc {
 	struct npc_prio_flow_list_head *prio_flow_list;
 	struct plt_bitmap *rss_grp_entries;
 	struct npc_flow_list ipsec_list;
+	uint8_t exact_match_ena;
 };
 
 #define NPC_HASH_FIELD_LEN 16
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 27/32] common/cnxk: configure PFC on SPB aura
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (24 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 26/32] common/cnxk: skip CGX promisc mode with NPC exact match Nithin Dabilpuram
@ 2023-05-25  9:58   ` Nithin Dabilpuram
  2023-05-25  9:59   ` [PATCH v3 28/32] common/nix: check for null dereference Nithin Dabilpuram
                     ` (4 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:58 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

From: Sunil Kumar Kori <skori@marvell.com>

An RQ can be configured with both lpb_aura and spb_aura at the
same time, and both can contribute to aura-based back pressure
from NIX to RPM.

But currently the PFC configuration is applied only to lpb_aura,
so spb_aura does not contribute to back pressure.

This patch adds support for configuring PFC on the SPB aura as well.
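
A minimal configuration sketch of the new field, assuming the usual
roc_api.h context and an RQ backed by both an LPB and an SPB mempool;
the qid and aura handle parameters are placeholders for the
application's own values:

static int
example_pfc_on_both_auras(struct roc_nix *roc_nix, uint32_t qid,
                          uint64_t lpb_aura_handle, uint64_t spb_aura_handle)
{
        struct roc_nix_fc_cfg fc_cfg;

        memset(&fc_cfg, 0, sizeof(fc_cfg));
        fc_cfg.type = ROC_NIX_FC_RQ_CFG;
        fc_cfg.rq_cfg.rq = qid;                   /* RQ to protect */
        fc_cfg.rq_cfg.tc = 0;                     /* illustrative traffic class */
        fc_cfg.rq_cfg.enable = true;
        fc_cfg.rq_cfg.pool = lpb_aura_handle;     /* LPB aura handle */
        fc_cfg.rq_cfg.spb_pool = spb_aura_handle; /* new: SPB aura now back-pressures too */

        return roc_nix_fc_config_set(roc_nix, &fc_cfg);
}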

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/common/cnxk/roc_nix.h    | 1 +
 drivers/common/cnxk/roc_nix_fc.c | 6 ++++++
 2 files changed, 7 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 548854952b..f60e546c01 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -198,6 +198,7 @@ struct roc_nix_fc_cfg {
 			uint16_t cq_drop;
 			bool enable;
 			uint64_t pool;
+			uint64_t spb_pool;
 			uint64_t pool_drop_pct;
 		} rq_cfg;
 
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 21e3b7d5bd..44eade5ba6 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -303,6 +303,12 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 				      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
 				      fc_cfg->rq_cfg.tc, pool_drop_pct);
 
+		if (rq->spb_ena) {
+			roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.spb_pool,
+					      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
+					      fc_cfg->rq_cfg.tc, pool_drop_pct);
+		}
+
 		if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle)
 			roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
 					      fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 28/32] common/nix: check for null dereference
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (25 preceding siblings ...)
  2023-05-25  9:58   ` [PATCH v3 27/32] common/cnxk: configure PFC on SPB aura Nithin Dabilpuram
@ 2023-05-25  9:59   ` Nithin Dabilpuram
  2023-05-25  9:59   ` [PATCH v3 29/32] common/cnxk: fix receive queue with multiple mask Nithin Dabilpuram
                     ` (3 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:59 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Gowrishankar Muthukrishnan

From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>

Check the return value for NULL before dereferencing it.

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/common/cnxk/roc_nix.c    | 8 +++++++-
 drivers/common/cnxk/roc_nix_fc.c | 7 ++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 39943e4ba7..152ef7269e 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -392,6 +392,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 {
 	enum roc_nix_rss_reta_sz reta_sz;
 	struct plt_pci_device *pci_dev;
+	struct roc_nix_list *nix_list;
 	uint16_t max_sqb_count;
 	uint64_t blkaddr;
 	struct dev *dev;
@@ -417,7 +418,12 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	nix = roc_nix_to_nix_priv(roc_nix);
 	pci_dev = roc_nix->pci_dev;
 	dev = &nix->dev;
-	TAILQ_INSERT_TAIL(roc_idev_nix_list_get(), roc_nix, next);
+
+	nix_list = roc_idev_nix_list_get();
+	if (nix_list == NULL)
+		return -EINVAL;
+
+	TAILQ_INSERT_TAIL(nix_list, roc_nix, next);
 
 	if (nix->dev.drv_inited)
 		return 0;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 44eade5ba6..f79baf9242 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -431,13 +431,18 @@ static int
 nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid, uint16_t *bpid_new)
 {
 	struct roc_nix *roc_nix_tmp, *roc_nix_pre = NULL;
+	struct roc_nix_list *nix_list;
 	uint8_t chan_pre;
 
 	if (!roc_feature_nix_has_rxchan_multi_bpid())
 		return -ENOTSUP;
 
+	nix_list = roc_idev_nix_list_get();
+	if (nix_list == NULL)
+		return -EINVAL;
+
 	/* Find associated NIX RX channel if Aura BPID is of that of a NIX. */
-	TAILQ_FOREACH(roc_nix_tmp, roc_idev_nix_list_get(), next) {
+	TAILQ_FOREACH(roc_nix_tmp, nix_list, next) {
 		struct nix *nix = roc_nix_to_nix_priv(roc_nix_tmp);
 		int i;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 29/32] common/cnxk: fix receive queue with multiple mask
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (26 preceding siblings ...)
  2023-05-25  9:59   ` [PATCH v3 28/32] common/nix: check for null dereference Nithin Dabilpuram
@ 2023-05-25  9:59   ` Nithin Dabilpuram
  2023-05-25  9:59   ` [PATCH v3 30/32] net/cnxk: handle extbuf completion on ethdev stop Nithin Dabilpuram
                     ` (2 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:59 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla, stable

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Enable or disable the RQ mask based on the function parameter.
This fix allocates a different profile for each distinct RQ mask.

Fixes: ddf955d3917e ("common/cnxk: support CPT second pass")
cc: stable@dpdk.org

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/roc_nix_inl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 69f658ba87..16f858f561 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -1382,7 +1382,7 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
 		return -EFAULT;
 
 	if (roc_feature_nix_has_inl_rq_mask()) {
-		rc = nix_inl_rq_mask_cfg(roc_nix, true);
+		rc = nix_inl_rq_mask_cfg(roc_nix, enable);
 		if (rc) {
 			plt_err("Failed to get rq mask rc=%d", rc);
 			return rc;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 30/32] net/cnxk: handle extbuf completion on ethdev stop
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (27 preceding siblings ...)
  2023-05-25  9:59   ` [PATCH v3 29/32] common/cnxk: fix receive queue with multiple mask Nithin Dabilpuram
@ 2023-05-25  9:59   ` Nithin Dabilpuram
  2023-05-25  9:59   ` [PATCH v3 31/32] net/cnxk: add aes-ccm to inline IPsec capabilities Nithin Dabilpuram
  2023-05-25  9:59   ` [PATCH v3 32/32] common/cnxk: add check for null auth and anti-replay Nithin Dabilpuram
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:59 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Nithin Kumar Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

During packet transmission, the CQ corresponding to the SQ is
polled for transmit completion packets in the transmit function.
When the last burst is transmitted, the corresponding transmit
completion packets are left in the CQ. This patch reads the
leftover packets from the CQ on ethdev stop. The transmit
completion code is also moved to cn10k_rxtx.h and cn9k_ethdev.h
to avoid code duplication.
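
Condensed, the stop-time drain added for cn10k below boils down to this
loop (the cn9k variant is identical except that it always passes 0 for
the multi-thread-safe flag):

        /* Poll leftover completion CQEs until the SQ head catches up with its tail. */
        if ((dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
                struct roc_nix_sq *sq = &dev->sqs[qidx];
                uint32_t head = 0, tail = 0;

                do {
                        handle_tx_completion_pkts(txq, dev->tx_offload_flags & NIX_TX_VWQE_F);
                        roc_nix_sq_head_tail_get(&dev->nix, sq->qid, &head, &tail);
                } while (head != tail);
        }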

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/event/cnxk/cn10k_tx_worker.h |  2 +-
 drivers/event/cnxk/cn9k_worker.h     |  2 +-
 drivers/net/cnxk/cn10k_ethdev.c      | 13 +++++
 drivers/net/cnxk/cn10k_rxtx.h        | 76 +++++++++++++++++++++++++
 drivers/net/cnxk/cn10k_tx.h          | 83 +---------------------------
 drivers/net/cnxk/cn9k_ethdev.c       | 14 +++++
 drivers/net/cnxk/cn9k_ethdev.h       | 77 ++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_tx.h           | 83 +---------------------------
 8 files changed, 188 insertions(+), 162 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_tx_worker.h b/drivers/event/cnxk/cn10k_tx_worker.h
index c18786a14c..7f170ac5f0 100644
--- a/drivers/event/cnxk/cn10k_tx_worker.h
+++ b/drivers/event/cnxk/cn10k_tx_worker.h
@@ -55,7 +55,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
 		return 0;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, 1, 1);
+		handle_tx_completion_pkts(txq, 1);
 
 	cn10k_nix_tx_skeleton(txq, cmd, flags, 0);
 	/* Perform header writes before barrier
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 1ce4b044e8..fcb82987e5 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -784,7 +784,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, 1, 1);
+		handle_tx_completion_pkts(txq, 1);
 
 	if (((txq->nb_sqb_bufs_adj -
 	      __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 2b4ab8b772..792c1b1970 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -367,6 +367,10 @@ static int
 cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 {
 	struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint16_t flags = dev->tx_offload_flags;
+	struct roc_nix *nix = &dev->nix;
+	uint32_t head = 0, tail = 0;
 	int rc;
 
 	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
@@ -375,6 +379,15 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 
 	/* Clear fc cache pkts to trigger worker stop */
 	txq->fc_cache_pkts = 0;
+
+	if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
+		struct roc_nix_sq *sq = &dev->sqs[qidx];
+		do {
+			handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
+			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+		} while (head != tail);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index c256d54307..65dd57494a 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -113,4 +113,80 @@ struct cn10k_sec_sess_priv {
 	(void *)((uintptr_t)(lmt_addr) +                                       \
 		 ((uint64_t)(lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))
 
+static inline uint16_t
+nix_tx_compl_nb_pkts(struct cn10k_eth_txq *txq, const uint64_t wdata,
+		const uint32_t qmask)
+{
+	uint16_t available = txq->tx_compl.available;
+
+	/* Update the available count if cached value is not enough */
+	if (!unlikely(available)) {
+		uint64_t reg, head, tail;
+
+		/* Use LDADDA version to avoid reorder */
+		reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
+		/* CQ_OP_STATUS operation error */
+		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+				reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+			return 0;
+
+		tail = reg & 0xFFFFF;
+		head = (reg >> 20) & 0xFFFFF;
+		if (tail < head)
+			available = tail - head + qmask + 1;
+		else
+			available = tail - head;
+
+		txq->tx_compl.available = available;
+	}
+	return available;
+}
+
+static inline void
+handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
+{
+#define CNXK_NIX_CQ_ENTRY_SZ 128
+#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)
+
+	uint16_t tx_pkts = 0, nb_pkts;
+	const uintptr_t desc = txq->tx_compl.desc_base;
+	const uint64_t wdata = txq->tx_compl.wdata;
+	const uint32_t qmask = txq->tx_compl.qmask;
+	uint32_t head = txq->tx_compl.head;
+	struct nix_cqe_hdr_s *tx_compl_cq;
+	struct nix_send_comp_s *tx_compl_s0;
+	struct rte_mbuf *m_next, *m;
+
+	if (mt_safe)
+		rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
+
+	nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, qmask);
+	while (tx_pkts < nb_pkts) {
+		rte_prefetch_non_temporal((void *)(desc +
+					(CQE_SZ((head + 2) & qmask))));
+		tx_compl_cq = (struct nix_cqe_hdr_s *)
+			(desc + CQE_SZ(head));
+		tx_compl_s0 = (struct nix_send_comp_s *)
+			((uint64_t *)tx_compl_cq + 1);
+		m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
+		while (m->next != NULL) {
+			m_next = m->next;
+			rte_pktmbuf_free_seg(m);
+			m = m_next;
+		}
+		rte_pktmbuf_free_seg(m);
+
+		head++;
+		head &= qmask;
+		tx_pkts++;
+	}
+	txq->tx_compl.head = head;
+	txq->tx_compl.available -= nb_pkts;
+
+	plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
+
+	if (mt_safe)
+		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
+}
+
 #endif /* __CN10K_RXTX_H__ */
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index c9ec01cd9d..4f23a8dfc3 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1151,83 +1151,6 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	return segdw;
 }
 
-static inline uint16_t
-nix_tx_compl_nb_pkts(struct cn10k_eth_txq *txq, const uint64_t wdata,
-		const uint16_t pkts, const uint32_t qmask)
-{
-	uint32_t available = txq->tx_compl.available;
-
-	/* Update the available count if cached value is not enough */
-	if (unlikely(available < pkts)) {
-		uint64_t reg, head, tail;
-
-		/* Use LDADDA version to avoid reorder */
-		reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
-		/* CQ_OP_STATUS operation error */
-		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
-				reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
-			return 0;
-
-		tail = reg & 0xFFFFF;
-		head = (reg >> 20) & 0xFFFFF;
-		if (tail < head)
-			available = tail - head + qmask + 1;
-		else
-			available = tail - head;
-
-		txq->tx_compl.available = available;
-	}
-	return RTE_MIN(pkts, available);
-}
-
-static inline void
-handle_tx_completion_pkts(struct cn10k_eth_txq *txq, const uint16_t pkts,
-			  uint8_t mt_safe)
-{
-#define CNXK_NIX_CQ_ENTRY_SZ 128
-#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)
-
-	uint16_t tx_pkts = 0, nb_pkts;
-	const uintptr_t desc = txq->tx_compl.desc_base;
-	const uint64_t wdata = txq->tx_compl.wdata;
-	const uint32_t qmask = txq->tx_compl.qmask;
-	uint32_t head = txq->tx_compl.head;
-	struct nix_cqe_hdr_s *tx_compl_cq;
-	struct nix_send_comp_s *tx_compl_s0;
-	struct rte_mbuf *m_next, *m;
-
-	if (mt_safe)
-		rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
-
-	nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, pkts, qmask);
-	while (tx_pkts < nb_pkts) {
-		rte_prefetch_non_temporal((void *)(desc +
-					(CQE_SZ((head + 2) & qmask))));
-		tx_compl_cq = (struct nix_cqe_hdr_s *)
-			(desc + CQE_SZ(head));
-		tx_compl_s0 = (struct nix_send_comp_s *)
-			((uint64_t *)tx_compl_cq + 1);
-		m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
-		while (m->next != NULL) {
-			m_next = m->next;
-			rte_pktmbuf_free_seg(m);
-			m = m_next;
-		}
-		rte_pktmbuf_free_seg(m);
-
-		head++;
-		head &= qmask;
-		tx_pkts++;
-	}
-	txq->tx_compl.head = head;
-	txq->tx_compl.available -= nb_pkts;
-
-	plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
-
-	if (mt_safe)
-		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
-}
-
 static __rte_always_inline uint16_t
 cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		    uint16_t pkts, uint64_t *cmd, const uint16_t flags)
@@ -1249,7 +1172,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 	bool sec;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+		handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
 
 	if (!(flags & NIX_TX_VWQE_F)) {
 		NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -1398,7 +1321,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 	bool sec;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+		handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
 
 	if (!(flags & NIX_TX_VWQE_F)) {
 		NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -1953,7 +1876,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 	} wd;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+		handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
 
 	if (!(flags & NIX_TX_VWQE_F)) {
 		NIX_XMIT_FC_OR_RETURN(txq, pkts);
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index e55a2aa133..bae4dda5e2 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -329,14 +329,28 @@ static int
 cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 {
 	struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint16_t flags = dev->tx_offload_flags;
+	struct roc_nix *nix = &dev->nix;
+	uint32_t head = 0, tail = 0;
 	int rc;
 
+
 	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
 	if (rc)
 		return rc;
 
 	/* Clear fc cache pkts to trigger worker stop */
 	txq->fc_cache_pkts = 0;
+
+	if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
+		struct roc_nix_sq *sq = &dev->sqs[qidx];
+		do {
+			handle_tx_completion_pkts(txq, 0);
+			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+		} while (head != tail);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index a82dcb3d19..9e0a3c5bb2 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -107,4 +107,81 @@ void cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev);
 /* Security context setup */
 void cn9k_eth_sec_ops_override(void);
 
+static inline uint16_t
+nix_tx_compl_nb_pkts(struct cn9k_eth_txq *txq, const uint64_t wdata,
+		const uint32_t qmask)
+{
+	uint16_t available = txq->tx_compl.available;
+
+	/* Update the available count if cached value is not enough */
+	if (!unlikely(available)) {
+		uint64_t reg, head, tail;
+
+		/* Use LDADDA version to avoid reorder */
+		reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
+		/* CQ_OP_STATUS operation error */
+		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+				reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+			return 0;
+
+		tail = reg & 0xFFFFF;
+		head = (reg >> 20) & 0xFFFFF;
+		if (tail < head)
+			available = tail - head + qmask + 1;
+		else
+			available = tail - head;
+
+		txq->tx_compl.available = available;
+	}
+	return available;
+}
+
+static inline void
+handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe)
+{
+#define CNXK_NIX_CQ_ENTRY_SZ 128
+#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)
+
+	uint16_t tx_pkts = 0, nb_pkts;
+	const uintptr_t desc = txq->tx_compl.desc_base;
+	const uint64_t wdata = txq->tx_compl.wdata;
+	const uint32_t qmask = txq->tx_compl.qmask;
+	uint32_t head = txq->tx_compl.head;
+	struct nix_cqe_hdr_s *tx_compl_cq;
+	struct nix_send_comp_s *tx_compl_s0;
+	struct rte_mbuf *m_next, *m;
+
+	if (mt_safe)
+		rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
+
+	nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, qmask);
+	while (tx_pkts < nb_pkts) {
+		rte_prefetch_non_temporal((void *)(desc +
+					(CQE_SZ((head + 2) & qmask))));
+		tx_compl_cq = (struct nix_cqe_hdr_s *)
+			(desc + CQE_SZ(head));
+		tx_compl_s0 = (struct nix_send_comp_s *)
+			((uint64_t *)tx_compl_cq + 1);
+		m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
+		while (m->next != NULL) {
+			m_next = m->next;
+			rte_pktmbuf_free_seg(m);
+			m = m_next;
+		}
+		rte_pktmbuf_free_seg(m);
+
+		head++;
+		head &= qmask;
+		tx_pkts++;
+	}
+	txq->tx_compl.head = head;
+	txq->tx_compl.available -= nb_pkts;
+
+	plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
+
+	if (mt_safe)
+		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
+}
+
+
 #endif /* __CN9K_ETHDEV_H__ */
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index e956c1ad2a..8f1e05a461 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -559,83 +559,6 @@ cn9k_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
 	} while (lmt_status == 0);
 }
 
-static inline uint16_t
-nix_tx_compl_nb_pkts(struct cn9k_eth_txq *txq, const uint64_t wdata,
-		const uint16_t pkts, const uint32_t qmask)
-{
-	uint32_t available = txq->tx_compl.available;
-
-	/* Update the available count if cached value is not enough */
-	if (unlikely(available < pkts)) {
-		uint64_t reg, head, tail;
-
-		/* Use LDADDA version to avoid reorder */
-		reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
-		/* CQ_OP_STATUS operation error */
-		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
-				reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
-			return 0;
-
-		tail = reg & 0xFFFFF;
-		head = (reg >> 20) & 0xFFFFF;
-		if (tail < head)
-			available = tail - head + qmask + 1;
-		else
-			available = tail - head;
-
-		txq->tx_compl.available = available;
-	}
-	return RTE_MIN(pkts, available);
-}
-
-static inline void
-handle_tx_completion_pkts(struct cn9k_eth_txq *txq, const uint16_t pkts,
-			  uint8_t mt_safe)
-{
-#define CNXK_NIX_CQ_ENTRY_SZ 128
-#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)
-
-	uint16_t tx_pkts = 0, nb_pkts;
-	const uintptr_t desc = txq->tx_compl.desc_base;
-	const uint64_t wdata = txq->tx_compl.wdata;
-	const uint32_t qmask = txq->tx_compl.qmask;
-	uint32_t head = txq->tx_compl.head;
-	struct nix_cqe_hdr_s *tx_compl_cq;
-	struct nix_send_comp_s *tx_compl_s0;
-	struct rte_mbuf *m_next, *m;
-
-	if (mt_safe)
-		rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
-
-	nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, pkts, qmask);
-	while (tx_pkts < nb_pkts) {
-		rte_prefetch_non_temporal((void *)(desc +
-					(CQE_SZ((head + 2) & qmask))));
-		tx_compl_cq = (struct nix_cqe_hdr_s *)
-			(desc + CQE_SZ(head));
-		tx_compl_s0 = (struct nix_send_comp_s *)
-			((uint64_t *)tx_compl_cq + 1);
-		m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
-		while (m->next != NULL) {
-			m_next = m->next;
-			rte_pktmbuf_free_seg(m);
-			m = m_next;
-		}
-		rte_pktmbuf_free_seg(m);
-
-		head++;
-		head &= qmask;
-		tx_pkts++;
-	}
-	txq->tx_compl.head = head;
-	txq->tx_compl.available -= nb_pkts;
-
-	plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
-
-	if (mt_safe)
-		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
-}
-
 static __rte_always_inline uint16_t
 cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
 		   uint64_t *cmd, const uint16_t flags)
@@ -648,7 +571,7 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
 	uint16_t i;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, 0);
+		handle_tx_completion_pkts(txq, 0);
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
 
@@ -700,7 +623,7 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t i;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, 0);
+		handle_tx_completion_pkts(txq, 0);
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
 
@@ -1049,7 +972,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t pkts_left;
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
-		handle_tx_completion_pkts(txq, pkts, 0);
+		handle_tx_completion_pkts(txq, 0);
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 31/32] net/cnxk: add aes-ccm to inline IPsec capabilities
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (28 preceding siblings ...)
  2023-05-25  9:59   ` [PATCH v3 30/32] net/cnxk: handle extbuf completion on ethdev stop Nithin Dabilpuram
@ 2023-05-25  9:59   ` Nithin Dabilpuram
  2023-05-25  9:59   ` [PATCH v3 32/32] common/cnxk: add check for null auth and anti-replay Nithin Dabilpuram
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:59 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Srujana Challa

From: Srujana Challa <schalla@marvell.com>

Declares AES-CCM support in inline IPsec capabilities.
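
A minimal sketch of how an application could check that the algorithm is
now advertised; this is plain rte_security capability iteration rather
than anything cnxk-specific, and the tunnel/egress index below is just
one illustrative choice:

#include <stdbool.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_security.h>

static bool
port_has_inline_aes_ccm(uint16_t port_id)
{
        const struct rte_cryptodev_capabilities *c;
        const struct rte_security_capability *cap;
        struct rte_security_capability_idx idx = {
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                },
        };
        void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

        if (sec_ctx == NULL)
                return false;
        cap = rte_security_capability_get(sec_ctx, &idx);
        if (cap == NULL)
                return false;
        /* Crypto capability list is terminated by RTE_CRYPTO_OP_TYPE_UNDEFINED. */
        for (c = cap->crypto_capabilities; c->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; c++) {
                if (c->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                    c->sym.aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
                        return true;
        }
        return false;
}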

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 drivers/net/cnxk/cn10k_ethdev_sec.c | 30 +++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 30 +++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 9625704ec1..5bc547051d 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -228,6 +228,36 @@ static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 8,
+					.max = 12,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 11,
+					.max = 13,
+					.increment = 1
+				}
+			}, }
+		}, }
+	},
 	{	/* NULL (AUTH) */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 327f221e38..688b13ae1e 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -100,6 +100,36 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 8,
+					.max = 12,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 11,
+					.max = 13,
+					.increment = 1
+				}
+			}, }
+		}, }
+	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 32/32] common/cnxk: add check for null auth and anti-replay
  2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
                     ` (29 preceding siblings ...)
  2023-05-25  9:59   ` [PATCH v3 31/32] net/cnxk: add aes-ccm to inline IPsec capabilities Nithin Dabilpuram
@ 2023-05-25  9:59   ` Nithin Dabilpuram
  2023-05-26  8:55     ` Jerin Jacob
  30 siblings, 1 reply; 89+ messages in thread
From: Nithin Dabilpuram @ 2023-05-25  9:59 UTC (permalink / raw)
  To: Nithin Kumar Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Srujana Challa

From: Srujana Challa <schalla@marvell.com>

As per IPsec RFC, the anti-replay service can be selected for
an SA only if the integrity service is selected for that SA.
This patch adds the validation check for the same.
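
For illustration, a combination that the new check rejects; the field
values are arbitrary and only the NULL-auth-plus-replay pairing matters:

#include <rte_crypto_sym.h>
#include <rte_security.h>

/* Inbound SA requesting anti-replay while integrity is NULL: rejected with -EINVAL. */
static void
rejected_sa_example(void)
{
        struct rte_security_ipsec_xform ipsec = {
                .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                .replay_win_sz = 64,                      /* anti-replay requested ... */
        };
        struct rte_crypto_sym_xform auth = {
                .type = RTE_CRYPTO_SYM_XFORM_AUTH,
                .auth = { .algo = RTE_CRYPTO_AUTH_NULL }, /* ... without integrity */
        };

        (void)ipsec;
        (void)auth;
}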

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 13ca2c7791..a8c3ba90cd 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -155,6 +155,10 @@ ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
 
 		switch (auth_xfrm->auth.algo) {
 		case RTE_CRYPTO_AUTH_NULL:
+			if (w2->s.dir == ROC_IE_SA_DIR_INBOUND && ipsec_xfrm->replay_win_sz) {
+				plt_err("anti-replay can't be supported with integrity service disabled");
+				return -EINVAL;
+			}
 			w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
 			break;
 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
@@ -1392,6 +1396,11 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
 	if (ret)
 		return ret;
 
+	if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD &&
+	    crypto_xform->auth.algo == RTE_CRYPTO_AUTH_NULL && ipsec->replay_win_sz) {
+		plt_err("anti-replay can't be supported with integrity service disabled");
+		return -EINVAL;
+	}
 	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
 	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
 	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* Re: [PATCH v3 32/32] common/cnxk: add check for null auth and anti-replay
  2023-05-25  9:59   ` [PATCH v3 32/32] common/cnxk: add check for null auth and anti-replay Nithin Dabilpuram
@ 2023-05-26  8:55     ` Jerin Jacob
  0 siblings, 0 replies; 89+ messages in thread
From: Jerin Jacob @ 2023-05-26  8:55 UTC (permalink / raw)
  To: Nithin Dabilpuram
  Cc: Kiran Kumar K, Sunil Kumar Kori, Satha Rao, jerinj, dev, Srujana Challa

On Thu, May 25, 2023 at 3:41 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> From: Srujana Challa <schalla@marvell.com>
>
> As per IPsec RFC, the anti-replay service can be selected for
> an SA only if the integrity service is selected for that SA.
> This patch adds the validation check for the same.
>
> Signed-off-by: Srujana Challa <schalla@marvell.com>


Series applied to dpdk-next-net-mrvl/for-next-net. Thanks


> ---
>  drivers/common/cnxk/cnxk_security.c | 9 +++++++++
>  1 file changed, 9 insertions(+)
>
> diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
> index 13ca2c7791..a8c3ba90cd 100644
> --- a/drivers/common/cnxk/cnxk_security.c
> +++ b/drivers/common/cnxk/cnxk_security.c
> @@ -155,6 +155,10 @@ ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
>
>                 switch (auth_xfrm->auth.algo) {
>                 case RTE_CRYPTO_AUTH_NULL:
> +                       if (w2->s.dir == ROC_IE_SA_DIR_INBOUND && ipsec_xfrm->replay_win_sz) {
> +                               plt_err("anti-replay can't be supported with integrity service disabled");
> +                               return -EINVAL;
> +                       }
>                         w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
>                         break;
>                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
> @@ -1392,6 +1396,11 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
>         if (ret)
>                 return ret;
>
> +       if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD &&
> +           crypto_xform->auth.algo == RTE_CRYPTO_AUTH_NULL && ipsec->replay_win_sz) {
> +               plt_err("anti-replay can't be supported with integrity service disabled");
> +               return -EINVAL;
> +       }
>         if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
>             auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
>             auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 89+ messages in thread

end of thread, other threads:[~2023-05-26  8:55 UTC | newest]

Thread overview: 89+ messages (download: mbox.gz / follow: Atom feed)
2023-04-11  9:11 [PATCH 01/21] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 02/21] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
2023-05-18  5:52   ` Jerin Jacob
2023-04-11  9:11 ` [PATCH 03/21] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 04/21] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 05/21] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 06/21] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 07/21] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 08/21] common/cnxk: update age drop statistics Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 09/21] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 10/21] common/cnxk: add receive error mask Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 11/21] common/cnxk: fix null pointer dereference Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 12/21] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 13/21] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 14/21] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 15/21] common/cnxk: update errata info Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 16/21] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 17/21] common/cnxk: add more comments to mbox code Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 18/21] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 19/21] common/cnxk: access valid pass value Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 20/21] net/cnxk: add receive error mask Nithin Dabilpuram
2023-04-11  9:11 ` [PATCH 21/21] common/cnxk: support of 1:n pool:aura per NIX LF Nithin Dabilpuram
2023-05-18  5:50   ` Jerin Jacob
2023-05-24 10:03 ` [PATCH v2 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 02/32] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 03/32] common/cnxk: fix CPT backpressure disable on LBK Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 04/32] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 05/32] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 06/32] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 07/32] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 08/32] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 09/32] common/cnxk: update age drop statistics Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 10/32] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 11/32] common/cnxk: add receive error mask Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 12/32] common/cnxk: fix null pointer dereference Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 13/32] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 14/32] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 15/32] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 16/32] common/cnxk: update errata info Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 17/32] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 18/32] common/cnxk: add more comments to mbox code Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 19/32] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 20/32] common/cnxk: access valid pass value Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 21/32] net/cnxk: add receive error mask Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 22/32] common/cnxk: support of 1-N pool-aura per NIX LF Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 23/32] net/cnxk: support for inbound without inline dev mode Nithin Dabilpuram
2023-05-24 10:03   ` [PATCH v2 24/32] common/cnxk: fix inline device VF identification Nithin Dabilpuram
2023-05-24 10:04   ` [PATCH v2 25/32] common/cnxk: avoid inline dev CPT lf detach multiple times Nithin Dabilpuram
2023-05-24 10:04   ` [PATCH v2 26/32] common/cnxk: skip CGX promisc mode with NPC exact match Nithin Dabilpuram
2023-05-24 10:04   ` [PATCH v2 27/32] common/cnxk: configure PFC on SPB aura Nithin Dabilpuram
2023-05-24 10:04   ` [PATCH v2 28/32] common/nix: check for null derefernce Nithin Dabilpuram
2023-05-24 10:04   ` [PATCH v2 29/32] common/cnxk: fix receive queue with multiple mask Nithin Dabilpuram
2023-05-24 10:04   ` [PATCH v2 30/32] net/cnxk: handle extbuf completion on ethdev stop Nithin Dabilpuram
2023-05-24 10:04   ` [PATCH v2 31/32] net/cnxk: add aes-ccm to inline IPsec capabilities Nithin Dabilpuram
2023-05-24 10:04   ` [PATCH v2 32/32] common/cnxk: add check for null auth and anti-replay Nithin Dabilpuram
2023-05-25  9:28     ` Jerin Jacob
2023-05-25  9:58 ` [PATCH v3 01/32] common/cnxk: allocate dynamic BPIDs Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 02/32] common/cnxk: add pool BPID to RQ while using common pool Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 03/32] common/cnxk: fix CPT backpressure disable on LBK Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 04/32] common/cnxk: skip flow ctrl set on non-existent meta aura Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 05/32] common/cnxk: reduce sqes per sqb by one Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 06/32] common/cnxk: dump SW SSO work count as xstat Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 07/32] common/cnxk: add percent drop threshold to pool Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 08/32] common/cnxk: make aura flow control config more predictable Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 09/32] common/cnxk: update age drop statistics Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 10/32] common/cnxk: fetch eng caps for inl outb inst format Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 11/32] common/cnxk: add receive error mask Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 12/32] common/cnxk: fix null pointer dereference Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 13/32] common/cnxk: fix parameter in NIX dump Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 14/32] common/cnxk: set relchan in TL4 config for each SDP queue Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 15/32] common/cnxk: avoid STALL with dual rate on CNF95N Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 16/32] common/cnxk: update errata info Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 17/32] common/cnxk: sync between mbox up and down messages Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 18/32] common/cnxk: add more comments to mbox code Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 19/32] common/cnxk: add CN105xxN B0 model Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 20/32] common/cnxk: access valid pass value Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 21/32] net/cnxk: add receive error mask Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 22/32] common/cnxk: support of 1-N pool-aura per NIX LF Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 23/32] net/cnxk: support for inbound without inline dev mode Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 24/32] common/cnxk: fix inline device VF identification Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 25/32] common/cnxk: avoid inline dev CPT lf detach multiple times Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 26/32] common/cnxk: skip CGX promisc mode with NPC exact match Nithin Dabilpuram
2023-05-25  9:58   ` [PATCH v3 27/32] common/cnxk: configure PFC on SPB aura Nithin Dabilpuram
2023-05-25  9:59   ` [PATCH v3 28/32] common/nix: check for null dereference Nithin Dabilpuram
2023-05-25  9:59   ` [PATCH v3 29/32] common/cnxk: fix receive queue with multiple mask Nithin Dabilpuram
2023-05-25  9:59   ` [PATCH v3 30/32] net/cnxk: handle extbuf completion on ethdev stop Nithin Dabilpuram
2023-05-25  9:59   ` [PATCH v3 31/32] net/cnxk: add aes-ccm to inline IPsec capabilities Nithin Dabilpuram
2023-05-25  9:59   ` [PATCH v3 32/32] common/cnxk: add check for null auth and anti-replay Nithin Dabilpuram
2023-05-26  8:55     ` Jerin Jacob
